import logging
from typing import Optional, Tuple, List
import torch
from torch import Tensor, nn
from torch.nn import Module
_LG = logging.getLogger(__name__)
class LayerNorm(nn.LayerNorm):
"""Layer norm with transpose"""
def forward(self, input: Tensor) -> Tensor:
x = input.transpose(-2, -1)
x = nn.functional.layer_norm(
x, self.normalized_shape, self.weight, self.bias, self.eps)
x = x.transpose(-2, -1)
return x
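# Illustrative sketch (shapes assumed, not from the source): this variant
# normalizes over the channel dimension of a ``[batch, channel, frame]``
# tensor by transposing around ``nn.functional.layer_norm``.
# >>> ln = LayerNorm(normalized_shape=512, elementwise_affine=True)
# >>> x = torch.randn(2, 512, 100)  # [batch, channel, frame]
# >>> ln(x).shape
# torch.Size([2, 512, 100])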
class ConvLayerBlock(Module):
"""Convolution unit of FeatureExtractor"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int,
bias: bool,
layer_norm: Optional[Module],
):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.layer_norm = layer_norm
self.conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
bias=bias,
)
def forward(
self,
x: Tensor,
length: Optional[Tensor],
) -> Tuple[Tensor, Optional[Tensor]]:
"""
Args:
x (Tensor): Shape: ``[batch, in_channels, in_frame]``.
length (Tensor or None, optional): Shape ``[batch, ]``.
Returns:
Tensor: Shape ``[batch, out_channels, out_frames]``.
Optional[Tensor]: Shape ``[batch, ]``.
"""
x = self.conv(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
x = nn.functional.gelu(x)
if length is not None:
length = torch.div(length - self.kernel_size, self.stride, rounding_mode='floor') + 1
# When input length is 0, the resulting length can be negative. So fix it here.
length = torch.max(torch.zeros_like(length), length)
return x, length
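# Illustrative sketch of the length arithmetic above (values assumed): the
# first wav2vec2 conv layer uses kernel_size=10 and stride=5, so 400 input
# samples yield floor((400 - 10) / 5) + 1 = 79 output frames.
# >>> block = ConvLayerBlock(1, 512, kernel_size=10, stride=5, bias=False, layer_norm=None)
# >>> x, length = block(torch.randn(1, 1, 400), torch.tensor([400]))
# >>> x.shape, length
# (torch.Size([1, 512, 79]), tensor([79]))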
class FeatureExtractor(Module):
"""Extract features from audio
Args:
conv_layers (nn.ModuleList):
convolution layers
"""
def __init__(
self,
conv_layers: nn.ModuleList,
):
super().__init__()
self.conv_layers = conv_layers
def forward(
self,
x: Tensor,
length: Optional[Tensor],
) -> Tuple[Tensor, Optional[Tensor]]:
"""
Args:
x (Tensor):
Input Tensor representing a batch of audio,
shape: ``[batch, time]``.
length (Tensor or None, optional):
Valid length of each input sample. shape: ``[batch, ]``.
Returns:
Tensor:
The resulting feature, shape: ``[batch, frame, feature]``
Optional[Tensor]:
Valid length of each output sample. shape: ``[batch, ]``.
"""
if x.ndim != 2:
raise ValueError(
"Expected the input Tensor to be 2D (batch, time), "
"but received {list(x.shape)}")
x = x.unsqueeze(1) # (batch, channel==1, frame)
for layer in self.conv_layers:
x, length = layer(x, length) # (batch, feature, frame)
x = x.transpose(1, 2) # (batch, frame, feature)
return x, length
class FeatureProjection(Module):
"""Layer that connects FeatureExtractor and Encoder
Projects features to encoder dimension.
Args:
in_features (int): Input feature dim.
out_features (int): Output feature dim.
dropout (float): Dropout probability.
"""
def __init__(
self,
in_features: int,
out_features: int,
dropout: float,
):
super().__init__()
self.layer_norm = nn.LayerNorm(in_features)
self.projection = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
"""
Args:
x (Tensor):
Feature Tensor. shape: ``[batch, frame, in_feature]``
Returns:
Tensor: Projected features. ``[batch, frame, out_feature]``.
"""
x = self.layer_norm(x)
x = self.projection(x)
x = self.dropout(x)
return x
class ConvolutionalPositionalEmbedding(Module):
"""Positional embedding which is placed at the beginning of Transformer.
Args:
embed_dim (int): Feature dimension of the input Tensor.
kernel_size (int): The number of frames to use.
groups (int): The number of groups in feature dimensions.
"""
def __init__(
self,
embed_dim: int,
kernel_size: int,
groups: int,
):
super().__init__()
self.embed_dim = embed_dim
self.conv = nn.Conv1d(
in_channels=embed_dim,
out_channels=embed_dim,
kernel_size=kernel_size,
padding=kernel_size // 2,
groups=groups,
)
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
self.num_remove: int = 1 if kernel_size % 2 == 0 else 0
def __prepare_scriptable__(self):
for hook in self.conv._forward_pre_hooks.values():
# The hook we want to remove is an instance of WeightNorm class, so
# normally we would do `if isinstance(...)` but this class is not accessible
# because of shadowing, so we check the module name directly.
# https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
if (
hook.__module__ == 'torch.nn.utils.weight_norm' and
hook.__class__.__name__ == 'WeightNorm'
):
_LG.warning('Removing weight_norm from %s', self.__class__.__name__)
torch.nn.utils.remove_weight_norm(self.conv)
return self
def forward(self, x):
"""
Args:
x (Tensor): shape ``[batch, frame, feature]``.
Returns:
Tensor: The resulting feature. Shape ``[batch, frame, feature]``.
"""
x = x.transpose(-2, -1)
x = self.conv(x)
if self.num_remove > 0:
x = x[..., :-self.num_remove]
x = torch.nn.functional.gelu(x)
x = x.transpose(-2, -1)
return x
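# Illustrative sketch (values assumed): with an even kernel size, the
# ``kernel_size // 2`` padding produces one extra frame, which ``num_remove``
# trims so the output keeps the input's frame count.
# >>> pos_conv = ConvolutionalPositionalEmbedding(embed_dim=768, kernel_size=128, groups=16)
# >>> x = torch.randn(2, 49, 768)  # [batch, frame, feature]
# >>> pos_conv(x).shape
# torch.Size([2, 49, 768])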
class SelfAttention(Module):
"""Multihead Self Attention module
Args:
embed_dim (int): Total dimension of the model.
num_heads (int): The number of heads.
dropout (float, optional):
Dropout probability on attn_output_weights. Default: ``0.0``
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
):
super().__init__()
head_dim = embed_dim // num_heads
if head_dim * num_heads != embed_dim:
raise ValueError(f"`embed_dim ({embed_dim})` is not divisible by `num_heads ({num_heads})`")
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = torch.nn.Dropout(dropout)
self.head_dim = head_dim
self.scaling = self.head_dim ** -0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
def forward(
self,
x: Tensor,
attention_mask: Optional[Tensor] = None,
) -> Tensor:
"""
Args:
x (Tensor): shape: ``[batch_size, sequence_length, embed_dim]``.
attention_mask (Tensor or None, optional):
shape: ``[batch_size, 1, sequence_length, sequence_length]``
Returns:
Tensor: The resulting tensor. shape: ``[batch, sequence_length, embed_dim]``
"""
if x.ndim != 3 or x.shape[2] != self.embed_dim:
raise ValueError(
f"The expected input shape is (batch, sequence, embed_dim=={self.embed_dim}). "
f"Found {x.shape}."
)
batch_size, length, embed_dim = x.size()
if attention_mask is not None:
shape_ = (batch_size, 1, length, length)
if attention_mask.size() != shape_:
raise ValueError(
f"The expected attention mask shape is {shape_}. "
f"Found {attention_mask.size()}."
)
shape = (batch_size, length, self.num_heads, self.head_dim)
q = self.q_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd
k = self.k_proj(x).view(*shape).permute(0, 2, 3, 1) # B, nH, Hd, L
v = self.v_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd
weights = self.scaling * (q @ k) # B, nH, L, L
if attention_mask is not None:
weights += attention_mask
weights = torch.nn.functional.softmax(weights, dim=-1)
weights = self.dropout(weights)
output = weights @ v # B, nH, L, Hd
output = output.transpose(2, 1).reshape(batch_size, length, embed_dim)
output = self.out_proj(output)
return output
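# Illustrative sketch (values assumed): self-attention preserves the input
# shape; the optional additive mask of shape [batch, 1, seq, seq] is applied
# to the attention logits before softmax.
# >>> attn = SelfAttention(embed_dim=768, num_heads=12)
# >>> x = torch.randn(2, 49, 768)  # [batch, sequence, embed_dim]
# >>> attn(x).shape
# torch.Size([2, 49, 768])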
class FeedForward(Module):
"""Layer that follows attention layer in encoder layer.
"""
def __init__(
self,
io_features: int,
intermediate_features: int,
intermediate_dropout: float,
output_dropout: float,
):
super().__init__()
self.intermediate_dense = nn.Linear(io_features, intermediate_features)
self.intermediate_dropout = nn.Dropout(intermediate_dropout)
self.output_dense = nn.Linear(intermediate_features, io_features)
self.output_dropout = nn.Dropout(output_dropout)
def forward(self, x):
"""
Args:
x (Tensor): shape: `(batch, sequence_length, io_features)`
Returns:
x (Tensor): shape: `(batch, sequence_length, io_features)`
"""
x = self.intermediate_dense(x)
x = torch.nn.functional.gelu(x)
x = self.intermediate_dropout(x)
x = self.output_dense(x)
x = self.output_dropout(x)
return x
class EncoderLayer(Module):
"""A layer unit in encoder. Combines multihead self attention and feed forward.
"""
def __init__(
self,
attention: Module,
dropout: float,
layer_norm_first: bool,
feed_forward: Module,
):
super().__init__()
self.attention = attention
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(attention.embed_dim)
self.layer_norm_first = layer_norm_first
self.feed_forward = feed_forward
self.final_layer_norm = nn.LayerNorm(attention.embed_dim)
def forward(
self,
x: Tensor,
attention_mask: Optional[Tensor] = None,
):
"""
Args:
x (Tensor): shape: `(batch, sequence_length, embed_dim)`
attention_mask (Tensor or None, optional):
shape: `(batch, 1, sequence_length, sequence_length)`
"""
residual = x
if self.layer_norm_first:
x = self.layer_norm(x)
x = self.attention(x, attention_mask)
x = self.dropout(x)
x = residual + x
if self.layer_norm_first:
x = x + self.feed_forward(self.final_layer_norm(x))
else:
x = self.layer_norm(x)
x = self.final_layer_norm(x + self.feed_forward(x))
return x
class Transformer(Module):
def __init__(
self,
pos_conv_embed: Module,
dropout: float,
layers: Module,
layer_norm_first: bool,
layer_drop: float,
):
super().__init__()
self.pos_conv_embed = pos_conv_embed
self.layer_norm = nn.LayerNorm(pos_conv_embed.embed_dim)
self.layer_norm_first = layer_norm_first
self.layer_drop = layer_drop
self.dropout = nn.Dropout(dropout)
self.layers = layers
def _preprocess(self, x: Tensor):
x = x + self.pos_conv_embed(x)
if self.layer_norm_first:
x = self.layer_norm(x)
x = self.dropout(x)
return x
def forward(
self,
x: Tensor,
attention_mask: Optional[Tensor] = None,
):
x = self._preprocess(x)
for layer in self.layers:
if not (self.training and torch.rand(1).item() <= self.layer_drop):
x = layer(x, attention_mask)
if not self.layer_norm_first:
x = self.layer_norm(x)
return x
def get_intermediate_outputs(
self,
x: Tensor,
attention_mask: Optional[Tensor] = None,
num_layers: Optional[int] = None,
) -> List[Tensor]:
if num_layers is not None:
if not 0 < num_layers <= len(self.layers):
raise ValueError(f'`num_layers` must be in the range [1, {len(self.layers)}]')
ret: List[Tensor] = []
x = self._preprocess(x)
for layer in self.layers:
x = layer(x, attention_mask)
ret.append(x)
if num_layers is not None and len(ret) >= num_layers:
return ret
return ret
class Encoder(Module):
def __init__(
self,
feature_projection: Module,
transformer: Module,
):
super().__init__()
self.feature_projection = feature_projection
self.transformer = transformer
def _preprocess(
self,
features: Tensor,
lengths: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
x = self.feature_projection(features)
mask: Optional[Tensor] = None
if lengths is not None:
batch_size, max_len, _ = x.shape
# create mask for padded elements and zero-out them
mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None]
x[mask] = 0.0
# extend the mask to attention shape and set weight
mask = -10000.0 * mask[:, None, None, :].to(dtype=features.dtype)
mask = mask.expand(batch_size, 1, max_len, max_len)
return x, mask
def forward(
self,
features: Tensor,
lengths: Optional[Tensor] = None,
) -> Tensor:
x, mask = self._preprocess(features, lengths)
x = self.transformer(x, attention_mask=mask)
return x
def extract_features(
self,
features: Tensor,
lengths: Optional[Tensor] = None,
num_layers: Optional[int] = None,
) -> List[Tensor]:
x, masks = self._preprocess(features, lengths)
return self.transformer.get_intermediate_outputs(
x, attention_mask=masks, num_layers=num_layers)
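# Illustrative sketch of the mask construction in ``_preprocess`` (values
# assumed): frames at or beyond each valid length are flagged, zeroed out,
# and turned into an additive attention mask of large negative values.
# >>> lengths = torch.tensor([3, 5])
# >>> mask = torch.arange(5).expand(2, 5) >= lengths[:, None]
# >>> mask
# tensor([[False, False, False,  True,  True],
#         [False, False, False, False, False]])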
################################################################################
def _get_feature_extractor(
norm_mode: str,
shapes: List[Tuple[int, int, int]],
bias: bool,
) -> FeatureExtractor:
"""
Args:
norm_mode (str):
Either "group_norm" or "layer_norm".
If "group_norm", then a single normalization is applied
in the first convolution block. Otherwise, all the convolution
blocks will have layer normalization.
This option corresponds to "extractor_mode" from fairseq.
Expected values are "group_norm" for Base arch, and
"layer_norm" for Large arch.
shapes (list of tuple of int):
Configuration of convolution layers. List of convolution configuration,
i.e. ``[(output_channel, kernel_size, stride), ...]``
This option corresponds to "conv_feature_layers" from fairseq.
Expected values are
``[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2``
for all the architectures.
bias (bool):
Whether to include bias term to each convolution operation.
This option corresponds to "conv_bias" from fairseq.
Expected values are False for Base arch, and True for Large arch.
See Also:
* Original implementation
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L666-L733
* "extractor_mode"
- Def and base:
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L38-L45
- Large:
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L52
* "conv_feature_layers"
- Def, base and large:
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L94-L100
* "conv_bias"
- Def and base:
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L101-L103
- Large:
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L61
"""
assert norm_mode in ["group_norm", "layer_norm"]
blocks = []
in_channels = 1
for i, (out_channels, kernel_size, stride) in enumerate(shapes):
normalization = None
if norm_mode == "group_norm" and i == 0:
normalization = nn.GroupNorm(
num_groups=out_channels,
num_channels=out_channels,
affine=True,
)
elif norm_mode == "layer_norm":
normalization = LayerNorm(
normalized_shape=out_channels,
elementwise_affine=True,
)
blocks.append(
ConvLayerBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
bias=bias,
layer_norm=normalization,
)
)
in_channels = out_channels
return FeatureExtractor(nn.ModuleList(blocks))
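# Illustrative sketch using the Base-arch configuration documented above
# (shapes and norm mode from the docstring; input size assumed):
# >>> shapes = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
# >>> extractor = _get_feature_extractor("group_norm", shapes, bias=False)
# >>> x, lengths = extractor(torch.randn(1, 16000), torch.tensor([16000]))
# >>> x.shape  # one second at 16 kHz -> 49 frames of 512 features
# torch.Size([1, 49, 512])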
def _get_encoder(
in_features: int,
embed_dim: int,
dropout_input: float,
pos_conv_kernel: int,
pos_conv_groups: int,
num_layers: int,
num_heads: int,
attention_dropout: float,
ff_interm_features: int,
ff_interm_dropout: float,
dropout: float,
layer_norm_first: bool,
layer_drop: float,
) -> Encoder:
"""
Args:
in_features (int): The number of input features.
embed_dim (int):
The dimension of embedding.
This option corresponds to "encoder_embed_dim" from fairseq.
Expected values are 768 for Base arch, and 1024 for Large arch.
dropout_input (float):
The dropout probability applied after the input feature is projected
to ``embed_dim``.
This option corresponds to "dropout_input" from fairseq.
Expected values are 0.1 for both Base and Large arch.
pos_conv_kernel (int):
The kernel size of convolutional positional embeddings.
This option corresponds to "conv_pos" from fairseq.
Expected values are 128 for both Base and Large arch.
pos_conv_groups (int):
The number of groups of convolutional positional embeddings.
This option corresponds to "conv_pos_groups" from fairseq.
Expected values are 16 for both Base and Large arch.
num_layers (int):
The number of self attention layers in transformer block.
This option corresponds to "encoder_layers" from fairseq.
Expected values are 12 for Base and 24 for Large arch.
num_heads (int):
The number of heads in self attention layers.
This option corresponds to "encoder_attention_heads" from fairseq.
Expected values are 12 for Base and 16 for Large arch.
attention_dropout (float):
The dropout probability applied after softmax in self-attention layer.
This option corresponds to "attention_dropout" from fairseq.
Expected values are 0.1 for Base and 0.0 for Large arch.
ff_interm_features (int):
The dimension of hidden features in feed forward layer.
This option corresponds to "encoder_ffn_embed_dim" from fairseq.
Expected values are 3072 for Base and 4096 for Large arch.
ff_interm_dropout (float):
The dropout probability applied in feedforward layer.
This option corresponds to "activation_dropout" from fairseq.
Expected values are 0.1 for both Base and Large arch.
dropout (float):
The dropout probability applied at the end of feed forward layer.
This option corresponds to "dropout" from fairseq.
Expected values are 0.1 for Base and 0.0 for Large arch.
layer_norm_first (bool):
Control the order of layer norm in transformer layer and each encoder layer.
If True, in transformer layer, layer norm is applied before features are fed
to encoder layers. In encoder layer, two layer norms are applied before and after
self attention.
If False, in transformer layer, layer norm is applied after features are fed
to encoder layers. In encoder layer, two layer norms are applied after self
attention, before and after feed forward.
This option corresponds to "layer_norm_first" from fairseq.
Expected values are False for Base and True for Large arch.
layer_drop (float):
Probability to drop each encoder layer during training.
This option corresponds to "layerdrop" from fairseq.
Expected values are 0.1 for both Base and Large arch.
See Also:
* "encoder_embed_dim"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L49-L51
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L64
* "dropout_input"
- Def, base and large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L75-L78
* "conv_pos"
- Def, base and large
NOTE: The description is wrong.
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L204-L207
- Usage
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L756
* "conv_pos_groups"
- Def, base and large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L208-L211
* "encoder_layers"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L46-L48
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L63
* "encoder_attention_heads"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L55-L57
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L66
* "attention_dropout"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L66-L68
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L60
* "encoder_ffn_embed_dim"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L52-L54
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L65
* "activation_dropout"
- Def
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L69-L71
- Base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L55
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L55
* "dropout"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L63-L65
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L59
* "layer_norm_first"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L91-L93
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L53
* "layerdrop"
- Def
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L72-L74
- Base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L54
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L54
"""
feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)
# Original impl
# https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
encoder_layers = nn.ModuleList()
for _ in range(num_layers):
attention = SelfAttention(
embed_dim=embed_dim,
num_heads=num_heads,
dropout=attention_dropout,
)
feed_forward = FeedForward(
io_features=embed_dim,
intermediate_features=ff_interm_features,
intermediate_dropout=ff_interm_dropout,
output_dropout=dropout,
)
encoder_layers.append(
EncoderLayer(
attention=attention,
dropout=dropout,
layer_norm_first=layer_norm_first,
feed_forward=feed_forward,
)
)
transformer = Transformer(
pos_conv_embed=pos_conv,
dropout=dropout,
layers=encoder_layers,
layer_norm_first=not layer_norm_first,
layer_drop=layer_drop,
)
return Encoder(feature_projection, transformer)
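# Illustrative sketch using the Base-arch values listed in the docstring
# (these mirror the "expected values" above, not a pinned config file):
# >>> encoder = _get_encoder(
# ...     in_features=512, embed_dim=768, dropout_input=0.1,
# ...     pos_conv_kernel=128, pos_conv_groups=16,
# ...     num_layers=12, num_heads=12, attention_dropout=0.1,
# ...     ff_interm_features=3072, ff_interm_dropout=0.1, dropout=0.1,
# ...     layer_norm_first=False, layer_drop=0.1)
# >>> features = torch.randn(1, 49, 512)  # output of the feature extractor
# >>> encoder(features).shape
# torch.Size([1, 49, 768])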
################################################################################
from .import_huggingface import import_huggingface_model
from .import_fairseq import import_fairseq_model
__all__ = [
'import_huggingface_model',
'import_fairseq_model',
]
################################################################################
"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
"""
import logging
from torch.nn import Module
from ..model import Wav2Vec2Model, wav2vec2_model
_LG = logging.getLogger(__name__)
def _get_config(cfg):
config = {
'extractor_mode': f'{cfg.feat_extract_norm}_norm',
'extractor_conv_layer_config': list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
'extractor_conv_bias': cfg.conv_bias,
'encoder_embed_dim': cfg.hidden_size,
'encoder_projection_dropout': cfg.feat_proj_dropout,
'encoder_pos_conv_kernel': cfg.num_conv_pos_embeddings,
'encoder_pos_conv_groups': cfg.num_conv_pos_embedding_groups,
'encoder_num_layers': cfg.num_hidden_layers,
'encoder_num_heads': cfg.num_attention_heads,
'encoder_attention_dropout': cfg.attention_dropout,
'encoder_ff_interm_features': cfg.intermediate_size,
'encoder_ff_interm_dropout': cfg.activation_dropout,
'encoder_dropout': cfg.hidden_dropout,
'encoder_layer_norm_first': cfg.do_stable_layer_norm,
'encoder_layer_drop': cfg.layerdrop,
}
return config
def _build(config, original):
if original.__class__.__name__ == 'Wav2Vec2ForCTC':
aux_num_out = original.config.vocab_size
wav2vec2 = original.wav2vec2
else:
_LG.warning(
'The model is not an instance of Wav2Vec2ForCTC. '
'"lm_head" module is not imported.')
aux_num_out = None
wav2vec2 = original
imported = wav2vec2_model(**config, aux_num_out=aux_num_out)
imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict())
imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict())
imported.encoder.transformer.load_state_dict(wav2vec2.encoder.state_dict())
if original.__class__.__name__ == 'Wav2Vec2ForCTC':
imported.aux.load_state_dict(original.lm_head.state_dict())
return imported
def import_huggingface_model(original: Module) -> Wav2Vec2Model:
"""import_huggingface_model(original: torch.nn.Module) -> torchaudio.models.Wav2Vec2Model
Build Wav2Vec2Model from the corresponding model object of Hugging Face's `Transformers`_.
Args:
original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``.
Returns:
Wav2Vec2Model: Imported model.
Example
>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
>>>
>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = import_huggingface_model(original)
>>>
>>> waveforms, _ = torchaudio.load("audio.wav")
>>> logits, _ = model(waveforms)
.. _Transformers: https://huggingface.co/transformers/
"""
_LG.info('Importing model.')
_LG.info('Loading model configuration.')
config = _get_config(original.config)
_LG.debug(' - config: %s', config)
_LG.info('Building model.')
imported = _build(config, original)
return imported
################################################################################
"""Import fariseq's wav2vec2.0 pretrained weights to torchaudios's format.
For this module to work, you need `fairseq`.
"""
import re
from torch.nn import Module
from ..model import Wav2Vec2Model, wav2vec2_model
def _parse_config(w2v_model):
encoder = w2v_model.encoder
conv_layers = w2v_model.feature_extractor.conv_layers
if 'GroupNorm' in conv_layers[0][2].__class__.__name__:
extractor_mode = 'group_norm'
else:
extractor_mode = 'layer_norm'
conv_layer_config = [(l[0].out_channels, l[0].kernel_size[0], l[0].stride[0]) for l in conv_layers]
if all(l[0].bias is None for l in conv_layers):
conv_bias = False
elif all(l[0].bias is not None for l in conv_layers):
conv_bias = True
else:
raise ValueError(
'Either all the convolution layers should have a bias term, or none of them should.')
config = {
'extractor_mode': extractor_mode,
'extractor_conv_layer_config': conv_layer_config,
'extractor_conv_bias': conv_bias,
'encoder_embed_dim': w2v_model.post_extract_proj.out_features,
'encoder_projection_dropout': w2v_model.dropout_input.p,
'encoder_pos_conv_kernel': encoder.pos_conv[0].kernel_size[0],
'encoder_pos_conv_groups': encoder.pos_conv[0].groups,
'encoder_num_layers': len(encoder.layers),
'encoder_num_heads': encoder.layers[0].self_attn.num_heads,
'encoder_attention_dropout': encoder.layers[0].self_attn.dropout_module.p,
'encoder_ff_interm_features': encoder.layers[0].fc1.out_features,
'encoder_ff_interm_dropout': encoder.layers[0].dropout2.p,
'encoder_dropout': encoder.layers[0].dropout3.p,
'encoder_layer_norm_first': encoder.layer_norm_first,
'encoder_layer_drop': encoder.layerdrop,
}
return config
def _map_key(key):
key_ = key
if key.startswith('w2v_model.'):
key = key.replace('w2v_model.', '')
if re.match(r'(mask_emb|quantizer|project_q|final_proj)', key):
return None
# Feature Extractor
# Group norm when "extractor_mode" is "default".
# (Only the first layer)
# "conv_layers.0.2.weight" -> "conv_layers.0.layer_norm.weight"
# "conv_layers.0.2.bias" -> "conv_layers.0.layer_norm.bias"
match = re.match(r'feature_extractor\.conv_layers\.0\.2\.(weight|bias)', key)
if match:
return f"feature_extractor.conv_layers.0.layer_norm.{match.group(1)}"
# Convolutions
# "conv_layers.X.0.weight" -> "conv_layers.X.conv.weight"
# "conv_layers.X.0.bias" -> "conv_layers.X.conv.bias"
match = re.match(r'feature_extractor\.conv_layers\.(\d+)\.0\.(weight|bias)', key)
if match:
return f"feature_extractor.conv_layers.{match.group(1)}.conv.{match.group(2)}"
# Layer norm when "extractor_mode" is "layer_norm".
# "conv_layers.X.2.1.weight" -> "conv_layers.X.layer_norm.weight"
# "conv_layers.X.2.1.bias" -> "conv_layers.X.layer_norm.bias"
match = re.match(r'feature_extractor\.conv_layers\.(\d+)\.2\.1\.(weight|bias)', key)
if match:
return f"feature_extractor.conv_layers.{match.group(1)}.layer_norm.{match.group(2)}"
match = re.match(r"post_extract_proj\.(weight|bias)", key)
# Encoder - Feature projection
if match:
return f"encoder.feature_projection.projection.{match.group(1)}"
match = re.match(r"layer_norm\.(weight|bias)", key)
if match:
return f"encoder.feature_projection.layer_norm.{match.group(1)}"
# Encoder - Transformer - Convolutional positional embedding
match = re.match(r"encoder\.pos_conv\.0\.(bias|weight_g|weight_v)", key)
if match:
return f"encoder.transformer.pos_conv_embed.conv.{match.group(1)}"
match = re.match(r"encoder\.layer_norm\.(weight|bias)", key)
if match:
return f"encoder.transformer.layer_norm.{match.group(1)}"
# Encoder - Transformer - Self attention layers
match = re.match(r"encoder\.layers\.(\d+)\.self_attn\.((k_|v_|q_|out_)proj\.(weight|bias))", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.attention.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.self_attn_layer_norm\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.layer_norm.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.fc1\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.feed_forward.intermediate_dense.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.fc2\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.feed_forward.output_dense.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.final_layer_norm\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.final_layer_norm.{match.group(2)}"
match = re.match(r"proj\.(weight|bias)", key)
# Auxiliary Module
# Only relevant when loading fine-tuned models
if match:
return f"aux.{match.group(1)}"
# HuBERT Extension
if key in ['label_embs_concat']:
return key
raise ValueError(f'Unexpected key: {key_}')
def _convert_state_dict(state_dict):
converted = {}
for k, v in state_dict.items():
k = _map_key(k)
if k is not None:
converted[k] = v
return converted
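# Illustrative sketch of the key mapping (keys drawn from the regexes above):
# >>> _map_key('encoder.layers.0.fc1.weight')
# 'encoder.transformer.layers.0.feed_forward.intermediate_dense.weight'
# >>> _map_key('quantizer.vars') is None  # pretraining-only weights are dropped
# True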
def import_fairseq_model(original: Module) -> Wav2Vec2Model:
# Overriding the signature so that the types are correct on Sphinx
"""import_fairseq_model(original: torch.nn.Module) -> torchaudio.models.Wav2Vec2Model
Build Wav2Vec2Model from the corresponding model object of `fairseq`_.
Args:
original (torch.nn.Module):
An instance of fairseq's Wav2Vec2.0 or HuBERT model.
One of ``fairseq.models.wav2vec.wav2vec2_asr.Wav2VecEncoder``,
``fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model`` or
``fairseq.models.hubert.hubert_asr.HubertEncoder``.
Returns:
Wav2Vec2Model: Imported model.
Example - Loading pretrain-only model
>>> from torchaudio.models.wav2vec2.utils import import_fairseq_model
>>>
>>> # Load model using fairseq
>>> model_file = 'wav2vec_small.pt'
>>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
>>> original = model[0]
>>> imported = import_fairseq_model(original)
>>>
>>> # Perform feature extraction
>>> waveform, _ = torchaudio.load('audio.wav')
>>> features, _ = imported.extract_features(waveform)
>>>
>>> # Compare result with the original model from fairseq
>>> reference = original.feature_extractor(waveform).transpose(1, 2)
>>> torch.testing.assert_allclose(features, reference)
Example - Fine-tuned model
>>> from torchaudio.models.wav2vec2.utils import import_fairseq_model
>>>
>>> # Load model using fairseq
>>> model_file = 'wav2vec_small_960h.pt'
>>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
>>> original = model[0]
>>> imported = import_fairseq_model(original.w2v_encoder)
>>>
>>> # Perform encoding
>>> waveform, _ = torchaudio.load('audio.wav')
>>> emission, _ = imported(waveform)
>>>
>>> # Compare result with the original model from fairseq
>>> mask = torch.zeros_like(waveform)
>>> reference = original(waveform, mask)['encoder_out'].transpose(0, 1)
>>> torch.testing.assert_allclose(emission, reference)
.. _fairseq: https://github.com/pytorch/fairseq
"""
class_ = original.__class__.__name__
if class_ == 'Wav2Vec2Model':
return _import_wav2vec2_pretraining(original)
if class_ == 'Wav2VecEncoder':
return _import_wav2vec2_finetuning(original)
if class_ == 'HubertModel':
return _import_hubert_pretraining(original)
if class_ == 'HubertEncoder':
return _import_hubert_finetuning(original)
raise ValueError(
f'Expected an instance of `Wav2Vec2Model`, `Wav2VecEncoder`, `HubertModel` or `HubertEncoder`. Found: {class_}')
def _import_wav2vec2_finetuning(original: Module) -> Wav2Vec2Model:
config = _parse_config(original.w2v_model)
model = wav2vec2_model(**config, aux_num_out=original.proj.out_features)
model.load_state_dict(_convert_state_dict(original.state_dict()))
return model
def _import_wav2vec2_pretraining(original: Module) -> Wav2Vec2Model:
config = _parse_config(original)
model = wav2vec2_model(**config, aux_num_out=None)
model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False)
return model
def _import_hubert_finetuning(original: Module) -> Wav2Vec2Model:
config = _parse_config(original.w2v_model)
model = wav2vec2_model(**config, aux_num_out=original.proj.out_features)
model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False)
return model
def _import_hubert_pretraining(original: Module) -> Wav2Vec2Model:
config = _parse_config(original)
model = wav2vec2_model(**config, aux_num_out=None)
model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False)
return model
################################################################################
import math
from typing import List, Optional, Tuple
import torch
__all__ = ["Emformer"]
def _lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor:
batch_size = lengths.shape[0]
max_length = int(torch.max(lengths).item())
padding_mask = torch.arange(
max_length, device=lengths.device, dtype=lengths.dtype
).expand(batch_size, max_length) >= lengths.unsqueeze(1)
return padding_mask
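# Illustrative sketch (values assumed): positions at or beyond each valid
# length are flagged True.
# >>> _lengths_to_padding_mask(torch.tensor([1, 3]))
# tensor([[False,  True,  True],
#         [False, False, False]])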
def _gen_padding_mask(
utterance: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
lengths: torch.Tensor,
mems: torch.Tensor,
left_context_key: Optional[torch.Tensor] = None,
) -> Optional[torch.Tensor]:
T = right_context.size(0) + utterance.size(0) + summary.size(0)
B = right_context.size(1)
if B == 1:
padding_mask = None
else:
right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0)
left_context_blocks_length = (
left_context_key.size(0) if left_context_key is not None else 0
)
klengths = (
lengths
+ mems.size(0)
+ right_context_blocks_length
+ left_context_blocks_length
)
padding_mask = _lengths_to_padding_mask(lengths=klengths)
return padding_mask
def _get_activation_module(activation: str) -> torch.nn.Module:
if activation == "relu":
return torch.nn.ReLU()
elif activation == "gelu":
return torch.nn.GELU()
elif activation == "silu":
return torch.nn.SiLU()
else:
raise ValueError(f"Unsupported activation {activation}")
def _get_weight_init_gains(
weight_init_scale_strategy: Optional[str], num_layers: int
) -> List[Optional[float]]:
if weight_init_scale_strategy is None:
return [None for _ in range(num_layers)]
elif weight_init_scale_strategy == "depthwise":
return [1.0 / math.sqrt(layer_idx + 1) for layer_idx in range(num_layers)]
elif weight_init_scale_strategy == "constant":
return [1.0 / math.sqrt(2) for layer_idx in range(num_layers)]
else:
raise ValueError(
f"Unsupported weight_init_scale_strategy value {weight_init_scale_strategy}"
)
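# Illustrative sketch: "depthwise" scales layer ``i`` by 1 / sqrt(i + 1).
# >>> _get_weight_init_gains("depthwise", 4)
# [1.0, 0.7071067811865475, 0.5773502691896258, 0.5]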
def _gen_attention_mask_block(
col_widths: List[int], col_mask: List[bool], num_rows: int, device: torch.device
) -> torch.Tensor:
assert len(col_widths) == len(
col_mask
), "Length of col_widths must match that of col_mask"
mask_block = [
torch.ones(num_rows, col_width, device=device)
if is_ones_col
else torch.zeros(num_rows, col_width, device=device)
for col_width, is_ones_col in zip(col_widths, col_mask)
]
return torch.cat(mask_block, dim=1)
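# Illustrative sketch (values assumed): columns flagged True become ones,
# the rest zeros, concatenated along the column dimension.
# >>> _gen_attention_mask_block([2, 3], [True, False], 1, torch.device("cpu"))
# tensor([[1., 1., 0., 0., 0.]])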
class _EmformerAttention(torch.nn.Module):
r"""Emformer layer attention module.
Args:
input_dim (int): input dimension.
num_heads (int): number of attention heads in each Emformer layer.
dropout (float, optional): dropout probability. (Default: 0.0)
weight_init_gain (float or None, optional): scale factor to apply when initializing
attention module parameters. (Default: ``None``)
tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``)
negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8)
"""
def __init__(
self,
input_dim: int,
num_heads: int,
dropout: float = 0.0,
weight_init_gain: Optional[float] = None,
tanh_on_mem: bool = False,
negative_inf: float = -1e8,
):
super().__init__()
if input_dim % num_heads != 0:
raise ValueError(
f"input_dim ({input_dim}) is not a multiple of num_heads ({num_heads})."
)
self.input_dim = input_dim
self.num_heads = num_heads
self.dropout = dropout
self.tanh_on_mem = tanh_on_mem
self.negative_inf = negative_inf
self.scaling = (self.input_dim // self.num_heads) ** -0.5
self.emb_to_key_value = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
self.emb_to_query = torch.nn.Linear(input_dim, input_dim, bias=True)
self.out_proj = torch.nn.Linear(input_dim, input_dim, bias=True)
if weight_init_gain:
torch.nn.init.xavier_uniform_(
self.emb_to_key_value.weight, gain=weight_init_gain
)
torch.nn.init.xavier_uniform_(
self.emb_to_query.weight, gain=weight_init_gain
)
def _gen_key_value(
self, input: torch.Tensor, mems: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
T, _, _ = input.shape
summary_length = mems.size(0) + 1
right_ctx_utterance_block = input[: T - summary_length]
mems_right_ctx_utterance_block = torch.cat([mems, right_ctx_utterance_block])
key, value = self.emb_to_key_value(mems_right_ctx_utterance_block).chunk(
chunks=2, dim=2
)
return key, value
def _gen_attention_probs(
self,
attention_weights: torch.Tensor,
attention_mask: torch.Tensor,
padding_mask: Optional[torch.Tensor],
) -> torch.Tensor:
attention_weights_float = attention_weights.float()
attention_weights_float = attention_weights_float.masked_fill(
attention_mask.unsqueeze(0), self.negative_inf
)
T = attention_weights.size(1)
B = attention_weights.size(0) // self.num_heads
if padding_mask is not None:
attention_weights_float = attention_weights_float.view(
B, self.num_heads, T, -1
)
attention_weights_float = attention_weights_float.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), self.negative_inf
)
attention_weights_float = attention_weights_float.view(
B * self.num_heads, T, -1
)
attention_probs = torch.nn.functional.softmax(
attention_weights_float, dim=-1
).type_as(attention_weights)
return torch.nn.functional.dropout(
attention_probs, p=float(self.dropout), training=self.training
)
def _forward_impl(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
mems: torch.Tensor,
attention_mask: torch.Tensor,
left_context_key: Optional[torch.Tensor] = None,
left_context_val: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
B = utterance.size(1)
T = right_context.size(0) + utterance.size(0) + summary.size(0)
# Compute query with [right context, utterance, summary].
query = self.emb_to_query(torch.cat([right_context, utterance, summary]))
# Compute key and value with [mems, right context, utterance].
key, value = self.emb_to_key_value(
torch.cat([mems, right_context, utterance])
).chunk(chunks=2, dim=2)
if left_context_key is not None and left_context_val is not None:
right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0)
key = torch.cat(
[
key[: mems.size(0) + right_context_blocks_length],
left_context_key,
key[mems.size(0) + right_context_blocks_length:],
],
)
value = torch.cat(
[
value[: mems.size(0) + right_context_blocks_length],
left_context_val,
value[mems.size(0) + right_context_blocks_length:],
],
)
# Compute attention weights from query, key, and value.
reshaped_query, reshaped_key, reshaped_value = [
tensor.contiguous()
.view(-1, B * self.num_heads, self.input_dim // self.num_heads)
.transpose(0, 1)
for tensor in [query, key, value]
]
attention_weights = torch.bmm(
reshaped_query * self.scaling, reshaped_key.transpose(1, 2)
)
# Compute padding mask.
padding_mask = _gen_padding_mask(
utterance, right_context, summary, lengths, mems, left_context_key
)
# Compute attention probabilities.
attention_probs = self._gen_attention_probs(
attention_weights, attention_mask, padding_mask
)
# Compute attention.
attention = torch.bmm(attention_probs, reshaped_value)
assert attention.shape == (
B * self.num_heads,
T,
self.input_dim // self.num_heads,
)
attention = attention.transpose(0, 1).contiguous().view(T, B, self.input_dim)
# Apply output projection.
output_right_context_mems = self.out_proj(attention)
summary_length = summary.size(0)
output_right_context = output_right_context_mems[: T - summary_length]
output_mems = output_right_context_mems[T - summary_length:]
if self.tanh_on_mem:
output_mems = torch.tanh(output_mems)
else:
output_mems = torch.clamp(output_mems, min=-10, max=10)
return output_right_context, output_mems, key, value
def forward(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
mems: torch.Tensor,
attention_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Forward pass for training.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
S: number of summary elements;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`.
lengths (torch.Tensor): with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape `(R, B, D)`.
summary (torch.Tensor): summary elements, with shape `(S, B, D)`.
mems (torch.Tensor): memory elements, with shape `(M, B, D)`.
attention_mask (torch.Tensor): attention mask for underlying attention module.
Returns:
(Tensor, Tensor):
Tensor
output frames corresponding to utterance and right_context, with shape `(T + R, B, D)`.
Tensor
updated memory elements, with shape `(M, B, D)`.
"""
output, output_mems, _, _ = self._forward_impl(
utterance, lengths, right_context, summary, mems, attention_mask
)
return output, output_mems[:-1]
@torch.jit.export
def infer(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
mems: torch.Tensor,
left_context_key: torch.Tensor,
left_context_val: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Forward pass for inference.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
S: number of summary elements;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`.
lengths (torch.Tensor): with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape `(R, B, D)`.
summary (torch.Tensor): summary elements, with shape `(S, B, D)`.
mems (torch.Tensor): memory elements, with shape `(M, B, D)`.
left_context_key (torch.Tensor): left context attention key computed from preceding invocation.
left_context_val (torch.Tensor): left context attention value computed from preceding invocation.
Returns:
(Tensor, Tensor, Tensor, and Tensor):
Tensor
output frames corresponding to utterance and right_context, with shape `(T + R, B, D)`.
Tensor
updated memory elements, with shape `(M, B, D)`.
Tensor
attention key computed for left context and utterance.
Tensor
attention value computed for left context and utterance.
"""
query_dim = right_context.size(0) + utterance.size(0) + summary.size(0)
key_dim = (
right_context.size(0)
+ utterance.size(0)
+ mems.size(0)
+ left_context_key.size(0)
)
attention_mask = torch.zeros(query_dim, key_dim).to(
dtype=torch.bool, device=utterance.device
)
attention_mask[-1, : mems.size(0)] = True
output, output_mems, key, value = self._forward_impl(
utterance,
lengths,
right_context,
summary,
mems,
attention_mask,
left_context_key=left_context_key,
left_context_val=left_context_val,
)
return (
output,
output_mems,
key[mems.size(0) + right_context.size(0):],
value[mems.size(0) + right_context.size(0):],
)
class _EmformerLayer(torch.nn.Module):
r"""Emformer layer that constitutes Emformer.
Args:
input_dim (int): input dimension.
num_heads (int): number of attention heads.
ffn_dim (int): hidden layer dimension of feedforward network.
dropout (float, optional): dropout probability. (Default: 0.0)
activation (str, optional): activation function to use in feedforward network.
Must be one of ("relu", "gelu", "silu"). (Default: "relu")
left_context_length (int, optional): length of left context. (Default: 0)
segment_length (int, optional): length of each input segment. (Default: 128)
max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0)
weight_init_gain (float or None, optional): scale factor to apply when initializing
attention module parameters. (Default: ``None``)
tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``)
negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8)
"""
def __init__(
self,
input_dim: int,
num_heads: int,
ffn_dim: int,
dropout: float = 0.0,
activation: str = "relu",
left_context_length: int = 0,
segment_length: int = 128,
max_memory_size: int = 0,
weight_init_gain: Optional[float] = None,
tanh_on_mem: bool = False,
negative_inf: float = -1e8,
):
super().__init__()
self.attention = _EmformerAttention(
input_dim=input_dim,
num_heads=num_heads,
dropout=dropout,
weight_init_gain=weight_init_gain,
tanh_on_mem=tanh_on_mem,
negative_inf=negative_inf,
)
self.dropout = torch.nn.Dropout(dropout)
self.memory_op = torch.nn.AvgPool1d(
kernel_size=segment_length, stride=segment_length, ceil_mode=True
)
activation_module = _get_activation_module(activation)
self.pos_ff = torch.nn.Sequential(
torch.nn.LayerNorm(input_dim),
torch.nn.Linear(input_dim, ffn_dim),
activation_module,
torch.nn.Dropout(dropout),
torch.nn.Linear(ffn_dim, input_dim),
torch.nn.Dropout(dropout),
)
self.layer_norm_input = torch.nn.LayerNorm(input_dim)
self.layer_norm_output = torch.nn.LayerNorm(input_dim)
self.left_context_length = left_context_length
self.segment_length = segment_length
self.max_memory_size = max_memory_size
self.input_dim = input_dim
self.use_mem = max_memory_size > 0
def _init_state(
self, batch_size: int, device: Optional[torch.device]
) -> List[torch.Tensor]:
empty_memory = torch.zeros(
self.max_memory_size, batch_size, self.input_dim, device=device
)
left_context_key = torch.zeros(
self.left_context_length, batch_size, self.input_dim, device=device
)
left_context_val = torch.zeros(
self.left_context_length, batch_size, self.input_dim, device=device
)
past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
return [empty_memory, left_context_key, left_context_val, past_length]
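# State layout, as initialized above: ``state[0]`` is the memory bank
# ``(max_memory_size, B, D)``, ``state[1]``/``state[2]`` are the left-context
# key/value ``(left_context_length, B, D)``, and ``state[3]`` is a running
# count of processed frames ``(1, B)``.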
def _unpack_state(
self, utterance: torch.Tensor, mems: torch.Tensor, state: List[torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
past_length = state[3][0][0].item()
past_left_context_length = min(self.left_context_length, past_length)
past_mem_length = min(
self.max_memory_size, math.ceil(past_length / self.segment_length)
)
pre_mems = state[0][self.max_memory_size - past_mem_length:]
lc_key = state[1][self.left_context_length - past_left_context_length:]
lc_val = state[2][self.left_context_length - past_left_context_length:]
return pre_mems, lc_key, lc_val
def _pack_state(
self,
next_k: torch.Tensor,
next_v: torch.Tensor,
update_length: int,
mems: torch.Tensor,
state: List[torch.Tensor],
) -> List[torch.Tensor]:
new_k = torch.cat([state[1], next_k])
new_v = torch.cat([state[2], next_v])
state[0] = torch.cat([state[0], mems])[-self.max_memory_size:]
state[1] = new_k[new_k.shape[0] - self.left_context_length:]
state[2] = new_v[new_v.shape[0] - self.left_context_length:]
state[3] = state[3] + update_length
return state
def _process_attention_output(
self,
rc_output: torch.Tensor,
utterance: torch.Tensor,
right_context: torch.Tensor,
) -> torch.Tensor:
result = self.dropout(rc_output) + torch.cat([right_context, utterance])
result = self.pos_ff(result) + result
result = self.layer_norm_output(result)
return result
def _apply_pre_attention_layer_norm(
self, utterance: torch.Tensor, right_context: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
layer_norm_input = self.layer_norm_input(torch.cat([right_context, utterance]))
return (
layer_norm_input[right_context.size(0):],
layer_norm_input[: right_context.size(0)],
)
def _apply_post_attention_ffn(
self, rc_output: torch.Tensor, utterance: torch.Tensor, right_context: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
rc_output = self._process_attention_output(rc_output, utterance, right_context)
return rc_output[right_context.size(0):], rc_output[: right_context.size(0)]
def _apply_attention_forward(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
mems: torch.Tensor,
attention_mask: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
if attention_mask is None:
raise ValueError(
"attention_mask must be not None when for_inference is False"
)
if self.use_mem:
summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
else:
summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device)
rc_output, next_m = self.attention(
utterance=utterance,
lengths=lengths,
right_context=right_context,
summary=summary,
mems=mems,
attention_mask=attention_mask,
)
return rc_output, next_m
def _apply_attention_infer(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
mems: torch.Tensor,
state: Optional[List[torch.Tensor]],
) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:
if state is None:
state = self._init_state(utterance.size(1), device=utterance.device)
pre_mems, lc_key, lc_val = self._unpack_state(utterance, mems, state)
if self.use_mem:
summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
summary = summary[:1]
else:
summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device)
rc_output, next_m, next_k, next_v = self.attention.infer(
utterance=utterance,
lengths=lengths,
right_context=right_context,
summary=summary,
mems=pre_mems,
left_context_key=lc_key,
left_context_val=lc_val,
)
state = self._pack_state(next_k, next_v, utterance.size(0), mems, state)
return rc_output, next_m, state
def forward(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
mems: torch.Tensor,
attention_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Forward pass for training.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`.
lengths (torch.Tensor): with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape `(R, B, D)`.
mems (torch.Tensor): memory elements, with shape `(M, B, D)`.
attention_mask (torch.Tensor): attention mask for underlying attention module.
Returns:
(Tensor, Tensor, Tensor):
Tensor
encoded utterance frames, with shape `(T, B, D)`.
Tensor
updated right context frames, with shape `(R, B, D)`.
Tensor
updated memory elements, with shape `(M, B, D)`.
"""
(
layer_norm_utterance,
layer_norm_right_context,
) = self._apply_pre_attention_layer_norm(utterance, right_context)
rc_output, output_mems = self._apply_attention_forward(
layer_norm_utterance,
lengths,
layer_norm_right_context,
mems,
attention_mask,
)
output_utterance, output_right_context = self._apply_post_attention_ffn(
rc_output, utterance, right_context
)
return output_utterance, output_right_context, output_mems
@torch.jit.export
def infer(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
state: Optional[List[torch.Tensor]],
mems: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor], torch.Tensor]:
r"""Forward pass for inference.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`.
lengths (torch.Tensor): with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape `(R, B, D)`.
state (List[torch.Tensor] or None): list of tensors representing layer internal state
generated in preceding invocation of ``infer``.
mems (torch.Tensor): memory elements, with shape `(M, B, D)`.
Returns:
(Tensor, Tensor, List[torch.Tensor], Tensor):
Tensor
encoded utterance frames, with shape `(T, B, D)`.
Tensor
updated right context frames, with shape `(R, B, D)`.
List[Tensor]
list of tensors representing layer internal state
generated in current invocation of ``infer``.
Tensor
updated memory elements, with shape `(M, B, D)`.
"""
(
layer_norm_utterance,
layer_norm_right_context,
) = self._apply_pre_attention_layer_norm(utterance, right_context)
rc_output, output_mems, output_state = self._apply_attention_infer(
layer_norm_utterance, lengths, layer_norm_right_context, mems, state
)
output_utterance, output_right_context = self._apply_post_attention_ffn(
rc_output, utterance, right_context
)
return output_utterance, output_right_context, output_state, output_mems
class Emformer(torch.nn.Module):
r"""Implements the Emformer architecture introduced in
*Emformer: Efficient Memory Transformer Based Acoustic Model for Low Latency Streaming Speech Recognition*
[:footcite:`shi2021emformer`].
Args:
input_dim (int): input dimension.
num_heads (int): number of attention heads in each Emformer layer.
ffn_dim (int): hidden layer dimension of each Emformer layer's feedforward network.
num_layers (int): number of Emformer layers to instantiate.
dropout (float, optional): dropout probability. (Default: 0.0)
activation (str, optional): activation function to use in each Emformer layer's
feedforward network. Must be one of ("relu", "gelu", "silu"). (Default: "relu")
left_context_length (int, optional): length of left context. (Default: 0)
right_context_length (int, optional): length of right context. (Default: 0)
segment_length (int, optional): length of each input segment. (Default: 128)
max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0)
weight_init_scale_strategy (str, optional): per-layer weight initialization scaling
strategy. Must be one of ("depthwise", "constant", ``None``). (Default: "depthwise")
tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``)
negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8)
Examples:
>>> emformer = Emformer(512, 8, 2048, 20)
>>> input = torch.rand(128, 400, 512) # batch, num_frames, feature_dim
>>> lengths = torch.randint(1, 200, (128,)) # batch
>>> output = emformer(input, lengths)
>>> output, lengths, states = emformer.infer(input, lengths, None)
"""
def __init__(
self,
input_dim: int,
num_heads: int,
ffn_dim: int,
num_layers: int,
dropout: float = 0.0,
activation: str = "relu",
left_context_length: int = 0,
right_context_length: int = 0,
segment_length: int = 128,
max_memory_size: int = 0,
weight_init_scale_strategy: str = "depthwise",
tanh_on_mem: bool = False,
negative_inf: float = -1e8,
):
super().__init__()
self.use_mem = max_memory_size > 0
self.memory_op = torch.nn.AvgPool1d(
kernel_size=segment_length, stride=segment_length, ceil_mode=True,
)
weight_init_gains = _get_weight_init_gains(
weight_init_scale_strategy, num_layers
)
self.emformer_layers = torch.nn.ModuleList(
[
_EmformerLayer(
input_dim,
num_heads,
ffn_dim,
dropout=dropout,
activation=activation,
left_context_length=left_context_length,
segment_length=segment_length,
max_memory_size=max_memory_size,
weight_init_gain=weight_init_gains[layer_idx],
tanh_on_mem=tanh_on_mem,
negative_inf=negative_inf,
)
for layer_idx in range(num_layers)
]
)
self.left_context_length = left_context_length
self.right_context_length = right_context_length
self.segment_length = segment_length
self.max_memory_size = max_memory_size
def _gen_right_context(self, input: torch.Tensor) -> torch.Tensor:
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context_length) / self.segment_length)
right_context_blocks = []
for seg_idx in range(num_segs - 1):
start = (seg_idx + 1) * self.segment_length
end = start + self.right_context_length
right_context_blocks.append(input[start:end])
right_context_blocks.append(input[T - self.right_context_length:])
return torch.cat(right_context_blocks)
def _gen_attention_mask_col_widths(
self, seg_idx: int, utterance_length: int
) -> List[int]:
num_segs = math.ceil(utterance_length / self.segment_length)
rc = self.right_context_length
lc = self.left_context_length
rc_start = seg_idx * rc
rc_end = rc_start + rc
seg_start = max(seg_idx * self.segment_length - lc, 0)
seg_end = min((seg_idx + 1) * self.segment_length, utterance_length)
rc_length = self.right_context_length * num_segs
if self.use_mem:
m_start = max(seg_idx - self.max_memory_size, 0)
mem_length = num_segs - 1
col_widths = [
m_start, # before memory
seg_idx - m_start, # memory
mem_length - seg_idx, # after memory
rc_start, # before right context
rc, # right context
rc_length - rc_end, # after right context
seg_start, # before query segment
seg_end - seg_start, # query segment
utterance_length - seg_end, # after query segment
]
else:
col_widths = [
rc_start, # before right context
rc, # right context
rc_length - rc_end, # after right context
seg_start, # before query segment
seg_end - seg_start, # query segment
utterance_length - seg_end, # after query segment
]
return col_widths
def _gen_attention_mask(self, input: torch.Tensor) -> torch.Tensor:
utterance_length, batch_size, _ = input.shape
num_segs = math.ceil(utterance_length / self.segment_length)
rc_mask = []
query_mask = []
summary_mask = []
if self.use_mem:
num_cols = 9
# memory, right context, query segment
rc_q_cols_mask = [idx in [1, 4, 7] for idx in range(num_cols)]
# right context, query segment
s_cols_mask = [idx in [4, 7] for idx in range(num_cols)]
masks_to_concat = [rc_mask, query_mask, summary_mask]
else:
num_cols = 6
# right context, query segment
rc_q_cols_mask = [idx in [1, 4] for idx in range(num_cols)]
s_cols_mask = None
masks_to_concat = [rc_mask, query_mask]
for seg_idx in range(num_segs):
col_widths = self._gen_attention_mask_col_widths(seg_idx, utterance_length)
rc_mask_block = _gen_attention_mask_block(
col_widths, rc_q_cols_mask, self.right_context_length, input.device
)
rc_mask.append(rc_mask_block)
query_mask_block = _gen_attention_mask_block(
col_widths,
rc_q_cols_mask,
min(
self.segment_length,
utterance_length - seg_idx * self.segment_length,
),
input.device,
)
query_mask.append(query_mask_block)
if s_cols_mask is not None:
summary_mask_block = _gen_attention_mask_block(
col_widths, s_cols_mask, 1, input.device
)
summary_mask.append(summary_mask_block)
attention_mask = (
1 - torch.cat([torch.cat(mask) for mask in masks_to_concat])
).to(torch.bool)
return attention_mask
def forward(
self, input: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Forward pass for training.
B: batch size;
T: number of frames;
D: feature dimension of each frame.
Args:
input (torch.Tensor): utterance frames right-padded with right context frames, with
shape `(B, T, D)`.
lengths (torch.Tensor): with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in ``input``.
Returns:
(Tensor, Tensor):
Tensor
output frames, with shape `(B, T - ``right_context_length``, D)`.
Tensor
output lengths, with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in output frames.
"""
input = input.permute(1, 0, 2)
right_context = self._gen_right_context(input)
utterance = input[: input.size(0) - self.right_context_length]
attention_mask = self._gen_attention_mask(utterance)
mems = (
self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)[:-1]
if self.use_mem
else torch.empty(0).to(dtype=input.dtype, device=input.device)
)
output = utterance
for layer in self.emformer_layers:
output, right_context, mems = layer(
output, lengths, right_context, mems, attention_mask
)
return output.permute(1, 0, 2), lengths
@torch.jit.export
def infer(
self,
input: torch.Tensor,
lengths: torch.Tensor,
states: Optional[List[List[torch.Tensor]]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]:
r"""Forward pass for inference.
B: batch size;
T: number of frames;
D: feature dimension of each frame.
Args:
input (torch.Tensor): utterance frames right-padded with right context frames, with
shape `(B, T, D)`.
lengths (torch.Tensor): with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in ``input``.
states (List[List[torch.Tensor]] or None, optional): list of lists of tensors
representing Emformer internal state generated in preceding invocation of ``infer``. (Default: ``None``)
Returns:
(Tensor, Tensor, List[List[Tensor]]):
Tensor
output frames, with shape `(B, T - ``right_context_length``, D)`.
Tensor
output lengths, with shape `(B,)` and i-th element representing
number of valid frames for i-th batch element in output frames.
List[List[Tensor]]
output states; list of lists of tensors representing Emformer internal state
generated in current invocation of ``infer``.
"""
input = input.permute(1, 0, 2)
right_context_start_idx = input.size(0) - self.right_context_length
right_context = input[right_context_start_idx:]
utterance = input[:right_context_start_idx]
output_lengths = torch.clamp(lengths - self.right_context_length, min=0)
mems = (
self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
if self.use_mem
else torch.empty(0).to(dtype=input.dtype, device=input.device)
)
output = utterance
output_states: List[List[torch.Tensor]] = []
for layer_idx, layer in enumerate(self.emformer_layers):
output, right_context, output_state, mems = layer.infer(
output,
output_lengths,
right_context,
None if states is None else states[layer_idx],
mems,
)
output_states.append(output_state)
return output.permute(1, 0, 2), output_lengths, output_states
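# Illustrative streaming sketch (not part of the original module). It assumes
# chunks of ``segment_length + right_context_length`` frames per ``infer``
# call and carries ``states`` across calls; all names below are hypothetical.
def _emformer_streaming_demo() -> None:
    segment_length, right_context_length = 16, 4
    emformer = Emformer(
        80, 4, 256, 2,
        segment_length=segment_length,
        right_context_length=right_context_length,
    )
    emformer.eval()
    # Ten segments plus the trailing right context of the last chunk.
    full = torch.rand(1, 10 * segment_length + right_context_length, 80)
    states = None
    outputs = []
    with torch.no_grad():
        for start in range(0, full.size(1) - right_context_length, segment_length):
            chunk = full[:, start:start + segment_length + right_context_length]
            lengths = torch.tensor([chunk.size(1)])
            output, _, states = emformer.infer(chunk, lengths, states)
            outputs.append(output)  # each call emits ``segment_length`` frames
    assert torch.cat(outputs, dim=1).shape == (1, 10 * segment_length, 80)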
|
from .emformer import Emformer
__all__ = ["Emformer"]
|
from . import kaldi
__all__ = [
'kaldi',
]
|
from typing import Tuple
import math
import torch
from torch import Tensor
import torchaudio
__all__ = [
'get_mel_banks',
'inverse_mel_scale',
'inverse_mel_scale_scalar',
'mel_scale',
'mel_scale_scalar',
'spectrogram',
'fbank',
'mfcc',
'vtln_warp_freq',
'vtln_warp_mel_freq',
]
# numeric_limits<float>::epsilon() 1.1920928955078125e-07
EPSILON = torch.tensor(torch.finfo(torch.float).eps)
# 1 millisecond = 0.001 seconds
MILLISECONDS_TO_SECONDS = 0.001
# window types
HAMMING = 'hamming'
HANNING = 'hanning'
POVEY = 'povey'
RECTANGULAR = 'rectangular'
BLACKMAN = 'blackman'
WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN]
def _get_epsilon(device, dtype):
return EPSILON.to(device=device, dtype=dtype)
def _next_power_of_2(x: int) -> int:
r"""Returns the smallest power of 2 that is greater than x
"""
return 1 if x == 0 else 2 ** (x - 1).bit_length()
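# For example: _next_power_of_2(1) == 1, _next_power_of_2(300) == 512, and
# powers of two map to themselves, e.g. _next_power_of_2(512) == 512.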
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
representing how the window is shifted along the waveform. Each row is a frame.
Args:
waveform (Tensor): Tensor of size ``num_samples``
window_size (int): Frame length
window_shift (int): Frame shift
snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends.
Returns:
Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
"""
assert waveform.dim() == 1
num_samples = waveform.size(0)
strides = (window_shift * waveform.stride(0), waveform.stride(0))
if snip_edges:
if num_samples < window_size:
return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
else:
m = 1 + (num_samples - window_size) // window_shift
else:
reversed_waveform = torch.flip(waveform, [0])
m = (num_samples + (window_shift // 2)) // window_shift
pad = window_size // 2 - window_shift // 2
pad_right = reversed_waveform
if pad > 0:
# torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
# but we want [2, 1, 0, 0, 1, 2]
pad_left = reversed_waveform[-pad:]
waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
else:
# pad is negative so we want to trim the waveform at the front
waveform = torch.cat((waveform[-pad:], pad_right), dim=0)
sizes = (m, window_size)
return waveform.as_strided(sizes, strides)
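# Illustrative sketch (not part of the original module): framing a toy
# waveform with ``snip_edges=True`` keeps only the frames that fit entirely
# inside the signal.
def _get_strided_demo() -> None:
    waveform = torch.arange(10, dtype=torch.float32)
    frames = _get_strided(waveform, window_size=4, window_shift=3, snip_edges=True)
    # m = 1 + (10 - 4) // 3 = 3 frames:
    # [[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]]
    assert frames.shape == (3, 4)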
def _feature_window_function(window_type: str,
window_size: int,
blackman_coeff: float,
device: torch.device,
                             dtype: torch.dtype,
) -> Tensor:
r"""Returns a window function with the given type and size
"""
if window_type == HANNING:
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
elif window_type == HAMMING:
return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
elif window_type == POVEY:
# like hanning but goes to zero at edges
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
elif window_type == RECTANGULAR:
return torch.ones(window_size, device=device, dtype=dtype)
elif window_type == BLACKMAN:
a = 2 * math.pi / (window_size - 1)
window_function = torch.arange(window_size, device=device, dtype=dtype)
# can't use torch.blackman_window as they use different coefficients
return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
(0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
else:
        raise ValueError('Invalid window type ' + window_type)
def _get_log_energy(strided_input: Tensor,
epsilon: Tensor,
energy_floor: float) -> Tensor:
r"""Returns the log energy of size (m) for a strided_input (m,*)
"""
device, dtype = strided_input.device, strided_input.dtype
log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)
if energy_floor == 0.0:
return log_energy
return torch.max(
log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype))
def _get_waveform_and_window_properties(waveform: Tensor,
channel: int,
sample_frequency: float,
frame_shift: float,
frame_length: float,
round_to_power_of_two: bool,
preemphasis_coefficient: float) -> Tuple[Tensor, int, int, int]:
r"""Gets the waveform and window properties
"""
channel = max(channel, 0)
assert channel < waveform.size(0), ('Invalid channel {} for size {}'.format(channel, waveform.size(0)))
waveform = waveform[channel, :] # size (n)
window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size
assert 2 <= window_size <= len(
        waveform), ('choose a window size {} that is in [2, {}]'
.format(window_size, len(waveform)))
assert 0 < window_shift, '`window_shift` must be greater than 0'
assert padded_window_size % 2 == 0, 'the padded `window_size` must be divisible by two.' \
' use `round_to_power_of_two` or change `frame_length`'
assert 0. <= preemphasis_coefficient <= 1.0, '`preemphasis_coefficient` must be between [0,1]'
assert sample_frequency > 0, '`sample_frequency` must be greater than zero'
return waveform, window_shift, window_size, padded_window_size
def _get_window(waveform: Tensor,
padded_window_size: int,
window_size: int,
window_shift: int,
window_type: str,
blackman_coeff: float,
snip_edges: bool,
raw_energy: bool,
energy_floor: float,
dither: float,
remove_dc_offset: bool,
preemphasis_coefficient: float) -> Tuple[Tensor, Tensor]:
r"""Gets a window and its log energy
Returns:
(Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m)
"""
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
# size (m, window_size)
strided_input = _get_strided(waveform, window_size, window_shift, snip_edges)
if dither != 0.0:
# Returns a random number strictly between 0 and 1
x = torch.max(epsilon, torch.rand(strided_input.shape, device=device, dtype=dtype))
rand_gauss = torch.sqrt(-2 * x.log()) * torch.cos(2 * math.pi * x)
strided_input = strided_input + rand_gauss * dither
if remove_dc_offset:
# Subtract each row/frame by its mean
row_means = torch.mean(strided_input, dim=1).unsqueeze(1) # size (m, 1)
strided_input = strided_input - row_means
if raw_energy:
# Compute the log energy of each row/frame before applying preemphasis and
# window function
signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m)
if preemphasis_coefficient != 0.0:
# strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j
offset_strided_input = torch.nn.functional.pad(
strided_input.unsqueeze(0), (1, 0), mode='replicate').squeeze(0) # size (m, window_size + 1)
strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1]
# Apply window_function to each row/frame
window_function = _feature_window_function(
window_type, window_size, blackman_coeff, device, dtype).unsqueeze(0) # size (1, window_size)
strided_input = strided_input * window_function # size (m, window_size)
# Pad columns with zero until we reach size (m, padded_window_size)
if padded_window_size != window_size:
padding_right = padded_window_size - window_size
strided_input = torch.nn.functional.pad(
strided_input.unsqueeze(0), (0, padding_right), mode='constant', value=0).squeeze(0)
# Compute energy after window function (not the raw one)
if not raw_energy:
signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m)
return strided_input, signal_log_energy
def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
# subtracts the column mean of the tensor size (m, n) if subtract_mean=True
# it returns size (m, n)
if subtract_mean:
col_means = torch.mean(tensor, dim=0).unsqueeze(0)
tensor = tensor - col_means
return tensor
def spectrogram(waveform: Tensor,
blackman_coeff: float = 0.42,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_duration: float = 0.0,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
window_type: str = POVEY) -> Tensor:
r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's
compute-spectrogram-feats.
Args:
waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
this floor is applied to the zeroth component, representing the total signal energy. The floor on the
individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
to FFT. (Default: ``True``)
sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
specified there) (Default: ``16000.0``)
snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
it this way. (Default: ``False``)
window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
(Default: ``'povey'``)
Returns:
Tensor: A spectrogram identical to what Kaldi would output. The shape is
(m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided
"""
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient)
if len(waveform) < min_duration * sample_frequency:
# signal is too short
return torch.empty(0)
strided_input, signal_log_energy = _get_window(
waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff,
snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient)
    # size (m, padded_window_size // 2 + 1), complex-valued
fft = torch.fft.rfft(strided_input)
# Convert the FFT into a power spectrum
power_spectrum = torch.max(fft.abs().pow(2.), epsilon).log() # size (m, padded_window_size // 2 + 1)
power_spectrum[:, 0] = signal_log_energy
power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean)
return power_spectrum
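# Illustrative usage (not part of the original module): spectrogram of one
# second of random mono audio at the default 16 kHz settings.
def _spectrogram_demo() -> None:
    waveform = torch.rand(1, 16000) * 2 - 1  # (channel, time)
    spec = spectrogram(waveform)
    # 25 ms frames / 10 ms shift with snip_edges=True give
    # m = 1 + (16000 - 400) // 160 = 98 frames; the 400-sample window is
    # zero-padded to 512, leaving 512 // 2 + 1 = 257 frequency bins.
    assert spec.shape == (98, 257)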
def inverse_mel_scale_scalar(mel_freq: float) -> float:
return 700.0 * (math.exp(mel_freq / 1127.0) - 1.0)
def inverse_mel_scale(mel_freq: Tensor) -> Tensor:
return 700.0 * ((mel_freq / 1127.0).exp() - 1.0)
def mel_scale_scalar(freq: float) -> float:
return 1127.0 * math.log(1.0 + freq / 700.0)
def mel_scale(freq: Tensor) -> Tensor:
return 1127.0 * (1.0 + freq / 700.0).log()
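# Quick consistency check (illustrative, not part of the original module):
# mel_scale and inverse_mel_scale invert each other up to rounding error.
def _mel_roundtrip_demo() -> None:
    freq = torch.linspace(20.0, 8000.0, steps=5, dtype=torch.float64)
    assert torch.allclose(inverse_mel_scale(mel_scale(freq)), freq)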
def vtln_warp_freq(vtln_low_cutoff: float,
vtln_high_cutoff: float,
low_freq: float,
high_freq: float,
vtln_warp_factor: float,
freq: Tensor) -> Tensor:
r"""This computes a VTLN warping function that is not the same as HTK's one,
but has similar inputs (this function has the advantage of never producing
empty bins).
This function computes a warp function F(freq), defined between low_freq
and high_freq inclusive, with the following properties:
F(low_freq) == low_freq
F(high_freq) == high_freq
The function is continuous and piecewise linear with two inflection
points.
The lower inflection point (measured in terms of the unwarped
frequency) is at frequency l, determined as described below.
The higher inflection point is at a frequency h, determined as
described below.
If l <= f <= h, then F(f) = f/vtln_warp_factor.
If the higher inflection point (measured in terms of the unwarped
frequency) is at h, then max(h, F(h)) == vtln_high_cutoff.
Since (by the last point) F(h) == h/vtln_warp_factor, then
max(h, h/vtln_warp_factor) == vtln_high_cutoff, so
h = vtln_high_cutoff / max(1, 1/vtln_warp_factor).
= vtln_high_cutoff * min(1, vtln_warp_factor).
If the lower inflection point (measured in terms of the unwarped
frequency) is at l, then min(l, F(l)) == vtln_low_cutoff
This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor)
= vtln_low_cutoff * max(1, vtln_warp_factor)
Args:
vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
low_freq (float): Lower frequency cutoffs in mel computation
high_freq (float): Upper frequency cutoffs in mel computation
vtln_warp_factor (float): Vtln warp factor
freq (Tensor): given frequency in Hz
Returns:
Tensor: Freq after vtln warp
"""
assert vtln_low_cutoff > low_freq, 'be sure to set the vtln_low option higher than low_freq'
assert vtln_high_cutoff < high_freq, 'be sure to set the vtln_high option lower than high_freq [or negative]'
l = vtln_low_cutoff * max(1.0, vtln_warp_factor)
h = vtln_high_cutoff * min(1.0, vtln_warp_factor)
scale = 1.0 / vtln_warp_factor
Fl = scale * l # F(l)
Fh = scale * h # F(h)
assert l > low_freq and h < high_freq
# slope of left part of the 3-piece linear function
scale_left = (Fl - low_freq) / (l - low_freq)
# [slope of center part is just "scale"]
# slope of right part of the 3-piece linear function
scale_right = (high_freq - Fh) / (high_freq - h)
res = torch.empty_like(freq)
outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq) # freq < low_freq || freq > high_freq
before_l = torch.lt(freq, l) # freq < l
before_h = torch.lt(freq, h) # freq < h
after_h = torch.ge(freq, h) # freq >= h
    # the order of operations matters here (since the frequency regions overlap)
res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq)
res[before_h] = scale * freq[before_h]
res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq)
res[outside_low_high_freq] = freq[outside_low_high_freq]
return res
def vtln_warp_mel_freq(vtln_low_cutoff: float,
vtln_high_cutoff: float,
                       low_freq: float,
                       high_freq: float,
vtln_warp_factor: float,
mel_freq: Tensor) -> Tensor:
r"""
Args:
vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
low_freq (float): Lower frequency cutoffs in mel computation
high_freq (float): Upper frequency cutoffs in mel computation
vtln_warp_factor (float): Vtln warp factor
mel_freq (Tensor): Given frequency in Mel
Returns:
Tensor: ``mel_freq`` after vtln warp
"""
return mel_scale(vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
vtln_warp_factor, inverse_mel_scale(mel_freq)))
def get_mel_banks(num_bins: int,
window_length_padded: int,
sample_freq: float,
low_freq: float,
high_freq: float,
vtln_low: float,
vtln_high: float,
vtln_warp_factor: float) -> Tuple[Tensor, Tensor]:
"""
Returns:
(Tensor, Tensor): The tuple consists of ``bins`` (which is
melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is
center frequencies of bins of size (``num_bins``)).
"""
assert num_bins > 3, 'Must have at least 3 mel bins'
assert window_length_padded % 2 == 0
    num_fft_bins = window_length_padded // 2
nyquist = 0.5 * sample_freq
if high_freq <= 0.0:
high_freq += nyquist
assert (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq), \
('Bad values in options: low-freq {} and high-freq {} vs. nyquist {}'.format(low_freq, high_freq, nyquist))
# fft-bin width [think of it as Nyquist-freq / half-window-length]
fft_bin_width = sample_freq / window_length_padded
mel_low_freq = mel_scale_scalar(low_freq)
mel_high_freq = mel_scale_scalar(high_freq)
# divide by num_bins+1 in next line because of end-effects where the bins
# spread out to the sides.
mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)
if vtln_high < 0.0:
vtln_high += nyquist
assert vtln_warp_factor == 1.0 or ((low_freq < vtln_low < high_freq) and
(0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)), \
('Bad values in options: vtln-low {} and vtln-high {}, versus '
'low-freq {} and high-freq {}'.format(vtln_low, vtln_high, low_freq, high_freq))
bin = torch.arange(num_bins).unsqueeze(1)
left_mel = mel_low_freq + bin * mel_freq_delta # size(num_bins, 1)
center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # size(num_bins, 1)
right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # size(num_bins, 1)
if vtln_warp_factor != 1.0:
left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel)
center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel)
right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel)
center_freqs = inverse_mel_scale(center_mel) # size (num_bins)
# size(1, num_fft_bins)
mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0)
# size (num_bins, num_fft_bins)
up_slope = (mel - left_mel) / (center_mel - left_mel)
down_slope = (right_mel - mel) / (right_mel - center_mel)
if vtln_warp_factor == 1.0:
# left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values
bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope))
else:
# warping can move the order of left_mel, center_mel, right_mel anywhere
bins = torch.zeros_like(up_slope)
up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel) # left_mel < mel <= center_mel
down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel) # center_mel < mel < right_mel
bins[up_idx] = up_slope[up_idx]
bins[down_idx] = down_slope[down_idx]
return bins, center_freqs
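# Illustrative usage (not part of the original module): the default fbank()
# configuration yields 23 triangular filters over the 256 FFT bins of a
# 512-point window (high_freq=0.0 resolves to the Nyquist frequency).
def _mel_banks_demo() -> None:
    bins, center_freqs = get_mel_banks(
        num_bins=23, window_length_padded=512, sample_freq=16000.0,
        low_freq=20.0, high_freq=0.0, vtln_low=100.0, vtln_high=-500.0,
        vtln_warp_factor=1.0)
    assert bins.shape == (23, 256)  # fbank() pads the missing last column
    assert bool((center_freqs[1:] > center_freqs[:-1]).all())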
def fbank(waveform: Tensor,
blackman_coeff: float = 0.42,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
high_freq: float = 0.0,
htk_compat: bool = False,
low_freq: float = 20.0,
min_duration: float = 0.0,
num_mel_bins: int = 23,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
use_energy: bool = False,
use_log_fbank: bool = True,
use_power: bool = True,
vtln_high: float = -500.0,
vtln_low: float = 100.0,
vtln_warp: float = 1.0,
window_type: str = POVEY) -> Tensor:
r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's
compute-fbank-feats.
Args:
waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
this floor is applied to the zeroth component, representing the total signal energy. The floor on the
individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
(Default: ``0.0``)
htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features
(need to change other parameters). (Default: ``False``)
low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
to FFT. (Default: ``True``)
sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
specified there) (Default: ``16000.0``)
snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
it this way. (Default: ``False``)
use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        use_log_fbank (bool, optional): If true, produce log-filterbank, else produce linear. (Default: ``True``)
use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``)
        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq). (Default: ``-500.0``)
vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
(Default: ``'povey'``)
Returns:
Tensor: A fbank identical to what Kaldi would output. The shape is (m, ``num_mel_bins + use_energy``)
where m is calculated in _get_strided
"""
device, dtype = waveform.device, waveform.dtype
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient)
if len(waveform) < min_duration * sample_frequency:
# signal is too short
return torch.empty(0, device=device, dtype=dtype)
# strided_input, size (m, padded_window_size) and signal_log_energy, size (m)
strided_input, signal_log_energy = _get_window(
waveform, padded_window_size, window_size, window_shift, window_type, blackman_coeff,
snip_edges, raw_energy, energy_floor, dither, remove_dc_offset, preemphasis_coefficient)
# size (m, padded_window_size // 2 + 1)
spectrum = torch.fft.rfft(strided_input).abs()
if use_power:
spectrum = spectrum.pow(2.)
# size (num_mel_bins, padded_window_size // 2)
mel_energies, _ = get_mel_banks(num_mel_bins, padded_window_size, sample_frequency,
low_freq, high_freq, vtln_low, vtln_high, vtln_warp)
mel_energies = mel_energies.to(device=device, dtype=dtype)
# pad right column with zeros and add dimension, size (num_mel_bins, padded_window_size // 2 + 1)
mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode='constant', value=0)
    # sum with mel filterbanks over the power spectrum, size (m, num_mel_bins)
mel_energies = torch.mm(spectrum, mel_energies.T)
if use_log_fbank:
# avoid log of zero (which should be prevented anyway by dithering)
mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log()
# if use_energy then add it as the last column for htk_compat == true else first column
if use_energy:
signal_log_energy = signal_log_energy.unsqueeze(1) # size (m, 1)
# returns size (m, num_mel_bins + 1)
if htk_compat:
mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1)
else:
mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1)
mel_energies = _subtract_column_mean(mel_energies, subtract_mean)
return mel_energies
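# Illustrative usage (not part of the original module): 40-dimensional
# log-mel filterbank features for one second of random mono audio.
def _fbank_demo() -> None:
    waveform = torch.rand(1, 16000) * 2 - 1
    feats = fbank(waveform, num_mel_bins=40)
    # Same 98-frame framing as spectrogram(); one column per mel bin.
    assert feats.shape == (98, 40)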
def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor:
# returns a dct matrix of size (num_mel_bins, num_ceps)
# size (num_mel_bins, num_mel_bins)
dct_matrix = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, 'ortho')
# kaldi expects the first cepstral to be weighted sum of factor sqrt(1/num_mel_bins)
# this would be the first column in the dct_matrix for torchaudio as it expects a
# right multiply (which would be the first column of the kaldi's dct_matrix as kaldi
# expects a left multiply e.g. dct_matrix * vector).
dct_matrix[:, 0] = math.sqrt(1 / float(num_mel_bins))
dct_matrix = dct_matrix[:, :num_ceps]
return dct_matrix
def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor:
# returns size (num_ceps)
# Compute liftering coefficients (scaling on cepstral coeffs)
# coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected.
i = torch.arange(num_ceps)
return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter)
def mfcc(
waveform: Tensor,
blackman_coeff: float = 0.42,
cepstral_lifter: float = 22.0,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
high_freq: float = 0.0,
htk_compat: bool = False,
low_freq: float = 20.0,
num_ceps: int = 13,
min_duration: float = 0.0,
num_mel_bins: int = 23,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
use_energy: bool = False,
vtln_high: float = -500.0,
vtln_low: float = 100.0,
vtln_warp: float = 1.0,
window_type: str = POVEY) -> Tensor:
r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's
compute-mfcc-feats.
Args:
waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``)
channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
this floor is applied to the zeroth component, representing the total signal energy. The floor on the
individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
(Default: ``0.0``)
htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible
features (need to change other parameters). (Default: ``False``)
low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``)
min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
to FFT. (Default: ``True``)
sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
specified there) (Default: ``16000.0``)
snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
in the file, and the number of frames depends on the frame_length. If False, the number of frames
depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
it this way. (Default: ``False``)
use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq). (Default: ``-500.0``)
vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``'povey'``)
Returns:
Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``)
where m is calculated in _get_strided
"""
assert num_ceps <= num_mel_bins, 'num_ceps cannot be larger than num_mel_bins: %d vs %d' % (num_ceps, num_mel_bins)
device, dtype = waveform.device, waveform.dtype
    # fbank is computed from the power spectrum (use_power=True), without mean
    # subtraction (subtract_mean=False), and with log applied (use_log_fbank=True).
# size (m, num_mel_bins + use_energy)
feature = fbank(waveform=waveform, blackman_coeff=blackman_coeff, channel=channel,
dither=dither, energy_floor=energy_floor, frame_length=frame_length,
frame_shift=frame_shift, high_freq=high_freq, htk_compat=htk_compat,
low_freq=low_freq, min_duration=min_duration, num_mel_bins=num_mel_bins,
preemphasis_coefficient=preemphasis_coefficient, raw_energy=raw_energy,
remove_dc_offset=remove_dc_offset, round_to_power_of_two=round_to_power_of_two,
sample_frequency=sample_frequency, snip_edges=snip_edges, subtract_mean=False,
use_energy=use_energy, use_log_fbank=True, use_power=True,
vtln_high=vtln_high, vtln_low=vtln_low, vtln_warp=vtln_warp, window_type=window_type)
if use_energy:
# size (m)
signal_log_energy = feature[:, num_mel_bins if htk_compat else 0]
# offset is 0 if htk_compat==True else 1
mel_offset = int(not htk_compat)
feature = feature[:, mel_offset:(num_mel_bins + mel_offset)]
# size (num_mel_bins, num_ceps)
dct_matrix = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dtype, device=device)
# size (m, num_ceps)
feature = feature.matmul(dct_matrix)
if cepstral_lifter != 0.0:
# size (1, num_ceps)
lifter_coeffs = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0)
feature *= lifter_coeffs.to(device=device, dtype=dtype)
# if use_energy then replace the last column for htk_compat == true else first column
if use_energy:
feature[:, 0] = signal_log_energy
if htk_compat:
energy = feature[:, 0].unsqueeze(1) # size (m, 1)
feature = feature[:, 1:] # size (m, num_ceps - 1)
if not use_energy:
# scale on C0 (actually removing a scale we previously added that's
# part of one common definition of the cosine transform.)
energy *= math.sqrt(2)
feature = torch.cat((feature, energy), dim=1)
feature = _subtract_column_mean(feature, subtract_mean)
return feature
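# Illustrative usage (not part of the original module): 13 cepstra per frame,
# i.e. the log-mel filterbank above followed by a DCT and liftering.
def _mfcc_demo() -> None:
    waveform = torch.rand(1, 16000) * 2 - 1
    feats = mfcc(waveform, num_ceps=13, num_mel_bins=23)
    assert feats.shape == (98, 13)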
|
import os
from typing import List, Tuple, Optional
import torch
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from torchaudio.utils.sox_utils import list_effects
@_mod_utils.requires_sox()
def init_sox_effects():
"""Initialize resources required to use sox effects.
Note:
You do not need to call this function manually. It is called automatically.
        Once initialized, you do not need to call this function again across multiple uses of
        sox effects, though it is safe to do so as long as :func:`shutdown_sox_effects` has not been called.
        Once :func:`shutdown_sox_effects` is called, you can no longer use SoX effects, and initializing
        again will result in an error.
"""
torch.ops.torchaudio.sox_effects_initialize_sox_effects()
@_mod_utils.requires_sox()
def shutdown_sox_effects():
"""Clean up resources required to use sox effects.
Note:
You do not need to call this function manually. It is called automatically.
It is safe to call this function multiple times.
        Once :py:func:`shutdown_sox_effects` is called, you can no longer use SoX effects, and
        initializing again will result in an error.
"""
torch.ops.torchaudio.sox_effects_shutdown_sox_effects()
@_mod_utils.requires_sox()
def effect_names() -> List[str]:
"""Gets list of valid sox effect names
Returns:
List[str]: list of available effect names.
Example
>>> torchaudio.sox_effects.effect_names()
['allpass', 'band', 'bandpass', ... ]
"""
return list(list_effects().keys())
@_mod_utils.requires_sox()
def apply_effects_tensor(
tensor: torch.Tensor,
sample_rate: int,
effects: List[List[str]],
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
"""Apply sox effects to given Tensor
Note:
This function only works on CPU Tensors.
        This function works in a way very similar to the ``sox`` command; however, there are slight
        differences. For example, the ``sox`` command adds certain effects automatically (such as the
        ``rate`` effect after ``speed`` and ``pitch`` and other effects), but this function only
        applies the given effects. (Therefore, to actually apply the ``speed`` effect, you also
        need to give the ``rate`` effect with the desired sampling rate.)
Args:
tensor (torch.Tensor): Input 2D CPU Tensor.
sample_rate (int): Sample rate
effects (List[List[str]]): List of effects.
channels_first (bool, optional): Indicates if the input Tensor's dimension is
`[channels, time]` or `[time, channels]`
Returns:
(Tensor, int): Resulting Tensor and sample rate.
The resulting Tensor has the same ``dtype`` as the input Tensor, and
the same channels order. The shape of the Tensor can be different based on the
effects applied. Sample rate can also be different based on the effects applied.
Example - Basic usage
>>>
>>> # Defines the effects to apply
>>> effects = [
... ['gain', '-n'], # normalises to 0dB
... ['pitch', '5'], # 5 cent pitch shift
... ['rate', '8000'], # resample to 8000 Hz
... ]
>>>
>>> # Generate pseudo wave:
>>> # normalized, channels first, 2ch, sampling rate 16000, 1 second
>>> sample_rate = 16000
>>> waveform = 2 * torch.rand([2, sample_rate * 1]) - 1
>>> waveform.shape
torch.Size([2, 16000])
>>> waveform
tensor([[ 0.3138, 0.7620, -0.9019, ..., -0.7495, -0.4935, 0.5442],
[-0.0832, 0.0061, 0.8233, ..., -0.5176, -0.9140, -0.2434]])
>>>
>>> # Apply effects
>>> waveform, sample_rate = apply_effects_tensor(
        ...     waveform, sample_rate, effects, channels_first=True)
>>>
>>> # Check the result
>>> # The new waveform is sampling rate 8000, 1 second.
>>> # normalization and channel order are preserved
>>> waveform.shape
torch.Size([2, 8000])
>>> waveform
tensor([[ 0.5054, -0.5518, -0.4800, ..., -0.0076, 0.0096, -0.0110],
[ 0.1331, 0.0436, -0.3783, ..., -0.0035, 0.0012, 0.0008]])
>>> sample_rate
8000
Example - Torchscript-able transform
>>>
>>> # Use `apply_effects_tensor` in `torch.nn.Module` and dump it to file,
>>> # then run sox effect via Torchscript runtime.
>>>
>>> class SoxEffectTransform(torch.nn.Module):
... effects: List[List[str]]
...
... def __init__(self, effects: List[List[str]]):
... super().__init__()
... self.effects = effects
...
... def forward(self, tensor: torch.Tensor, sample_rate: int):
... return sox_effects.apply_effects_tensor(
... tensor, sample_rate, self.effects)
...
...
>>> # Create transform object
>>> effects = [
... ["lowpass", "-1", "300"], # apply single-pole lowpass filter
... ["rate", "8000"], # change sample rate to 8000
... ]
        >>> transform = SoxEffectTransform(effects)
>>>
>>> # Dump it to file and load
>>> path = 'sox_effect.zip'
        >>> torch.jit.script(transform).save(path)
>>> transform = torch.jit.load(path)
>>>
        >>> # Run transform
>>> waveform, input_sample_rate = torchaudio.load("input.wav")
>>> waveform, sample_rate = transform(waveform, input_sample_rate)
>>> assert sample_rate == 8000
"""
return torch.ops.torchaudio.sox_effects_apply_effects_tensor(
tensor, sample_rate, effects, channels_first)
@_mod_utils.requires_sox()
def apply_effects_file(
path: str,
effects: List[List[str]],
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
"""Apply sox effects to the audio file and load the resulting data as Tensor
Note:
        This function works in a way very similar to the ``sox`` command; however, there are slight
        differences. For example, the ``sox`` command adds certain effects automatically (such as the
        ``rate`` effect after ``speed``, ``pitch`` etc.), but this function only applies the given
        effects. Therefore, to actually apply the ``speed`` effect, you also need to give the ``rate``
        effect with the desired sampling rate, because internally, the ``speed`` effect only alters
        the sampling rate and leaves the samples untouched.
Args:
path (path-like object or file-like object):
Source of audio data. When the function is not compiled by TorchScript,
(e.g. ``torch.jit.script``), the following types are accepted:
* ``path-like``: file path
* ``file-like``: Object with ``read(size: int) -> bytes`` method,
which returns byte string of at most ``size`` length.
When the function is compiled by TorchScript, only ``str`` type is allowed.
Note: This argument is intentionally annotated as ``str`` only for
TorchScript compiler compatibility.
effects (List[List[str]]): List of effects.
normalize (bool, optional):
            When ``True``, this function always returns ``float32``, and sample values are
            normalized to ``[-1.0, 1.0]``.
            If the input file is an integer WAV, giving ``False`` will change the resulting Tensor type
            to an integer type. This argument has no effect for formats other
            than integer WAV.
channels_first (bool, optional): When True, the returned Tensor has dimension `[channel, time]`.
Otherwise, the returned Tensor's dimension is `[time, channel]`.
format (str or None, optional):
Override the format detection with the given format.
            Providing the argument might help when libsox cannot infer the format
            from the header or extension.
Returns:
(Tensor, int): Resulting Tensor and sample rate.
If ``normalize=True``, the resulting Tensor is always ``float32`` type.
            If ``normalize=False`` and the input audio file is an integer WAV file, then the
            resulting Tensor has the corresponding integer type. (Note: 24-bit integer type is not supported)
If ``channels_first=True``, the resulting Tensor has dimension `[channel, time]`,
otherwise `[time, channel]`.
Example - Basic usage
>>>
>>> # Defines the effects to apply
>>> effects = [
... ['gain', '-n'], # normalises to 0dB
... ['pitch', '5'], # 5 cent pitch shift
... ['rate', '8000'], # resample to 8000 Hz
... ]
>>>
>>> # Apply effects and load data with channels_first=True
>>> waveform, sample_rate = apply_effects_file("data.wav", effects, channels_first=True)
>>>
>>> # Check the result
>>> waveform.shape
torch.Size([2, 8000])
>>> waveform
tensor([[ 5.1151e-03, 1.8073e-02, 2.2188e-02, ..., 1.0431e-07,
-1.4761e-07, 1.8114e-07],
[-2.6924e-03, 2.1860e-03, 1.0650e-02, ..., 6.4122e-07,
-5.6159e-07, 4.8103e-07]])
>>> sample_rate
8000
Example - Apply random speed perturbation to dataset
>>>
>>> # Load data from file, apply random speed perturbation
>>> class RandomPerturbationFile(torch.utils.data.Dataset):
... \"\"\"Given flist, apply random speed perturbation
...
... Suppose all the input files are at least one second long.
... \"\"\"
... def __init__(self, flist: List[str], sample_rate: int):
... super().__init__()
... self.flist = flist
... self.sample_rate = sample_rate
...
... def __getitem__(self, index):
        ...         speed = 0.5 + 1.5 * random.random()
        ...         effects = [
        ...             ['gain', '-n', '-10'],  # apply 10 dB attenuation
... ['remix', '-'], # merge all the channels
... ['speed', f'{speed:.5f}'], # duration is now 0.5 ~ 2.0 seconds.
... ['rate', f'{self.sample_rate}'],
... ['pad', '0', '1.5'], # add 1.5 seconds silence at the end
... ['trim', '0', '2'], # get the first 2 seconds
... ]
... waveform, _ = torchaudio.sox_effects.apply_effects_file(
... self.flist[index], effects)
... return waveform
...
... def __len__(self):
... return len(self.flist)
...
>>> dataset = RandomPerturbationFile(file_list, sample_rate=8000)
>>> loader = torch.utils.data.DataLoader(dataset, batch_size=32)
>>> for batch in loader:
        ...     pass
"""
if not torch.jit.is_scripting():
if hasattr(path, 'read'):
return torchaudio._torchaudio.apply_effects_fileobj(
path, effects, normalize, channels_first, format)
path = os.fspath(path)
return torch.ops.torchaudio.sox_effects_apply_effects_file(
path, effects, normalize, channels_first, format)
|
from torchaudio._internal import module_utils as _mod_utils
from .sox_effects import (
init_sox_effects,
shutdown_sox_effects,
effect_names,
apply_effects_tensor,
apply_effects_file,
)
if _mod_utils.is_sox_available():
import atexit
init_sox_effects()
atexit.register(shutdown_sox_effects)
__all__ = [
'init_sox_effects',
'shutdown_sox_effects',
'effect_names',
'apply_effects_tensor',
'apply_effects_file',
]
|
import math
import warnings
from typing import Optional
import torch
from torch import Tensor
def _dB2Linear(x: float) -> float:
return math.exp(x * math.log(10) / 20.0)
def _generate_wave_table(
wave_type: str,
data_type: str,
table_size: int,
min: float,
max: float,
phase: float,
device: torch.device,
) -> Tensor:
r"""A helper function for phaser. Generates a table with given parameters.
Args:
        wave_type (str): SINE or TRIANGLE
data_type (str): desired data_type ( `INT` or `FLOAT` )
table_size (int): desired table size
min (float): desired min value
max (float): desired max value
phase (float): desired phase
device (torch.device): Torch device on which table must be generated
Returns:
Tensor: A 1D tensor with wave table values
"""
phase_offset = int(phase / math.pi / 2 * table_size + 0.5)
t = torch.arange(table_size, device=device, dtype=torch.int32)
point = (t + phase_offset) % table_size
d = torch.zeros_like(point, device=device, dtype=torch.float64)
if wave_type == "SINE":
d = (torch.sin(point.to(torch.float64) / table_size * 2 * math.pi) + 1) / 2
elif wave_type == "TRIANGLE":
d = point.to(torch.float64) * 2 / table_size
value = torch.div(4 * point, table_size, rounding_mode='floor')
d[value == 0] = d[value == 0] + 0.5
d[value == 1] = 1.5 - d[value == 1]
d[value == 2] = 1.5 - d[value == 2]
d[value == 3] = d[value == 3] - 1.5
d = d * (max - min) + min
if data_type == "INT":
mask = d < 0
d[mask] = d[mask] - 0.5
d[~mask] = d[~mask] + 0.5
d = d.to(torch.int32)
elif data_type == "FLOAT":
d = d.to(torch.float32)
return d
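# Illustrative usage (not part of the original module): one period of a sine
# wave table as used by a phaser-style LFO, rescaled into [min, max].
def _wave_table_demo() -> None:
    table = _generate_wave_table(
        wave_type="SINE", data_type="FLOAT", table_size=8,
        min=0.0, max=1.0, phase=0.0, device=torch.device("cpu"))
    # (sin(2 * pi * t / 8) + 1) / 2 for t = 0..7, so it starts at 0.5 and
    # peaks at 1.0 a quarter period later.
    assert table.shape == (8,) and table.dtype == torch.float32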
def allpass_biquad(
waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707
) -> Tensor:
r"""Design two-pole all-pass filter. Similar to SoX implementation.
Args:
waveform(torch.Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
central_freq (float or torch.Tensor): central frequency (in Hz)
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
dtype = waveform.dtype
device = waveform.device
central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
w0 = 2 * math.pi * central_freq / sample_rate
alpha = torch.sin(w0) / 2 / Q
b0 = 1 - alpha
b1 = -2 * torch.cos(w0)
b2 = 1 + alpha
a0 = 1 + alpha
a1 = -2 * torch.cos(w0)
a2 = 1 - alpha
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def band_biquad(
waveform: Tensor,
sample_rate: int,
central_freq: float,
Q: float = 0.707,
noise: bool = False,
) -> Tensor:
r"""Design two-pole band filter. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
central_freq (float or torch.Tensor): central frequency (in Hz)
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).
noise (bool, optional) : If ``True``, uses the alternate mode for un-pitched audio (e.g. percussion).
If ``False``, uses mode oriented to pitched audio, i.e. voice, singing,
or instrumental music (Default: ``False``).
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
dtype = waveform.dtype
device = waveform.device
central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
w0 = 2 * math.pi * central_freq / sample_rate
bw_Hz = central_freq / Q
a0 = 1.0
a2 = torch.exp(-2 * math.pi * bw_Hz / sample_rate)
a1 = -4 * a2 / (1 + a2) * torch.cos(w0)
b0 = torch.sqrt(1 - a1 * a1 / (4 * a2)) * (1 - a2)
if noise:
mult = torch.sqrt(((1 + a2) * (1 + a2) - a1 * a1) * (1 - a2) / (1 + a2)) / b0
b0 = mult * b0
b1 = 0.0
b2 = 0.0
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def bandpass_biquad(
waveform: Tensor,
sample_rate: int,
central_freq: float,
Q: float = 0.707,
const_skirt_gain: bool = False,
) -> Tensor:
r"""Design two-pole band-pass filter. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
central_freq (float or torch.Tensor): central frequency (in Hz)
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
const_skirt_gain (bool, optional) : If ``True``, uses a constant skirt gain (peak gain = Q).
If ``False``, uses a constant 0dB peak gain. (Default: ``False``)
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
dtype = waveform.dtype
device = waveform.device
central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
w0 = 2 * math.pi * central_freq / sample_rate
alpha = torch.sin(w0) / 2 / Q
temp = torch.sin(w0) / 2 if const_skirt_gain else alpha
b0 = temp
b1 = 0.0
b2 = -temp
a0 = 1 + alpha
a1 = -2 * torch.cos(w0)
a2 = 1 - alpha
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def bandreject_biquad(
waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707
) -> Tensor:
r"""Design two-pole band-reject filter. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
central_freq (float or torch.Tensor): central frequency (in Hz)
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
dtype = waveform.dtype
device = waveform.device
central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
w0 = 2 * math.pi * central_freq / sample_rate
alpha = torch.sin(w0) / 2 / Q
b0 = 1.0
b1 = -2 * torch.cos(w0)
b2 = 1.0
a0 = 1 + alpha
a1 = -2 * torch.cos(w0)
a2 = 1 - alpha
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def bass_biquad(
waveform: Tensor,
sample_rate: int,
gain: float,
central_freq: float = 100,
Q: float = 0.707,
) -> Tensor:
r"""Design a bass tone-control effect. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB.
central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``100``)
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
dtype = waveform.dtype
device = waveform.device
central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
gain = torch.as_tensor(gain, dtype=dtype, device=device)
w0 = 2 * math.pi * central_freq / sample_rate
alpha = torch.sin(w0) / 2 / Q
A = torch.exp(gain / 40 * math.log(10))
temp1 = 2 * torch.sqrt(A) * alpha
temp2 = (A - 1) * torch.cos(w0)
temp3 = (A + 1) * torch.cos(w0)
b0 = A * ((A + 1) - temp2 + temp1)
b1 = 2 * A * ((A - 1) - temp3)
b2 = A * ((A + 1) - temp2 - temp1)
a0 = (A + 1) + temp2 + temp1
a1 = -2 * ((A - 1) + temp3)
a2 = (A + 1) + temp2 - temp1
return biquad(waveform, b0 / a0, b1 / a0, b2 / a0, a0 / a0, a1 / a0, a2 / a0)
def biquad(
waveform: Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float
) -> Tensor:
r"""Perform a biquad filter of input tensor. Initial conditions set to 0.
https://en.wikipedia.org/wiki/Digital_biquad_filter
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
b0 (float or torch.Tensor): numerator coefficient of current input, x[n]
b1 (float or torch.Tensor): numerator coefficient of input one time step ago x[n-1]
b2 (float or torch.Tensor): numerator coefficient of input two time steps ago x[n-2]
a0 (float or torch.Tensor): denominator coefficient of current output y[n], typically 1
a1 (float or torch.Tensor): denominator coefficient of current output y[n-1]
a2 (float or torch.Tensor): denominator coefficient of current output y[n-2]
Returns:
Tensor: Waveform with dimension of `(..., time)`
"""
device = waveform.device
dtype = waveform.dtype
b0 = torch.as_tensor(b0, dtype=dtype, device=device).view(1)
b1 = torch.as_tensor(b1, dtype=dtype, device=device).view(1)
b2 = torch.as_tensor(b2, dtype=dtype, device=device).view(1)
a0 = torch.as_tensor(a0, dtype=dtype, device=device).view(1)
a1 = torch.as_tensor(a1, dtype=dtype, device=device).view(1)
a2 = torch.as_tensor(a2, dtype=dtype, device=device).view(1)
output_waveform = lfilter(
waveform,
torch.cat([a0, a1, a2]),
torch.cat([b0, b1, b2]),
)
return output_waveform
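# Illustrative sketch (not part of the library source): `biquad` evaluates the
# difference equation y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2]
# - a1*y[n-1] - a2*y[n-2]) / a0, so b = (1, 0, 0) and a = (1, 0, 0) form the
# identity filter, which makes the coefficient convention easy to verify.
def _example_biquad_identity():
    waveform = torch.rand(1, 100) * 2 - 1  # example input in [-1, 1]
    out = biquad(waveform, b0=1.0, b1=0.0, b2=0.0, a0=1.0, a1=0.0, a2=0.0)
    assert torch.allclose(out, waveform, atol=1e-6)  # identity passes through
    return out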
def contrast(waveform: Tensor, enhancement_amount: float = 75.0) -> Tensor:
r"""Apply contrast effect. Similar to SoX implementation.
    Comparable with compression, this effect modifies an audio signal to make it sound louder.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
enhancement_amount (float, optional): controls the amount of the enhancement
Allowed range of values for enhancement_amount : 0-100
Note that enhancement_amount = 0 still gives a significant contrast enhancement
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
"""
if not 0 <= enhancement_amount <= 100:
raise ValueError("Allowed range of values for enhancement_amount : 0-100")
contrast = enhancement_amount / 750.0
temp1 = waveform * (math.pi / 2)
temp2 = contrast * torch.sin(temp1 * 4)
output_waveform = torch.sin(temp1 + temp2)
return output_waveform
def dcshift(
waveform: Tensor, shift: float, limiter_gain: Optional[float] = None
) -> Tensor:
r"""Apply a DC shift to the audio. Similar to SoX implementation.
This can be useful to remove a DC offset
(caused perhaps by a hardware problem in the recording chain) from the audio
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
        shift (float): indicates the amount to shift the audio.
            Allowed range of values for shift is -2.0 to +2.0.
        limiter_gain (float or None, optional): used only on peaks to prevent clipping.
            It should have a value much less than 1 (e.g. 0.05 or 0.02).
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
"""
    # Clone so that the limiter branches below do not modify the input in place.
    output_waveform = waveform.clone()
limiter_threshold = 0.0
if limiter_gain is not None:
limiter_threshold = 1.0 - (abs(shift) - limiter_gain)
if limiter_gain is not None and shift > 0:
mask = waveform > limiter_threshold
temp = (
(waveform[mask] - limiter_threshold)
* limiter_gain
/ (1 - limiter_threshold)
)
output_waveform[mask] = (temp + limiter_threshold + shift).clamp(
max=limiter_threshold
)
output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1)
elif limiter_gain is not None and shift < 0:
mask = waveform < -limiter_threshold
temp = (
(waveform[mask] + limiter_threshold)
* limiter_gain
/ (1 - limiter_threshold)
)
output_waveform[mask] = (temp - limiter_threshold + shift).clamp(
min=-limiter_threshold
)
output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1)
else:
output_waveform = (waveform + shift).clamp(min=-1, max=1)
return output_waveform
def deemph_biquad(waveform: Tensor, sample_rate: int) -> Tensor:
r"""Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, Allowed sample rate ``44100`` or ``48000``
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
if sample_rate == 44100:
central_freq = 5283
width_slope = 0.4845
gain = -9.477
elif sample_rate == 48000:
central_freq = 5356
width_slope = 0.479
gain = -9.62
else:
raise ValueError("Sample rate must be 44100 (audio-CD) or 48000 (DAT)")
w0 = 2 * math.pi * central_freq / sample_rate
A = math.exp(gain / 40.0 * math.log(10))
alpha = math.sin(w0) / 2 * math.sqrt((A + 1 / A) * (1 / width_slope - 1) + 2)
temp1 = 2 * math.sqrt(A) * alpha
temp2 = (A - 1) * math.cos(w0)
temp3 = (A + 1) * math.cos(w0)
b0 = A * ((A + 1) + temp2 + temp1)
b1 = -2 * A * ((A - 1) + temp3)
b2 = A * ((A + 1) + temp2 - temp1)
a0 = (A + 1) - temp2 + temp1
a1 = 2 * ((A - 1) - temp3)
a2 = (A + 1) - temp2 - temp1
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def _add_noise_shaping(dithered_waveform: Tensor, waveform: Tensor) -> Tensor:
r"""Noise shaping is calculated by error:
error[n] = dithered[n] - original[n]
noise_shaped_waveform[n] = dithered[n] + error[n-1]
"""
wf_shape = waveform.size()
waveform = waveform.reshape(-1, wf_shape[-1])
dithered_shape = dithered_waveform.size()
dithered_waveform = dithered_waveform.reshape(-1, dithered_shape[-1])
error = dithered_waveform - waveform
# add error[n-1] to dithered_waveform[n], so offset the error by 1 index
zeros = torch.zeros(1, dtype=error.dtype, device=error.device)
for index in range(error.size()[0]):
err = error[index]
error_offset = torch.cat((zeros, err))
error[index] = error_offset[: waveform.size()[1]]
noise_shaped = dithered_waveform + error
return noise_shaped.reshape(dithered_shape[:-1] + noise_shaped.shape[-1:])
def _apply_probability_distribution(
waveform: Tensor, density_function: str = "TPDF"
) -> Tensor:
r"""Apply a probability distribution function on a waveform.
Triangular probability density function (TPDF) dither noise has a
triangular distribution; values in the center of the range have a higher
probability of occurring.
Rectangular probability density function (RPDF) dither noise has a
uniform distribution; any value in the specified range has the same
probability of occurring.
Gaussian probability density function (GPDF) has a normal distribution.
The relationship of probabilities of results follows a bell-shaped,
or Gaussian curve, typical of dither generated by analog sources.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
density_function (str, optional): The density function of a
continuous random variable (Default: ``"TPDF"``)
Options: Triangular Probability Density Function - `TPDF`
Rectangular Probability Density Function - `RPDF`
Gaussian Probability Density Function - `GPDF`
Returns:
        Tensor: waveform dithered with the selected density function
"""
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
channel_size = waveform.size()[0] - 1
time_size = waveform.size()[-1] - 1
    random_channel = (
        int(torch.randint(channel_size, [1]).item()) if channel_size > 0 else 0
    )
    random_time = (
        int(torch.randint(time_size, [1]).item()) if time_size > 0 else 0
    )
number_of_bits = 16
up_scaling = 2 ** (number_of_bits - 1) - 2
signal_scaled = waveform * up_scaling
down_scaling = 2 ** (number_of_bits - 1)
signal_scaled_dis = waveform
if density_function == "RPDF":
RPDF = waveform[random_channel][random_time] - 0.5
signal_scaled_dis = signal_scaled + RPDF
elif density_function == "GPDF":
# TODO Replace by distribution code once
# https://github.com/pytorch/pytorch/issues/29843 is resolved
# gaussian = torch.distributions.normal.Normal(torch.mean(waveform, -1), 1).sample()
num_rand_variables = 6
gaussian = waveform[random_channel][random_time]
        for ws in num_rand_variables * [time_size]:
            rand_chan = int(torch.randint(channel_size, [1]).item())
            gaussian += waveform[rand_chan][int(torch.randint(ws, [1]).item())]
signal_scaled_dis = signal_scaled + gaussian
else:
# dtype needed for https://github.com/pytorch/pytorch/issues/32358
TPDF = torch.bartlett_window(
time_size + 1, dtype=signal_scaled.dtype, device=signal_scaled.device
)
TPDF = TPDF.repeat((channel_size + 1), 1)
signal_scaled_dis = signal_scaled + TPDF
quantised_signal_scaled = torch.round(signal_scaled_dis)
quantised_signal = quantised_signal_scaled / down_scaling
# unpack batch
return quantised_signal.reshape(shape[:-1] + quantised_signal.shape[-1:])
def dither(
waveform: Tensor, density_function: str = "TPDF", noise_shaping: bool = False
) -> Tensor:
r"""Dither increases the perceived dynamic range of audio stored at a
particular bit-depth by eliminating nonlinear truncation distortion
(i.e. adding minimally perceived noise to mask distortion caused by quantization).
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
density_function (str, optional):
The density function of a continuous random variable. One of
``"TPDF"`` (Triangular Probability Density Function),
``"RPDF"`` (Rectangular Probability Density Function) or
``"GPDF"`` (Gaussian Probability Density Function) (Default: ``"TPDF"``).
noise_shaping (bool, optional): a filtering process that shapes the spectral
energy of quantisation error (Default: ``False``)
Returns:
Tensor: waveform dithered
"""
dithered = _apply_probability_distribution(
waveform, density_function=density_function
)
if noise_shaping:
return _add_noise_shaping(dithered, waveform)
else:
return dithered
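# Illustrative sketch (not part of the library source): simulate 16-bit
# re-quantization of a stereo signal with TPDF dither plus noise shaping.
# The input tensor below is an assumption made for this example.
def _example_dither():
    waveform = torch.rand(2, 1000) * 2 - 1  # (channel, time) test signal
    return dither(waveform, density_function="TPDF", noise_shaping=True)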
def equalizer_biquad(
waveform: Tensor,
sample_rate: int,
center_freq: float,
gain: float,
Q: float = 0.707,
) -> Tensor:
r"""Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
center_freq (float): filter's central frequency
gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
Returns:
Tensor: Waveform of dimension of `(..., time)`
"""
dtype = waveform.dtype
device = waveform.device
center_freq = torch.as_tensor(center_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
gain = torch.as_tensor(gain, dtype=dtype, device=device)
w0 = 2 * math.pi * center_freq / sample_rate
A = torch.exp(gain / 40.0 * math.log(10))
alpha = torch.sin(w0) / 2 / Q
b0 = 1 + alpha * A
b1 = -2 * torch.cos(w0)
b2 = 1 - alpha * A
a0 = 1 + alpha / A
a1 = -2 * torch.cos(w0)
a2 = 1 - alpha / A
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def filtfilt(
waveform: Tensor, a_coeffs: Tensor, b_coeffs: Tensor, clamp: bool = True,
) -> Tensor:
r"""Apply an IIR filter forward and backward to a waveform.
Inspired by https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1.
a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either
1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
Lower delay coefficients are first, e.g. ``[a0, a1, a2, ...]``.
Must be same size as b_coeffs (pad with 0's as necessary).
b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either
1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
Lower delay coefficients are first, e.g. ``[b0, b1, b2, ...]``.
Must be same size as a_coeffs (pad with 0's as necessary).
clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
Returns:
Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs``
are 2D Tensors, or `(..., time)` otherwise.
"""
forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp=False, batching=True)
backward_filtered = lfilter(
forward_filtered.flip(-1), a_coeffs, b_coeffs, clamp=clamp, batching=True,
).flip(-1)
return backward_filtered
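# Illustrative sketch (not part of the library source): zero-phase smoothing.
# `filtfilt` runs the filter forward and backward, cancelling the phase delay
# a single `lfilter` pass would introduce. The 3-tap moving average below is
# an FIR filter, so its denominator is [1, 0, 0], zero-padded to match `b`.
def _example_filtfilt():
    waveform = torch.rand(1, 1000) * 2 - 1
    b = torch.tensor([1.0 / 3, 1.0 / 3, 1.0 / 3])  # numerator (example values)
    a = torch.tensor([1.0, 0.0, 0.0])  # denominator padded to len(b)
    return filtfilt(waveform, a_coeffs=a, b_coeffs=b)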
def flanger(
waveform: Tensor,
sample_rate: int,
delay: float = 0.0,
depth: float = 2.0,
regen: float = 0.0,
width: float = 71.0,
speed: float = 0.5,
phase: float = 25.0,
modulation: str = "sinusoidal",
interpolation: str = "linear",
) -> Tensor:
r"""Apply a flanger effect to the audio. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., channel, time)` .
Max 4 channels allowed
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
        delay (float, optional): desired delay in milliseconds (ms).
            Allowed range of values is 0 to 30. (Default: ``0.0``)
        depth (float, optional): desired delay depth in milliseconds (ms).
            Allowed range of values is 0 to 10. (Default: ``2.0``)
        regen (float, optional): desired regen (feedback gain) in dB.
            Allowed range of values is -95 to 95. (Default: ``0.0``)
        width (float, optional): desired width (delay gain) in dB.
            Allowed range of values is 0 to 100. (Default: ``71.0``)
        speed (float, optional): modulation speed in Hz.
            Allowed range of values is 0.1 to 10. (Default: ``0.5``)
        phase (float, optional): percentage phase-shift for multi-channel.
            Allowed range of values is 0 to 100. (Default: ``25.0``)
modulation (str, optional): Use either "sinusoidal" or "triangular" modulation. (Default: ``sinusoidal``)
interpolation (str, optional): Use either "linear" or "quadratic" for delay-line interpolation.
(Default: ``linear``)
Returns:
Tensor: Waveform of dimension of `(..., channel, time)`
Reference:
- http://sox.sourceforge.net/sox.html
- Scott Lehman, `Effects Explained`_,
.. _Effects Explained:
https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html
"""
if modulation not in ("sinusoidal", "triangular"):
raise ValueError("Only 'sinusoidal' or 'triangular' modulation allowed")
if interpolation not in ("linear", "quadratic"):
raise ValueError("Only 'linear' or 'quadratic' interpolation allowed")
actual_shape = waveform.shape
device, dtype = waveform.device, waveform.dtype
if actual_shape[-2] > 4:
raise ValueError("Max 4 channels allowed")
# convert to 3D (batch, channels, time)
waveform = waveform.view(-1, actual_shape[-2], actual_shape[-1])
# Scaling
feedback_gain = regen / 100
delay_gain = width / 100
channel_phase = phase / 100
delay_min = delay / 1000
delay_depth = depth / 1000
n_channels = waveform.shape[-2]
if modulation == "sinusoidal":
wave_type = "SINE"
else:
wave_type = "TRIANGLE"
# Balance output:
in_gain = 1.0 / (1 + delay_gain)
delay_gain = delay_gain / (1 + delay_gain)
# Balance feedback loop:
delay_gain = delay_gain * (1 - abs(feedback_gain))
delay_buf_length = int((delay_min + delay_depth) * sample_rate + 0.5)
delay_buf_length = delay_buf_length + 2
delay_bufs = torch.zeros(
waveform.shape[0], n_channels, delay_buf_length, dtype=dtype, device=device
)
delay_last = torch.zeros(waveform.shape[0], n_channels, dtype=dtype, device=device)
lfo_length = int(sample_rate / speed)
table_min = math.floor(delay_min * sample_rate + 0.5)
table_max = delay_buf_length - 2.0
lfo = _generate_wave_table(
wave_type=wave_type,
data_type="FLOAT",
table_size=lfo_length,
min=float(table_min),
max=float(table_max),
phase=3 * math.pi / 2,
device=device,
)
output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)
delay_buf_pos = 0
lfo_pos = 0
channel_idxs = torch.arange(0, n_channels, device=device)
for i in range(waveform.shape[-1]):
delay_buf_pos = (delay_buf_pos + delay_buf_length - 1) % delay_buf_length
cur_channel_phase = (channel_idxs * lfo_length * channel_phase + 0.5).to(
torch.int64
)
delay_tensor = lfo[(lfo_pos + cur_channel_phase) % lfo_length]
frac_delay = torch.frac(delay_tensor)
delay_tensor = torch.floor(delay_tensor)
int_delay = delay_tensor.to(torch.int64)
temp = waveform[:, :, i]
delay_bufs[:, :, delay_buf_pos] = temp + delay_last * feedback_gain
delayed_0 = delay_bufs[
:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length
]
int_delay = int_delay + 1
delayed_1 = delay_bufs[
:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length
]
int_delay = int_delay + 1
if interpolation == "linear":
delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay
else:
delayed_2 = delay_bufs[
:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length
]
int_delay = int_delay + 1
delayed_2 = delayed_2 - delayed_0
delayed_1 = delayed_1 - delayed_0
a = delayed_2 * 0.5 - delayed_1
b = delayed_1 * 2 - delayed_2 * 0.5
delayed = delayed_0 + (a * frac_delay + b) * frac_delay
delay_last = delayed
output_waveform[:, :, i] = waveform[:, :, i] * in_gain + delayed * delay_gain
lfo_pos = (lfo_pos + 1) % lfo_length
return output_waveform.clamp(min=-1, max=1).view(actual_shape)
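# Illustrative sketch (not part of the library source): apply the flanger with
# its default sweep to a short stereo clip. The per-sample loop above makes
# this effect slow on long signals, so the example input is kept short.
def _example_flanger():
    sample_rate = 8000
    waveform = torch.rand(2, sample_rate) * 2 - 1  # (channel, time), <= 4 channels
    return flanger(waveform, sample_rate)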
def gain(waveform: Tensor, gain_db: float = 1.0) -> Tensor:
r"""Apply amplification or attenuation to the whole waveform.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
        gain_db (float, optional): Gain adjustment in decibels (dB). (Default: ``1.0``)
Returns:
Tensor: the whole waveform amplified by gain_db.
"""
if gain_db == 0:
return waveform
ratio = 10 ** (gain_db / 20)
return waveform * ratio
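# Illustrative sketch (not part of the library source): `gain` converts dB to
# a linear ratio of 10 ** (gain_db / 20), so 20 * log10(2) dB doubles the
# amplitude exactly. The constant input below is an example value.
def _example_gain():
    waveform = torch.full((1, 10), 0.25)
    louder = gain(waveform, gain_db=20 * math.log10(2))
    assert torch.allclose(louder, waveform * 2)
    return louder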
def highpass_biquad(
waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707
) -> Tensor:
r"""Design biquad highpass filter and perform filtering. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
cutoff_freq (float or torch.Tensor): filter cutoff frequency
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
Returns:
        Tensor: Waveform of dimension of `(..., time)`
"""
dtype = waveform.dtype
device = waveform.device
cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
w0 = 2 * math.pi * cutoff_freq / sample_rate
alpha = torch.sin(w0) / 2.0 / Q
b0 = (1 + torch.cos(w0)) / 2
b1 = -1 - torch.cos(w0)
b2 = b0
a0 = 1 + alpha
a1 = -2 * torch.cos(w0)
a2 = 1 - alpha
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
n_order = a_coeffs_flipped.size(1)
a_coeffs_flipped = a_coeffs_flipped.unsqueeze(2)
for i_sample, o0 in enumerate(input_signal_windows.permute(2, 0, 1)):
windowed_output_signal = padded_output_waveform[
:, :, i_sample:i_sample + n_order
]
o0 -= (windowed_output_signal.transpose(0, 1) @ a_coeffs_flipped)[..., 0].t()
padded_output_waveform[:, :, i_sample + n_order - 1] = o0
try:
_lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop
except RuntimeError as err:
assert str(err) == 'No such operator torchaudio::_lfilter_core_loop'
_lfilter_core_cpu_loop = _lfilter_core_generic_loop
def _lfilter_core(
waveform: Tensor,
a_coeffs: Tensor,
b_coeffs: Tensor,
) -> Tensor:
assert a_coeffs.size() == b_coeffs.size()
assert len(waveform.size()) == 3
assert waveform.device == a_coeffs.device
assert b_coeffs.device == a_coeffs.device
n_batch, n_channel, n_sample = waveform.size()
n_order = a_coeffs.size(1)
assert n_order > 0
# Pad the input and create output
padded_waveform = torch.nn.functional.pad(waveform, [n_order - 1, 0])
padded_output_waveform = torch.zeros_like(padded_waveform)
# Set up the coefficients matrix
# Flip coefficients' order
a_coeffs_flipped = a_coeffs.flip(1)
b_coeffs_flipped = b_coeffs.flip(1)
# calculate windowed_input_signal in parallel using convolution
input_signal_windows = torch.nn.functional.conv1d(
padded_waveform,
b_coeffs_flipped.unsqueeze(1),
groups=n_channel
)
input_signal_windows.div_(a_coeffs[:, :1])
a_coeffs_flipped.div_(a_coeffs[:, :1])
if input_signal_windows.device == torch.device('cpu') and\
a_coeffs_flipped.device == torch.device('cpu') and\
padded_output_waveform.device == torch.device('cpu'):
_lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
else:
_lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
output = padded_output_waveform[:, :, n_order - 1:]
return output
try:
_lfilter = torch.ops.torchaudio._lfilter
except RuntimeError as err:
assert str(err) == 'No such operator torchaudio::_lfilter'
_lfilter = _lfilter_core
def lfilter(
waveform: Tensor,
a_coeffs: Tensor,
b_coeffs: Tensor,
clamp: bool = True,
batching: bool = True
) -> Tensor:
r"""Perform an IIR filter by evaluating difference equation.
Note:
        To avoid numerical problems, a small filter order is preferred.
Using double precision could also minimize numerical precision errors.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1.
a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either
1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
            Lower delay coefficients are first, e.g. ``[a0, a1, a2, ...]``.
Must be same size as b_coeffs (pad with 0's as necessary).
b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either
1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
            Lower delay coefficients are first, e.g. ``[b0, b1, b2, ...]``.
Must be same size as a_coeffs (pad with 0's as necessary).
clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
        batching (bool, optional): Effective only when coefficients are 2D. If ``True``, then waveform should be at
            least 2D, and the size of the second-to-last axis should equal ``num_filters``.
The output can be expressed as ``output[..., i, :] = lfilter(waveform[..., i, :],
a_coeffs[i], b_coeffs[i], clamp=clamp, batching=False)``. (Default: ``True``)
Returns:
Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs``
are 2D Tensors, or `(..., time)` otherwise.
"""
assert a_coeffs.size() == b_coeffs.size()
assert a_coeffs.ndim <= 2
if a_coeffs.ndim > 1:
if batching:
assert waveform.ndim > 1
assert waveform.shape[-2] == a_coeffs.shape[0]
else:
waveform = torch.stack([waveform] * a_coeffs.shape[0], -2)
else:
a_coeffs = a_coeffs.unsqueeze(0)
b_coeffs = b_coeffs.unsqueeze(0)
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, a_coeffs.shape[0], shape[-1])
output = _lfilter(waveform, a_coeffs, b_coeffs)
if clamp:
output = torch.clamp(output, min=-1.0, max=1.0)
# unpack batch
output = output.reshape(shape[:-1] + output.shape[-1:])
return output
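# Illustrative sketch (not part of the library source) of the 2D-coefficient
# batching mode: row i of `a_coeffs`/`b_coeffs` filters channel i of the
# waveform. The identity and moving-average coefficients are example values.
def _example_lfilter_batching():
    waveform = torch.rand(2, 1000) * 2 - 1  # (num_filters, time)
    b = torch.tensor([[1.0, 0.0, 0.0], [1.0 / 3, 1.0 / 3, 1.0 / 3]])
    a = torch.tensor([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    return lfilter(waveform, a_coeffs=a, b_coeffs=b, batching=True)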
def lowpass_biquad(
waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707
) -> Tensor:
r"""Design biquad lowpass filter and perform filtering. Similar to SoX implementation.
Args:
waveform (torch.Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
cutoff_freq (float or torch.Tensor): filter cutoff frequency
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
Returns:
Tensor: Waveform of dimension of `(..., time)`
"""
dtype = waveform.dtype
device = waveform.device
cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
w0 = 2 * math.pi * cutoff_freq / sample_rate
alpha = torch.sin(w0) / 2 / Q
b0 = (1 - torch.cos(w0)) / 2
b1 = 1 - torch.cos(w0)
b2 = b0
a0 = 1 + alpha
a1 = -2 * torch.cos(w0)
a2 = 1 - alpha
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def _overdrive_core_loop_generic(
waveform: Tensor,
temp: Tensor,
last_in: Tensor,
last_out: Tensor,
output_waveform: Tensor
):
for i in range(waveform.shape[-1]):
last_out = temp[:, i] - last_in + 0.995 * last_out
last_in = temp[:, i]
output_waveform[:, i] = waveform[:, i] * 0.5 + last_out * 0.75
try:
_overdrive_core_loop_cpu = torch.ops.torchaudio._overdrive_core_loop
except RuntimeError as err:
assert str(err) == 'No such operator torchaudio::_overdrive_core_loop'
_overdrive_core_loop_cpu = _overdrive_core_loop_generic
def overdrive(waveform: Tensor, gain: float = 20, colour: float = 20) -> Tensor:
r"""Apply a overdrive effect to the audio. Similar to SoX implementation.
This effect applies a non linear distortion to the audio signal.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
gain (float, optional): desired gain at the boost (or attenuation) in dB
Allowed range of values are 0 to 100
colour (float, optional): controls the amount of even harmonic content in the over-driven output
Allowed range of values are 0 to 100
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
"""
actual_shape = waveform.shape
device, dtype = waveform.device, waveform.dtype
# convert to 2D (..,time)
waveform = waveform.view(-1, actual_shape[-1])
gain = _dB2Linear(gain)
colour = colour / 200
last_in = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)
last_out = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)
temp = waveform * gain + colour
mask1 = temp < -1
temp[mask1] = torch.tensor(-2.0 / 3.0, dtype=dtype, device=device)
# Wrapping the constant with Tensor is required for Torchscript
mask2 = temp > 1
temp[mask2] = torch.tensor(2.0 / 3.0, dtype=dtype, device=device)
mask3 = ~mask1 & ~mask2
temp[mask3] = temp[mask3] - (temp[mask3] ** 3) * (1.0 / 3)
output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)
# Uses CPU optimized loop function if available for CPU device
if device == torch.device('cpu'):
_overdrive_core_loop_cpu(waveform, temp, last_in, last_out, output_waveform)
else:
_overdrive_core_loop_generic(waveform, temp, last_in, last_out, output_waveform)
return output_waveform.clamp(min=-1, max=1).view(actual_shape)
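# Illustrative sketch (not part of the library source): drive a quiet signal
# into the soft-clipping region. The gain and colour settings are example
# values inside the documented 0-100 range.
def _example_overdrive():
    waveform = torch.rand(1, 8000) * 0.2 - 0.1  # low-level input in [-0.1, 0.1]
    return overdrive(waveform, gain=30.0, colour=20.0)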
def phaser(
waveform: Tensor,
sample_rate: int,
gain_in: float = 0.4,
gain_out: float = 0.74,
delay_ms: float = 3.0,
decay: float = 0.4,
mod_speed: float = 0.5,
sinusoidal: bool = True,
) -> Tensor:
r"""Apply a phasing effect to the audio. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
        gain_in (float, optional): desired input gain at the boost (or attenuation) in dB.
            Allowed range of values is 0 to 1. (Default: ``0.4``)
        gain_out (float, optional): desired output gain at the boost (or attenuation) in dB.
            Allowed range of values is 0 to 1e9. (Default: ``0.74``)
        delay_ms (float, optional): desired delay in milliseconds.
            Allowed range of values is 0 to 5.0. (Default: ``3.0``)
        decay (float, optional): desired decay relative to gain-in.
            Allowed range of values is 0 to 0.99. (Default: ``0.4``)
        mod_speed (float, optional): modulation speed in Hz.
            Allowed range of values is 0.1 to 2. (Default: ``0.5``)
sinusoidal (bool, optional): If ``True``, uses sinusoidal modulation (preferable for multiple instruments)
If ``False``, uses triangular modulation (gives single instruments a sharper phasing effect)
(Default: ``True``)
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- Scott Lehman, `Effects Explained`_.
.. _Effects Explained:
https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html
"""
actual_shape = waveform.shape
device, dtype = waveform.device, waveform.dtype
# convert to 2D (channels,time)
waveform = waveform.view(-1, actual_shape[-1])
delay_buf_len = int((delay_ms * 0.001 * sample_rate) + 0.5)
delay_buf = torch.zeros(
waveform.shape[0], delay_buf_len, dtype=dtype, device=device
)
mod_buf_len = int(sample_rate / mod_speed + 0.5)
if sinusoidal:
wave_type = "SINE"
else:
wave_type = "TRIANGLE"
mod_buf = _generate_wave_table(
wave_type=wave_type,
data_type="INT",
table_size=mod_buf_len,
min=1.0,
max=float(delay_buf_len),
phase=math.pi / 2,
device=device,
)
delay_pos = 0
mod_pos = 0
output_waveform_pre_gain_list = []
waveform = waveform * gain_in
delay_buf = delay_buf * decay
waveform_list = [waveform[:, i] for i in range(waveform.size(1))]
delay_buf_list = [delay_buf[:, i] for i in range(delay_buf.size(1))]
mod_buf_list = [mod_buf[i] for i in range(mod_buf.size(0))]
for i in range(waveform.shape[-1]):
idx = int((delay_pos + mod_buf_list[mod_pos]) % delay_buf_len)
mod_pos = (mod_pos + 1) % mod_buf_len
delay_pos = (delay_pos + 1) % delay_buf_len
temp = (waveform_list[i]) + (delay_buf_list[idx])
delay_buf_list[delay_pos] = temp * decay
output_waveform_pre_gain_list.append(temp)
output_waveform = torch.stack(output_waveform_pre_gain_list, dim=1).to(
dtype=dtype, device=device
)
output_waveform.mul_(gain_out)
return output_waveform.clamp(min=-1, max=1).view(actual_shape)
def riaa_biquad(waveform: Tensor, sample_rate: int) -> Tensor:
r"""Apply RIAA vinyl playback equalization. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz).
Allowed sample rates in Hz : ``44100``,``48000``,``88200``,``96000``
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
if sample_rate == 44100:
zeros = [-0.2014898, 0.9233820]
poles = [0.7083149, 0.9924091]
elif sample_rate == 48000:
zeros = [-0.1766069, 0.9321590]
poles = [0.7396325, 0.9931330]
elif sample_rate == 88200:
zeros = [-0.1168735, 0.9648312]
poles = [0.8590646, 0.9964002]
elif sample_rate == 96000:
zeros = [-0.1141486, 0.9676817]
poles = [0.8699137, 0.9966946]
else:
raise ValueError("Sample rate must be 44.1k, 48k, 88.2k, or 96k")
# polynomial coefficients with roots zeros[0] and zeros[1]
b0 = 1.0
b1 = -(zeros[0] + zeros[1])
b2 = zeros[0] * zeros[1]
# polynomial coefficients with roots poles[0] and poles[1]
a0 = 1.0
a1 = -(poles[0] + poles[1])
a2 = poles[0] * poles[1]
# Normalize to 0dB at 1kHz
y = 2 * math.pi * 1000 / sample_rate
b_re = b0 + b1 * math.cos(-y) + b2 * math.cos(-2 * y)
a_re = a0 + a1 * math.cos(-y) + a2 * math.cos(-2 * y)
b_im = b1 * math.sin(-y) + b2 * math.sin(-2 * y)
a_im = a1 * math.sin(-y) + a2 * math.sin(-2 * y)
g = 1 / math.sqrt((b_re ** 2 + b_im ** 2) / (a_re ** 2 + a_im ** 2))
b0 *= g
b1 *= g
b2 *= g
return biquad(waveform, b0, b1, b2, a0, a1, a2)
def treble_biquad(
waveform: Tensor,
sample_rate: int,
gain: float,
central_freq: float = 3000,
Q: float = 0.707,
) -> Tensor:
r"""Design a treble tone-control effect. Similar to SoX implementation.
Args:
waveform (Tensor): audio waveform of dimension of `(..., time)`
sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB.
central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``3000``)
Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).
Returns:
Tensor: Waveform of dimension of `(..., time)`
Reference:
- http://sox.sourceforge.net/sox.html
- https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
"""
dtype = waveform.dtype
device = waveform.device
central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
Q = torch.as_tensor(Q, dtype=dtype, device=device)
gain = torch.as_tensor(gain, dtype=dtype, device=device)
w0 = 2 * math.pi * central_freq / sample_rate
alpha = torch.sin(w0) / 2 / Q
A = torch.exp(gain / 40 * math.log(10))
temp1 = 2 * torch.sqrt(A) * alpha
temp2 = (A - 1) * torch.cos(w0)
temp3 = (A + 1) * torch.cos(w0)
b0 = A * ((A + 1) + temp2 + temp1)
b1 = -2 * A * ((A - 1) + temp3)
b2 = A * ((A + 1) + temp2 - temp1)
a0 = (A + 1) - temp2 + temp1
a1 = 2 * ((A - 1) - temp3)
a2 = (A + 1) - temp2 - temp1
return biquad(waveform, b0, b1, b2, a0, a1, a2)
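# Illustrative sketch (not part of the library source): a gentle 3 dB
# high-shelf boost around the default 3 kHz corner. Parameter values are
# example settings only.
def _example_treble_biquad():
    sample_rate = 44100
    waveform = torch.rand(1, sample_rate) * 2 - 1
    return treble_biquad(waveform, sample_rate, gain=3.0)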
def _measure(
measure_len_ws: int,
samples: Tensor,
spectrum: Tensor,
noise_spectrum: Tensor,
spectrum_window: Tensor,
spectrum_start: int,
spectrum_end: int,
cepstrum_window: Tensor,
cepstrum_start: int,
cepstrum_end: int,
noise_reduction_amount: float,
measure_smooth_time_mult: float,
noise_up_time_mult: float,
noise_down_time_mult: float,
index_ns: int,
boot_count: int,
) -> float:
assert spectrum.size()[-1] == noise_spectrum.size()[-1]
samplesLen_ns = samples.size()[-1]
dft_len_ws = spectrum.size()[-1]
dftBuf = torch.zeros(dft_len_ws)
_index_ns = torch.tensor(
[index_ns] + [(index_ns + i) % samplesLen_ns for i in range(1, measure_len_ws)]
)
dftBuf[:measure_len_ws] = samples[_index_ns] * spectrum_window[:measure_len_ws]
# memset(c->dftBuf + i, 0, (p->dft_len_ws - i) * sizeof(*c->dftBuf));
dftBuf[measure_len_ws:dft_len_ws].zero_()
# lsx_safe_rdft((int)p->dft_len_ws, 1, c->dftBuf);
_dftBuf = torch.fft.rfft(dftBuf)
# memset(c->dftBuf, 0, p->spectrum_start * sizeof(*c->dftBuf));
_dftBuf[:spectrum_start].zero_()
mult: float = (
boot_count / (1.0 + boot_count) if boot_count >= 0 else measure_smooth_time_mult
)
_d = _dftBuf[spectrum_start:spectrum_end].abs()
spectrum[spectrum_start:spectrum_end].mul_(mult).add_(_d * (1 - mult))
_d = spectrum[spectrum_start:spectrum_end] ** 2
_zeros = torch.zeros(spectrum_end - spectrum_start)
_mult = (
_zeros
if boot_count >= 0
else torch.where(
_d > noise_spectrum[spectrum_start:spectrum_end],
torch.tensor(noise_up_time_mult), # if
torch.tensor(noise_down_time_mult), # else
)
)
noise_spectrum[spectrum_start:spectrum_end].mul_(_mult).add_(_d * (1 - _mult))
_d = torch.sqrt(
torch.max(
_zeros,
_d - noise_reduction_amount * noise_spectrum[spectrum_start:spectrum_end],
)
)
_cepstrum_Buf: Tensor = torch.zeros(dft_len_ws >> 1)
_cepstrum_Buf[spectrum_start:spectrum_end] = _d * cepstrum_window
_cepstrum_Buf[spectrum_end:dft_len_ws >> 1].zero_()
# lsx_safe_rdft((int)p->dft_len_ws >> 1, 1, c->dftBuf);
_cepstrum_Buf = torch.fft.rfft(_cepstrum_Buf)
result: float = float(
torch.sum(_cepstrum_Buf[cepstrum_start:cepstrum_end].abs().pow(2))
)
result = (
math.log(result / (cepstrum_end - cepstrum_start)) if result > 0 else -math.inf
)
return max(0, 21 + result)
def vad(
waveform: Tensor,
sample_rate: int,
trigger_level: float = 7.0,
trigger_time: float = 0.25,
search_time: float = 1.0,
allowed_gap: float = 0.25,
pre_trigger_time: float = 0.0,
# Fine-tuning parameters
boot_time: float = 0.35,
noise_up_time: float = 0.1,
noise_down_time: float = 0.01,
noise_reduction_amount: float = 1.35,
measure_freq: float = 20.0,
measure_duration: Optional[float] = None,
measure_smooth_time: float = 0.4,
hp_filter_freq: float = 50.0,
lp_filter_freq: float = 6000.0,
hp_lifter_freq: float = 150.0,
lp_lifter_freq: float = 2000.0,
) -> Tensor:
r"""Voice Activity Detector. Similar to SoX implementation.
Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
The algorithm currently uses a simple cepstral power measurement to detect voice,
so may be fooled by other things, especially music.
The effect can trim only from the front of the audio,
so in order to trim from the back, the reverse effect must also be used.
Args:
waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`
Tensor of shape `(channels, time)` is treated as a multi-channel recording
of the same event and the resulting output will be trimmed to the earliest
voice activity in any channel.
sample_rate (int): Sample rate of audio signal.
trigger_level (float, optional): The measurement level used to trigger activity detection.
            This may need to be changed depending on the noise level, signal level,
and other characteristics of the input audio. (Default: 7.0)
trigger_time (float, optional): The time constant (in seconds)
used to help ignore short bursts of sound. (Default: 0.25)
search_time (float, optional): The amount of audio (in seconds)
to search for quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 1.0)
allowed_gap (float, optional): The allowed gap (in seconds) between
quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 0.25)
pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
        boot_time (float, optional): The algorithm (internally) uses adaptive noise
            estimation/reduction in order to detect the start of the wanted audio.
            This option sets the time for the initial noise estimate. (Default: 0.35)
        noise_up_time (float, optional): Time constant used by the adaptive noise estimator
            for when the noise level is increasing. (Default: 0.1)
        noise_down_time (float, optional): Time constant used by the adaptive noise estimator
            for when the noise level is decreasing. (Default: 0.01)
        noise_reduction_amount (float, optional): Amount of noise reduction to use in
            the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
        measure_freq (float, optional): Frequency of the algorithm’s
            processing/measurements. (Default: 20.0)
        measure_duration (float, optional): Measurement duration.
            (Default: Twice the measurement period; i.e. with overlap.)
        measure_smooth_time (float, optional): Time constant used to smooth
            spectral measurements. (Default: 0.4)
        hp_filter_freq (float, optional): "Brick-wall" frequency of high-pass filter applied
            at the input to the detector algorithm. (Default: 50.0)
        lp_filter_freq (float, optional): "Brick-wall" frequency of low-pass filter applied
            at the input to the detector algorithm. (Default: 6000.0)
        hp_lifter_freq (float, optional): "Brick-wall" frequency of high-pass lifter used
            in the detector algorithm. (Default: 150.0)
        lp_lifter_freq (float, optional): "Brick-wall" frequency of low-pass lifter used
            in the detector algorithm. (Default: 2000.0)
Returns:
Tensor: Tensor of audio of dimension `(..., time)`.
Reference:
- http://sox.sourceforge.net/sox.html
"""
if waveform.ndim > 2:
warnings.warn(
"Expected input tensor dimension of 1 for single channel"
f" or 2 for multi-channel. Got {waveform.ndim} instead. "
"Batch semantics is not supported. "
"Please refer to https://github.com/pytorch/audio/issues/1348"
" and https://github.com/pytorch/audio/issues/1468."
)
measure_duration: float = (
2.0 / measure_freq if measure_duration is None else measure_duration
)
measure_len_ws = int(sample_rate * measure_duration + 0.5)
measure_len_ns = measure_len_ws
# for (dft_len_ws = 16; dft_len_ws < measure_len_ws; dft_len_ws <<= 1);
dft_len_ws = 16
while dft_len_ws < measure_len_ws:
dft_len_ws *= 2
measure_period_ns = int(sample_rate / measure_freq + 0.5)
measures_len = math.ceil(search_time * measure_freq)
search_pre_trigger_len_ns = measures_len * measure_period_ns
gap_len = int(allowed_gap * measure_freq + 0.5)
fixed_pre_trigger_len_ns = int(pre_trigger_time * sample_rate + 0.5)
samplesLen_ns = (
fixed_pre_trigger_len_ns + search_pre_trigger_len_ns + measure_len_ns
)
spectrum_window = torch.zeros(measure_len_ws)
for i in range(measure_len_ws):
# sox.h:741 define SOX_SAMPLE_MIN (sox_sample_t)SOX_INT_MIN(32)
spectrum_window[i] = 2.0 / math.sqrt(float(measure_len_ws))
# lsx_apply_hann(spectrum_window, (int)measure_len_ws);
spectrum_window *= torch.hann_window(measure_len_ws, dtype=torch.float)
spectrum_start: int = int(hp_filter_freq / sample_rate * dft_len_ws + 0.5)
spectrum_start: int = max(spectrum_start, 1)
spectrum_end: int = int(lp_filter_freq / sample_rate * dft_len_ws + 0.5)
spectrum_end: int = min(spectrum_end, dft_len_ws // 2)
cepstrum_window = torch.zeros(spectrum_end - spectrum_start)
for i in range(spectrum_end - spectrum_start):
cepstrum_window[i] = 2.0 / math.sqrt(float(spectrum_end) - spectrum_start)
# lsx_apply_hann(cepstrum_window,(int)(spectrum_end - spectrum_start));
cepstrum_window *= torch.hann_window(
spectrum_end - spectrum_start, dtype=torch.float
)
cepstrum_start = math.ceil(sample_rate * 0.5 / lp_lifter_freq)
cepstrum_end = math.floor(sample_rate * 0.5 / hp_lifter_freq)
cepstrum_end = min(cepstrum_end, dft_len_ws // 4)
assert cepstrum_end > cepstrum_start
noise_up_time_mult = math.exp(-1.0 / (noise_up_time * measure_freq))
noise_down_time_mult = math.exp(-1.0 / (noise_down_time * measure_freq))
measure_smooth_time_mult = math.exp(-1.0 / (measure_smooth_time * measure_freq))
trigger_meas_time_mult = math.exp(-1.0 / (trigger_time * measure_freq))
boot_count_max = int(boot_time * measure_freq - 0.5)
measure_timer_ns = measure_len_ns
boot_count = measures_index = flushedLen_ns = samplesIndex_ns = 0
# pack batch
shape = waveform.size()
waveform = waveform.view(-1, shape[-1])
n_channels, ilen = waveform.size()
mean_meas = torch.zeros(n_channels)
samples = torch.zeros(n_channels, samplesLen_ns)
spectrum = torch.zeros(n_channels, dft_len_ws)
noise_spectrum = torch.zeros(n_channels, dft_len_ws)
measures = torch.zeros(n_channels, measures_len)
has_triggered: bool = False
num_measures_to_flush: int = 0
pos: int = 0
while pos < ilen and not has_triggered:
measure_timer_ns -= 1
for i in range(n_channels):
samples[i, samplesIndex_ns] = waveform[i, pos]
# if (!p->measure_timer_ns) {
if measure_timer_ns == 0:
index_ns: int = (
samplesIndex_ns + samplesLen_ns - measure_len_ns
) % samplesLen_ns
meas: float = _measure(
measure_len_ws=measure_len_ws,
samples=samples[i],
spectrum=spectrum[i],
noise_spectrum=noise_spectrum[i],
spectrum_window=spectrum_window,
spectrum_start=spectrum_start,
spectrum_end=spectrum_end,
cepstrum_window=cepstrum_window,
cepstrum_start=cepstrum_start,
cepstrum_end=cepstrum_end,
noise_reduction_amount=noise_reduction_amount,
measure_smooth_time_mult=measure_smooth_time_mult,
noise_up_time_mult=noise_up_time_mult,
noise_down_time_mult=noise_down_time_mult,
index_ns=index_ns,
boot_count=boot_count,
)
measures[i, measures_index] = meas
mean_meas[i] = mean_meas[i] * trigger_meas_time_mult + meas * (
1.0 - trigger_meas_time_mult
)
has_triggered = has_triggered or (mean_meas[i] >= trigger_level)
if has_triggered:
n: int = measures_len
k: int = measures_index
jTrigger: int = n
jZero: int = n
j: int = 0
for j in range(n):
if (measures[i, k] >= trigger_level) and (
j <= jTrigger + gap_len
):
jZero = jTrigger = j
elif (measures[i, k] == 0) and (jTrigger >= jZero):
jZero = j
k = (k + n - 1) % n
j = min(j, jZero)
# num_measures_to_flush = range_limit(j, num_measures_to_flush, n);
num_measures_to_flush = min(max(num_measures_to_flush, j), n)
# end if has_triggered
# end if (measure_timer_ns == 0):
# end for
samplesIndex_ns += 1
pos += 1
# end while
if samplesIndex_ns == samplesLen_ns:
samplesIndex_ns = 0
if measure_timer_ns == 0:
measure_timer_ns = measure_period_ns
measures_index += 1
measures_index = measures_index % measures_len
if boot_count >= 0:
boot_count = -1 if boot_count == boot_count_max else boot_count + 1
if has_triggered:
flushedLen_ns = (measures_len - num_measures_to_flush) * measure_period_ns
samplesIndex_ns = (samplesIndex_ns + flushedLen_ns) % samplesLen_ns
res = waveform[:, pos - samplesLen_ns + flushedLen_ns:]
# unpack batch
return res.view(shape[:-1] + res.shape[-1:])
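# Illustrative sketch (not part of the library source): since `vad` only trims
# from the front, trimming both ends requires running it on the reversed
# signal as well, as the docstring notes. The synthetic "speech" below is just
# noise padded with silence, an assumption made for this example.
def _example_vad():
    sample_rate = 16000
    silence = torch.zeros(1, sample_rate)
    speech = torch.rand(1, sample_rate) * 2 - 1
    waveform = torch.cat([silence, speech, silence], dim=-1)
    trimmed_front = vad(waveform, sample_rate)
    return vad(trimmed_front.flip(-1), sample_rate).flip(-1)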
from .functional import (
amplitude_to_DB,
compute_deltas,
compute_kaldi_pitch,
create_dct,
melscale_fbanks,
linear_fbanks,
DB_to_amplitude,
detect_pitch_frequency,
inverse_spectrogram,
griffinlim,
mask_along_axis,
mask_along_axis_iid,
mu_law_encoding,
mu_law_decoding,
phase_vocoder,
sliding_window_cmn,
spectrogram,
spectral_centroid,
apply_codec,
resample,
edit_distance,
pitch_shift,
rnnt_loss,
)
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dither,
dcshift,
deemph_biquad,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
__all__ = [
'amplitude_to_DB',
'compute_deltas',
'compute_kaldi_pitch',
'create_dct',
'melscale_fbanks',
'linear_fbanks',
'DB_to_amplitude',
'detect_pitch_frequency',
'griffinlim',
'mask_along_axis',
'mask_along_axis_iid',
'mu_law_encoding',
'mu_law_decoding',
'phase_vocoder',
'sliding_window_cmn',
'spectrogram',
'inverse_spectrogram',
'spectral_centroid',
'allpass_biquad',
'band_biquad',
'bandpass_biquad',
'bandreject_biquad',
'bass_biquad',
'biquad',
'contrast',
'dither',
'dcshift',
'deemph_biquad',
'equalizer_biquad',
'filtfilt',
'flanger',
'gain',
'highpass_biquad',
'lfilter',
'lowpass_biquad',
'overdrive',
'phaser',
'riaa_biquad',
'treble_biquad',
'vad',
'apply_codec',
'resample',
'edit_distance',
'pitch_shift',
'rnnt_loss',
]
# -*- coding: utf-8 -*-
from collections.abc import Sequence
import io
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
import torchaudio
__all__ = [
    "spectrogram",
    "inverse_spectrogram",
    "griffinlim",
    "amplitude_to_DB",
    "DB_to_amplitude",
    "compute_deltas",
    "compute_kaldi_pitch",
    "melscale_fbanks",
    "linear_fbanks",
    "create_dct",
    "detect_pitch_frequency",
    "mu_law_encoding",
    "mu_law_decoding",
    "phase_vocoder",
    "mask_along_axis",
    "mask_along_axis_iid",
    "sliding_window_cmn",
    "spectral_centroid",
    "apply_codec",
    "resample",
    "edit_distance",
    "pitch_shift",
    "rnnt_loss",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: Optional[bool] = None,
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
return_complex (bool, optional):
Deprecated and not used.
Returns:
Tensor: Dimension `(..., freq, time)`, freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if return_complex is not None:
warnings.warn(
"`return_complex` argument is now deprecated and is not effective."
"`torchaudio.functional.spectrogram(power=None)` always returns a tensor with "
"complex dtype. Please remove the argument in the function call."
)
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return spec_f
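# Illustrative sketch (not part of the library source): a power spectrogram
# with a Hann window. All STFT settings below are example choices, not
# library defaults.
def _example_spectrogram():
    waveform = torch.rand(1, 16000) * 2 - 1
    window = torch.hann_window(400)
    return spectrogram(
        waveform, pad=0, window=window, n_fft=400, hop_length=200,
        win_length=400, power=2.0, normalized=False,
    )  # -> shape (1, 201, 81): (..., freq, time)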
def inverse_spectrogram(
spectrogram: Tensor,
length: Optional[int],
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
) -> Tensor:
r"""Create an inverse spectrogram or a batch of inverse spectrograms from the provided
complex-valued spectrogram.
Args:
spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time).
length (int or None): The output length of the waveform.
pad (int): Two sided padding of signal. It is only effective when ``length`` is provided.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
normalized (bool): Whether the stft output was normalized by magnitude
center (bool, optional): whether the waveform was padded on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. This parameter is provided for compatibility with the
spectrogram function and is not used. Default: ``"reflect"``
onesided (bool, optional): controls whether spectrogram was done in onesided mode.
Default: ``True``
Returns:
Tensor: Dimension `(..., time)`. Least squares estimation of the original signal.
"""
if not spectrogram.is_complex():
raise ValueError("Expected `spectrogram` to be complex dtype.")
if normalized:
spectrogram = spectrogram * window.pow(2.).sum().sqrt()
# pack batch
shape = spectrogram.size()
spectrogram = spectrogram.reshape(-1, shape[-2], shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
waveform = torch.istft(
input=spectrogram,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
normalized=False,
onesided=onesided,
length=length + 2 * pad if length is not None else None,
return_complex=False,
)
if length is not None and pad > 0:
# remove padding from front and back
waveform = waveform[:, pad:-pad]
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
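# Illustrative round trip (not part of the library source): a complex
# spectrogram (``power=None``) retains phase, so it can be inverted back to a
# waveform of the original length. The STFT settings are example choices.
def _example_inverse_spectrogram():
    waveform = torch.rand(1, 16000) * 2 - 1
    window = torch.hann_window(400)
    spec = spectrogram(
        waveform, pad=0, window=window, n_fft=400, hop_length=200,
        win_length=400, power=None, normalized=False,
    )
    return inverse_spectrogram(
        spec, length=waveform.shape[-1], pad=0, window=window, n_fft=400,
        hop_length=200, win_length=400, normalized=False,
    )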
def _get_complex_dtype(real_dtype: torch.dtype):
if real_dtype == torch.double:
return torch.cdouble
if real_dtype == torch.float:
return torch.cfloat
if real_dtype == torch.half:
return torch.complex32
raise ValueError(f'Unexpected dtype {real_dtype}')
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from
*librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`]
and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`].
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension `(..., freq, frames)`
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
        hop_length (int): Length of hop between STFT windows.
            (Default: ``win_length // 2``)
win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
Tensor: waveform of `(..., time)`, where time equals the ``length`` parameter if given.
"""
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# initialize the phase
if rand_init:
angles = torch.rand(
specgram.size(),
dtype=_get_complex_dtype(specgram.dtype), device=specgram.device)
else:
angles = torch.full(
specgram.size(), 1,
dtype=_get_complex_dtype(specgram.dtype), device=specgram.device)
# And initialize the previous iterate to 0
tprev = torch.tensor(0., dtype=specgram.dtype, device=specgram.device)
for _ in range(n_iter):
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# Rebuild the spectrogram
rebuilt = torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(angles.abs().add(1e-16))
# Store the previous iterate
tprev = rebuilt
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
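# Illustrative sketch (not part of the library source): estimate a waveform
# from a magnitude-only spectrogram. ``power`` must match how the magnitude
# was computed; the iteration count and momentum are example settings.
def _example_griffinlim():
    window = torch.hann_window(400)
    waveform = torch.rand(1, 16000) * 2 - 1
    magnitude = spectrogram(
        waveform, pad=0, window=window, n_fft=400, hop_length=200,
        win_length=400, power=2.0, normalized=False,
    )
    return griffinlim(
        magnitude, window=window, n_fft=400, hop_length=200, win_length=400,
        power=2.0, n_iter=32, momentum=0.99, length=waveform.shape[-1],
        rand_init=True,
    )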
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
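# Illustrative sketch (not part of the library source): convert a power
# spectrogram to dB with a reference of 1.0 (so db_multiplier = log10(1) = 0),
# clipping the range at 80 dB below the per-clip peak. The multiplier of 10
# is the power-scale convention noted in the docstring.
def _example_amplitude_to_DB():
    power_spec = torch.rand(1, 201, 100)  # (channel, freq, time), example shape
    return amplitude_to_DB(power_spec, multiplier=10.0, amin=1e-10,
                           db_multiplier=0.0, top_db=80.0)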
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
r"""Convert Hz to Mels.
Args:
        freq (float): Frequency in Hz
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
mels (float): Frequency in Mels
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 2595.0 * math.log10(1.0 + (freq / 700.0))
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (freq - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
if freq >= min_log_hz:
mels = min_log_mel + math.log(freq / min_log_hz) / logstep
return mels
def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
"""Convert mel bin numbers to frequencies.
Args:
mels (Tensor): Mel frequencies
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
freqs (Tensor): Mels converted in Hz
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def _create_triangular_filterbank(
all_freqs: Tensor,
f_pts: Tensor,
) -> Tensor:
"""Create a triangular filter bank.
Args:
all_freqs (Tensor): STFT freq points of size (`n_freqs`).
f_pts (Tensor): Filter mid points of size (`n_filter`).
Returns:
fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`).
"""
    # Adapted from Librosa
# calculate the difference between each filter mid point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
return fb
def melscale_fbanks(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
mel_scale: str = "htk",
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Note:
        For the sake of the numerical compatibility with librosa, not all the coefficients
        in the resulting filter bank have a magnitude of 1.
.. image:: https://download.pytorch.org/torchaudio/doc-assets/mel_fbanks.png
:alt: Visualization of generated filter bank
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
        ``A @ melscale_fbanks(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
# create filterbank
fb = _create_triangular_filterbank(all_freqs, f_pts)
    if norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
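# Illustrative sketch: build a mel filter bank matrix and apply it to a
# magnitude spectrogram with a matrix product, as the docstring above
# describes. The shapes and parameter values are hypothetical.
def _example_melscale_fbanks():
    sample_rate, n_fft = 16000, 400
    fb = melscale_fbanks(
        n_freqs=n_fft // 2 + 1, f_min=0.0, f_max=sample_rate / 2.0,
        n_mels=23, sample_rate=sample_rate, norm="slaney")
    specgram = torch.rand(1, n_fft // 2 + 1, 100)       # (channel, freq, time)
    mel_specgram = (specgram.transpose(-2, -1) @ fb).transpose(-2, -1)
    assert mel_specgram.shape == (1, 23, 100)           # (channel, n_mels, time)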
def linear_fbanks(
n_freqs: int,
f_min: float,
f_max: float,
n_filter: int,
sample_rate: int,
) -> Tensor:
r"""Creates a linear triangular filterbank.
Note:
        For the sake of the numerical compatibility with librosa, not all the coefficients
        in the resulting filter bank have a magnitude of 1.
.. image:: https://download.pytorch.org/torchaudio/doc-assets/lin_fbanks.png
:alt: Visualization of generated filter bank
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_filter (int): Number of (linear) triangular filter
sample_rate (int): Sample rate of the audio waveform
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
        ``A @ linear_fbanks(A.size(-1), ...)``.
"""
# freq bins
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# filter mid-points
f_pts = torch.linspace(f_min, f_max, n_filter + 2)
# create filterbank
fb = _create_triangular_filterbank(all_freqs, f_pts)
return fb
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
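# Illustrative sketch: right-multiplying log-mel energies by the DCT matrix
# yields cepstral coefficients (the core of an MFCC pipeline). Shapes here
# are hypothetical.
def _example_create_dct():
    n_mels, n_mfcc = 40, 13
    dct_mat = create_dct(n_mfcc=n_mfcc, n_mels=n_mels, norm="ortho")  # (n_mels, n_mfcc)
    log_mel = torch.randn(2, 100, n_mels)   # (batch, time, n_mels)
    mfcc = log_mel @ dct_mat                # (batch, time, n_mfcc)
    assert mfcc.shape == (2, 100, n_mfcc)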
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = (x_mu / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
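# Illustrative sketch: a mu-law round trip. Encoding maps [-1, 1] onto integer
# codes in [0, quantization_channels - 1]; decoding approximately inverts it,
# up to quantization error.
def _example_mu_law_roundtrip():
    waveform = torch.linspace(-1.0, 1.0, steps=101)
    encoded = mu_law_encoding(waveform, quantization_channels=256)
    decoded = mu_law_decoding(encoded, quantization_channels=256)
    assert encoded.min() >= 0 and encoded.max() <= 255
    # The companding bounds the reconstruction error well below the step size
    assert torch.max(torch.abs(decoded - waveform)) < 0.05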
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor):
A tensor of dimension `(..., freq, num_frame)` with complex dtype.
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of `(freq, 1)`
Returns:
Tensor:
Stretched spectrogram. The resulting tensor is of the same dtype as the input
spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.
Example
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time)
>>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231])
"""
if rate == 1.0:
return complex_specgrams
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:]))
# Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32
# Note torch.real is a view so it does not incur any memory copy.
real_dtype = torch.real(complex_specgrams).dtype
time_steps = torch.arange(
0,
complex_specgrams.size(-1),
rate,
device=complex_specgrams.device,
dtype=real_dtype)
alphas = time_steps % 1.0
phase_0 = complex_specgrams[..., :1].angle()
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long())
angle_0 = complex_specgrams_0.angle()
angle_1 = complex_specgrams_1.angle()
norm_0 = complex_specgrams_0.abs()
norm_1 = complex_specgrams_1.abs()
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
complex_specgrams_stretch = torch.polar(mag, phase_acc)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:])
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms `(batch, channel, freq, time)`
        mask_param (int): The number of columns to be masked is uniformly sampled from ``[0, mask_param)``
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions `(batch, channel, freq, time)`
"""
if axis not in [2, 3]:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams = specgrams.masked_fill((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram `(channel, freq, time)`
        mask_param (int): The number of columns to be masked is uniformly sampled from ``[0, mask_param)``
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions `(channel, freq, time)`
"""
if axis not in [1, 2]:
raise ValueError('Only Frequency and Time masking are supported')
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
mask = torch.arange(0, specgram.shape[axis], device=specgram.device, dtype=specgram.dtype)
mask = (mask >= mask_start) & (mask < mask_end)
if axis == 1:
mask = mask.unsqueeze(-1)
assert mask_end - mask_start < mask_param
specgram = specgram.masked_fill(mask, mask_value)
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
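# Illustrative sketch: SpecAugment-style time masking. The mask width is drawn
# uniformly from [0, mask_param), so the exact columns masked vary per call;
# the shapes below are hypothetical.
def _example_mask_along_axis():
    specgram = torch.randn(2, 80, 100)   # (channel, freq, time)
    masked = mask_along_axis(specgram, mask_param=30, mask_value=0.0, axis=2)
    assert masked.shape == specgram.shape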
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
    :math:`c_t` is the spectrogram coefficients at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension `(..., freq, time)`
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension `(..., freq, time)`
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
# twice sum of integer squared
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
Take value from first if bigger than a multiplicative factor of the second, elementwise.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
    Note: If a peak in the first half of the lags is nearly as large as the
    global maximum, the smaller lag is preferred.
"""
lag_min = int(math.ceil(sample_rate / freq_high))
    # Prefer the smallest lag whose peak is close enough to the global max
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
# Centered windowed
pad_length = (win_length - 1) // 2
# "replicate" padding in any dimension
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
        waveform (Tensor): Tensor of audio of dimension `(..., time)`
sample_rate (int): The sample rate of the waveform (Hz)
        frame_time (float, optional): Duration of a frame in seconds (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension `(..., frame)`
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
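# Illustrative sketch: pitch detection on a pure 220 Hz tone should yield
# estimates close to 220 Hz for most frames. Sample rate and tone frequency
# are arbitrary choices for the example.
def _example_detect_pitch_frequency():
    sample_rate = 16000
    t = torch.arange(sample_rate) / sample_rate          # one second of samples
    waveform = torch.sin(2 * math.pi * 220.0 * t).unsqueeze(0)
    freq = detect_pitch_frequency(waveform, sample_rate)
    assert 200.0 < freq.median().item() < 240.0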
def sliding_window_cmn(
specgram: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
specgram (Tensor): Tensor of spectrogram of dimension `(..., time, freq)`
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
            Only applicable if center == false, ignored if center == true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
        Tensor: Tensor matching input shape `(..., time, freq)`
"""
input_shape = specgram.shape
num_frames, num_feats = input_shape[-2:]
specgram = specgram.view(-1, num_frames, num_feats)
num_channels = specgram.shape[0]
dtype = specgram.dtype
device = specgram.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_specgram = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = specgram[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = specgram[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = specgram[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_specgram[:, t, :] = specgram[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_specgram[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_specgram[:, t, :] *= variance
cmn_specgram = cmn_specgram.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_specgram = cmn_specgram.squeeze(0)
return cmn_specgram
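# Illustrative sketch: mean normalization with a window spanning the whole
# (short) utterance. Shape is preserved; each feature dimension ends up
# roughly zero-mean. Sizes are hypothetical.
def _example_sliding_window_cmn():
    specgram = torch.randn(1, 50, 13)    # (channel, time, freq)
    cmn = sliding_window_cmn(specgram, cmn_window=50, center=True)
    assert cmn.shape == specgram.shape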
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension `(..., time)`
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
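# Illustrative sketch: the spectral centroid of (approximately) white noise
# sits near the middle of the spectrum. Assumes the module-level `spectrogram`
# function referenced above is in scope; parameter values are arbitrary.
def _example_spectral_centroid():
    sample_rate, n_fft = 16000, 400
    waveform = torch.rand(1, sample_rate) * 2 - 1
    centroid = spectral_centroid(
        waveform, sample_rate, pad=0, window=torch.hann_window(n_fft),
        n_fft=n_fft, hop_length=n_fft // 2, win_length=n_fft)
    assert centroid.shape[0] == 1 and centroid.shape[-1] > 0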
@_mod_utils.requires_sox()
def apply_codec(
waveform: Tensor,
sample_rate: int,
format: str,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
) -> Tensor:
r"""
Apply codecs as a form of augmentation.
Args:
        waveform (Tensor): Audio data. Must be 2 dimensional. See also ``channels_first``.
sample_rate (int): Sample rate of the audio waveform.
format (str): File format.
channels_first (bool, optional):
When True, both the input and output Tensor have dimension `(channel, time)`.
Otherwise, they have dimension `(time, channel)`.
compression (float or None, optional): Used for formats other than WAV.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
encoding (str or None, optional): Changes the encoding for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
bits_per_sample (int or None, optional): Changes the bit depth for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
Returns:
Tensor: Resulting Tensor.
If ``channels_first=True``, it has `(channel, time)` else `(time, channel)`.
"""
bytes = io.BytesIO()
torchaudio.backend.sox_io_backend.save(bytes,
waveform,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample
)
bytes.seek(0)
augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file(
bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format)
return augmented
@_mod_utils.requires_kaldi()
def compute_kaldi_pitch(
waveform: torch.Tensor,
sample_rate: float,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_f0: float = 50,
max_f0: float = 400,
soft_min_f0: float = 10.0,
penalty_factor: float = 0.1,
lowpass_cutoff: float = 1000,
resample_frequency: float = 4000,
delta_pitch: float = 0.005,
nccf_ballast: float = 7000,
lowpass_filter_width: int = 1,
upsample_filter_width: int = 5,
max_frames_latency: int = 0,
frames_per_chunk: int = 0,
simulate_first_pass_online: bool = False,
recompute_frame: int = 500,
snip_edges: bool = True,
) -> torch.Tensor:
"""Extract pitch based on method described in *A pitch extraction algorithm tuned
for automatic speech recognition* [:footcite:`6854049`].
This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.
Args:
waveform (Tensor):
The input waveform of shape `(..., time)`.
sample_rate (float):
Sample rate of `waveform`.
frame_length (float, optional):
Frame length in milliseconds. (default: 25.0)
frame_shift (float, optional):
Frame shift in milliseconds. (default: 10.0)
min_f0 (float, optional):
Minimum F0 to search for (Hz) (default: 50.0)
max_f0 (float, optional):
Maximum F0 to search for (Hz) (default: 400.0)
soft_min_f0 (float, optional):
Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)
penalty_factor (float, optional):
            Cost factor for F0 change. (default: 0.1)
lowpass_cutoff (float, optional):
Cutoff frequency for LowPass filter (Hz) (default: 1000)
resample_frequency (float, optional):
Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.
(default: 4000)
        delta_pitch (float, optional):
Smallest relative change in pitch that our algorithm measures. (default: 0.005)
nccf_ballast (float, optional):
Increasing this factor reduces NCCF for quiet frames (default: 7000)
lowpass_filter_width (int, optional):
Integer that determines filter width of lowpass filter, more gives sharper filter.
(default: 1)
upsample_filter_width (int, optional):
Integer that determines filter width when upsampling NCCF. (default: 5)
max_frames_latency (int, optional):
Maximum number of frames of latency that we allow pitch tracking to introduce into
the feature processing (affects output only if ``frames_per_chunk > 0`` and
``simulate_first_pass_online=True``) (default: 0)
frames_per_chunk (int, optional):
The number of frames used for energy normalization. (default: 0)
        simulate_first_pass_online (bool, optional):
            If true, the function will output features that correspond to what an online decoder
            would see in the first pass of decoding -- not the final version of the features,
            which is the default. Relevant if ``frames_per_chunk > 0``. (default: False)
recompute_frame (int, optional):
Only relevant for compatibility with online pitch extraction.
A non-critical parameter; the frame at which we recompute some of the forward pointers,
after revising our estimate of the signal energy.
Relevant if ``frames_per_chunk > 0``. (default: 500)
snip_edges (bool, optional):
If this is set to false, the incomplete frames near the ending edge won't be snipped,
so that the number of frames is the file size divided by the frame-shift.
This makes different types of features give the same number of frames. (default: True)
Returns:
        Tensor: Pitch feature. Shape: `(..., frames, 2)` where the last dimension
corresponds to pitch and NCCF.
"""
shape = waveform.shape
waveform = waveform.reshape(-1, shape[-1])
result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(
waveform, sample_rate, frame_length, frame_shift,
min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,
resample_frequency, delta_pitch, nccf_ballast,
lowpass_filter_width, upsample_filter_width, max_frames_latency,
frames_per_chunk, simulate_first_pass_online, recompute_frame,
snip_edges,
)
result = result.reshape(shape[:-1] + result.shape[-2:])
return result
def _get_sinc_resample_kernel(
orig_freq: int,
new_freq: int,
gcd: int,
lowpass_filter_width: int,
rolloff: float,
resampling_method: str,
beta: Optional[float],
device: torch.device = torch.device("cpu"),
dtype: Optional[torch.dtype] = None):
if not (int(orig_freq) == orig_freq and int(new_freq) == new_freq):
raise Exception(
"Frequencies must be of integer type to ensure quality resampling computation. "
"To work around this, manually convert both frequencies to integer values "
"that maintain their resampling rate ratio before passing them into the function. "
"Example: To downsample a 44100 hz waveform by a factor of 8, use "
"`orig_freq=8` and `new_freq=1` instead of `orig_freq=44100` and `new_freq=5512.5`. "
"For more information, please refer to https://github.com/pytorch/audio/issues/1487."
)
if resampling_method not in ['sinc_interpolation', 'kaiser_window']:
raise ValueError('Invalid resampling method: {}'.format(resampling_method))
orig_freq = int(orig_freq) // gcd
new_freq = int(new_freq) // gcd
assert lowpass_filter_width > 0
kernels = []
base_freq = min(orig_freq, new_freq)
# This will perform antialiasing filtering by removing the highest frequencies.
# At first I thought I only needed this when downsampling, but when upsampling
# you will get edge artifacts without this, as the edge is equivalent to zero padding,
# which will add high freq artifacts.
base_freq *= rolloff
# The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor)
# using the sinc interpolation formula:
# x(t) = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - t))
# We can then sample the function x(t) with a different sample rate:
# y[j] = x(j / new_freq)
# or,
# y[j] = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - j / new_freq))
# We see here that y[j] is the convolution of x[i] with a specific filter, for which
    # we take an FIR approximation, stopping when we see at least `lowpass_filter_width` zero crossings.
# But y[j+1] is going to have a different set of weights and so on, until y[j + new_freq].
# Indeed:
    # y[j + new_freq] = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - (j + new_freq) / new_freq))
# = sum_i x[i] sinc(pi * orig_freq * ((i - orig_freq) / orig_freq - j / new_freq))
# = sum_i x[i + orig_freq] sinc(pi * orig_freq * (i / orig_freq - j / new_freq))
# so y[j+new_freq] uses the same filter as y[j], but on a shifted version of x by `orig_freq`.
# This will explain the F.conv1d after, with a stride of orig_freq.
width = math.ceil(lowpass_filter_width * orig_freq / base_freq)
# If orig_freq is still big after GCD reduction, most filters will be very unbalanced, i.e.,
# they will have a lot of almost zero values to the left or to the right...
# There is probably a way to evaluate those filters more efficiently, but this is kept for
# future work.
idx_dtype = dtype if dtype is not None else torch.float64
idx = torch.arange(-width, width + orig_freq, device=device, dtype=idx_dtype)
for i in range(new_freq):
t = (-i / new_freq + idx / orig_freq) * base_freq
t = t.clamp_(-lowpass_filter_width, lowpass_filter_width)
# we do not use built in torch windows here as we need to evaluate the window
# at specific positions, not over a regular grid.
if resampling_method == "sinc_interpolation":
window = torch.cos(t * math.pi / lowpass_filter_width / 2)**2
else:
# kaiser_window
if beta is None:
beta = 14.769656459379492
beta_tensor = torch.tensor(float(beta))
window = torch.i0(beta_tensor * torch.sqrt(1 - (t / lowpass_filter_width) ** 2)) / torch.i0(beta_tensor)
t *= math.pi
kernel = torch.where(t == 0, torch.tensor(1.).to(t), torch.sin(t) / t)
kernel.mul_(window)
kernels.append(kernel)
scale = base_freq / orig_freq
kernels = torch.stack(kernels).view(new_freq, 1, -1).mul_(scale)
if dtype is None:
kernels = kernels.to(dtype=torch.float32)
return kernels, width
def _apply_sinc_resample_kernel(
waveform: Tensor,
orig_freq: int,
new_freq: int,
gcd: int,
kernel: Tensor,
width: int,
):
orig_freq = int(orig_freq) // gcd
new_freq = int(new_freq) // gcd
# pack batch
shape = waveform.size()
waveform = waveform.view(-1, shape[-1])
num_wavs, length = waveform.shape
waveform = torch.nn.functional.pad(waveform, (width, width + orig_freq))
resampled = torch.nn.functional.conv1d(waveform[:, None], kernel, stride=orig_freq)
resampled = resampled.transpose(1, 2).reshape(num_wavs, -1)
target_length = int(math.ceil(new_freq * length / orig_freq))
resampled = resampled[..., :target_length]
# unpack batch
resampled = resampled.view(shape[:-1] + resampled.shape[-1:])
return resampled
def resample(
waveform: Tensor,
orig_freq: int,
new_freq: int,
lowpass_filter_width: int = 6,
rolloff: float = 0.99,
resampling_method: str = "sinc_interpolation",
beta: Optional[float] = None,
) -> Tensor:
r"""Resamples the waveform at the new frequency using bandlimited interpolation.
https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html
Note:
``transforms.Resample`` precomputes and reuses the resampling kernel, so using it will result in
more efficient computation if resampling multiple waveforms with the same resampling parameters.
Args:
waveform (Tensor): The input signal of dimension `(..., time)`
orig_freq (int): The original frequency of the signal
new_freq (int): The desired frequency
lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
but less efficient. (Default: ``6``)
rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist.
Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``)
resampling_method (str, optional): The resampling method to use.
Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``)
beta (float or None, optional): The shape parameter used for kaiser window.
Returns:
Tensor: The waveform at the new frequency of dimension `(..., time).`
"""
assert orig_freq > 0.0 and new_freq > 0.0
if orig_freq == new_freq:
return waveform
gcd = math.gcd(int(orig_freq), int(new_freq))
kernel, width = _get_sinc_resample_kernel(orig_freq, new_freq, gcd, lowpass_filter_width, rolloff,
resampling_method, beta, waveform.device, waveform.dtype)
resampled = _apply_sinc_resample_kernel(waveform, orig_freq, new_freq, gcd, kernel, width)
return resampled
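# Illustrative sketch: downsampling by an integer factor shortens the waveform
# by exactly that factor. Lengths and rates are arbitrary.
def _example_resample():
    waveform = torch.randn(1, 16000)
    resampled = resample(waveform, orig_freq=16000, new_freq=8000)
    assert resampled.shape == (1, 8000)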
@torch.jit.unused
def edit_distance(seq1: Sequence, seq2: Sequence) -> int:
"""
Calculate the word level edit (Levenshtein) distance between two sequences.
The function computes an edit distance allowing deletion, insertion and
substitution. The result is an integer.
For most applications, the two input sequences should be the same type. If
two strings are given, the output is the edit distance between the two
strings (character edit distance). If two lists of strings are given, the
output is the edit distance between sentences (word edit distance). Users
may want to normalize the output by the length of the reference sequence.
    TorchScript is not supported for this function.
Args:
seq1 (Sequence): the first sequence to compare.
seq2 (Sequence): the second sequence to compare.
Returns:
int: The distance between the first and second sequences.
"""
len_sent2 = len(seq2)
dold = list(range(len_sent2 + 1))
dnew = [0 for _ in range(len_sent2 + 1)]
for i in range(1, len(seq1) + 1):
dnew[0] = i
for j in range(1, len_sent2 + 1):
if seq1[i - 1] == seq2[j - 1]:
dnew[j] = dold[j - 1]
else:
substitution = dold[j - 1] + 1
insertion = dnew[j - 1] + 1
deletion = dold[j] + 1
dnew[j] = min(substitution, insertion, deletion)
dnew, dold = dold, dnew
return int(dold[-1])
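# Illustrative sketch: character-level distance for strings, word-level
# distance for lists of words, matching the docstring above.
def _example_edit_distance():
    assert edit_distance("kitten", "sitting") == 3
    assert edit_distance("hello world".split(), "hello there world".split()) == 1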
def pitch_shift(
waveform: Tensor,
sample_rate: int,
n_steps: int,
bins_per_octave: int = 12,
n_fft: int = 512,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window: Optional[Tensor] = None,
) -> Tensor:
"""
Shift the pitch of a waveform by ``n_steps`` steps.
Args:
waveform (Tensor): The input waveform of shape `(..., time)`.
sample_rate (int): Sample rate of `waveform`.
n_steps (int): The (fractional) steps to shift `waveform`.
bins_per_octave (int, optional): The number of steps per octave (Default: ``12``).
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``).
win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``).
        hop_length (int or None, optional): Length of hop between STFT windows. If None, then
            ``n_fft // 4`` is used (Default: ``None``).
window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window.
If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``).
Returns:
Tensor: The pitch-shifted audio waveform of shape `(..., time)`.
"""
if hop_length is None:
hop_length = n_fft // 4
if win_length is None:
win_length = n_fft
if window is None:
window = torch.hann_window(window_length=win_length, device=waveform.device)
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
ori_len = shape[-1]
rate = 2.0 ** (-float(n_steps) / bins_per_octave)
spec_f = torch.stft(input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True)
phase_advance = torch.linspace(0, math.pi * hop_length, spec_f.shape[-2], device=spec_f.device)[..., None]
spec_stretch = phase_vocoder(spec_f, rate, phase_advance)
len_stretch = int(round(ori_len / rate))
waveform_stretch = torch.istft(spec_stretch,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=len_stretch)
waveform_shift = resample(waveform_stretch, int(sample_rate / rate), sample_rate)
shift_len = waveform_shift.size()[-1]
if shift_len > ori_len:
waveform_shift = waveform_shift[..., :ori_len]
else:
waveform_shift = torch.nn.functional.pad(waveform_shift, [0, ori_len - shift_len])
# unpack batch
waveform_shift = waveform_shift.view(shape[:-1] + waveform_shift.shape[-1:])
return waveform_shift
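# Illustrative sketch: pitch shifting preserves the waveform length. Shifting
# by a full octave (12 steps) keeps the internal resampling ratio integral,
# so the example stays cheap; the values are otherwise arbitrary.
def _example_pitch_shift():
    waveform = torch.randn(1, 4000)
    shifted = pitch_shift(waveform, sample_rate=4000, n_steps=12)
    assert shifted.shape == waveform.shape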
def rnnt_loss(
logits: Tensor,
targets: Tensor,
logit_lengths: Tensor,
target_lengths: Tensor,
blank: int = -1,
clamp: float = -1,
reduction: str = "mean",
):
"""Compute the RNN Transducer loss from *Sequence Transduction with Recurrent Neural Networks*
[:footcite:`graves2012sequence`].
The RNN Transducer loss extends the CTC loss by defining a distribution over output
sequences of all lengths, and by jointly modelling both input-output and output-output
dependencies.
Args:
logits (Tensor): Tensor of dimension `(batch, max seq length, max target length + 1, class)`
containing output from joiner
targets (Tensor): Tensor of dimension `(batch, max target length)` containing targets with zero padded
logit_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of each sequence from encoder
target_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of targets for each sequence
blank (int, optional): blank label (Default: ``-1``)
clamp (float, optional): clamp for gradients (Default: ``-1``)
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. (Default: ``'mean'``)
Returns:
Tensor: Loss with the reduction option applied. If ``reduction`` is ``'none'``, then size `(batch)`,
otherwise scalar.
"""
if reduction not in ['none', 'mean', 'sum']:
raise ValueError("reduction should be one of 'none', 'mean', or 'sum'")
if blank < 0: # reinterpret blank index if blank < 0.
blank = logits.shape[-1] + blank
costs, _ = torch.ops.torchaudio.rnnt_loss(
logits=logits,
targets=targets,
logit_lengths=logit_lengths,
target_lengths=target_lengths,
blank=blank,
clamp=clamp,
)
if reduction == 'mean':
return costs.mean()
elif reduction == 'sum':
return costs.sum()
return costs
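# Illustrative sketch: RNN-T loss on random joiner output for one utterance
# with 4 encoder frames, a 2-token target, and 5 output classes. Uses the
# default blank (the last class) and requires the compiled torchaudio
# extension backing `torch.ops.torchaudio.rnnt_loss`.
def _example_rnnt_loss():
    logits = torch.randn(1, 4, 3, 5, requires_grad=True)   # (batch, frames, targets + 1, classes)
    targets = torch.tensor([[1, 2]], dtype=torch.int32)
    logit_lengths = torch.tensor([4], dtype=torch.int32)
    target_lengths = torch.tensor([2], dtype=torch.int32)
    loss = rnnt_loss(logits, targets, logit_lengths, target_lengths)
    loss.backward()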
|
#!/usr/bin/env python3
"""
This script should use a very simple, functional programming style.
Avoid Jinja macros in favor of native Python functions.
Don't go overboard on code generation; use Python only to generate
content that can't be easily declared statically using CircleCI's YAML API.
Data declarations (e.g. the nested loops for defining the configuration matrix)
should be at the top of the file for easy updating.
See this comment for design rationale:
https://github.com/pytorch/vision/pull/1321#issuecomment-531033978
"""
import jinja2
from jinja2 import select_autoescape
import yaml
import os.path
PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
CU_VERSIONS_DICT = {"linux": ["cpu", "cu102", "cu111", "cu113", "rocm4.1"],
"windows": ["cpu", "cu113"],
"macos": ["cpu"]}
DOC_VERSION = ('linux', '3.8')
def build_workflows(prefix='', upload=False, filter_branch=None, indentation=6):
w = []
w += build_download_job(filter_branch)
for btype in ["wheel", "conda"]:
for os_type in ["linux", "macos", "windows"]:
for python_version in PYTHON_VERSIONS:
for cu_version in CU_VERSIONS_DICT[os_type]:
fb = filter_branch
                    if cu_version.startswith("rocm") and btype == "conda":
continue
if not fb and (os_type == 'linux' and
btype == 'wheel' and
python_version == '3.8' and
cu_version == 'cpu'):
# the fields must match the build_docs "requires" dependency
fb = '/.*/'
w += build_workflow_pair(btype, os_type, python_version, cu_version, fb, prefix, upload)
if not filter_branch:
# Build on every pull request, but upload only on nightly and tags
w += build_doc_job('/.*/')
w += upload_doc_job('nightly')
w += docstring_parameters_sync_job(None)
return indent(indentation, w)
def build_download_job(filter_branch):
job = {
"name": "download_third_parties_nix",
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
return [{"download_third_parties_nix": job}]
def build_workflow_pair(btype, os_type, python_version, cu_version, filter_branch, prefix='', upload=False):
w = []
base_workflow_name = f"{prefix}binary_{os_type}_{btype}_py{python_version}_{cu_version}"
w.append(generate_base_workflow(base_workflow_name, python_version, cu_version, filter_branch, os_type, btype))
if upload:
w.append(generate_upload_workflow(base_workflow_name, filter_branch, os_type, btype, cu_version))
if filter_branch == 'nightly' and os_type != 'macos':
pydistro = 'pip' if btype == 'wheel' else 'conda'
w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, cu_version, os_type))
return w
def build_doc_job(filter_branch):
job = {
"name": "build_docs",
"python_version": "3.8",
"requires": ["binary_linux_wheel_py3.8_cpu", ],
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
return [{"build_docs": job}]
def upload_doc_job(filter_branch):
job = {
"name": "upload_docs",
"context": "org-member",
"python_version": "3.8",
"requires": ["build_docs", ],
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
return [{"upload_docs": job}]
def docstring_parameters_sync_job(filter_branch):
job = {
"name": "docstring_parameters_sync",
"python_version": "3.8",
"requires": ["binary_linux_wheel_py3.8_cpu", ],
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
return [{"docstring_parameters_sync": job}]
def generate_base_workflow(base_workflow_name, python_version, cu_version, filter_branch, os_type, btype):
d = {
"name": base_workflow_name,
"python_version": python_version,
"cuda_version": cu_version,
}
if os_type in ['linux', 'macos']:
d['requires'] = ['download_third_parties_nix']
if btype == 'conda':
d['conda_docker_image'] = f'pytorch/conda-builder:{cu_version.replace("cu1","cuda1")}'
elif cu_version.startswith('cu'):
d['wheel_docker_image'] = f'pytorch/manylinux-{cu_version.replace("cu1","cuda1")}'
elif cu_version.startswith('rocm'):
d["wheel_docker_image"] = f"pytorch/manylinux-rocm:{cu_version[len('rocm'):]}"
if filter_branch:
d["filters"] = gen_filter_branch_tree(filter_branch)
return {f"binary_{os_type}_{btype}": d}
def gen_filter_branch_tree(*branches):
return {
"branches": {
"only": list(branches),
},
"tags": {
# Using a raw string here to avoid having to escape
# anything
"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
}
}
def generate_upload_workflow(base_workflow_name, filter_branch, os_type, btype, cu_version):
d = {
"name": "{base_workflow_name}_upload".format(base_workflow_name=base_workflow_name),
"context": "org-member",
"requires": [base_workflow_name],
}
if btype == 'wheel':
d["subfolder"] = "" if os_type == 'macos' else cu_version + "/"
if filter_branch:
d["filters"] = gen_filter_branch_tree(filter_branch)
return {"binary_{btype}_upload".format(btype=btype): d}
def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, cu_version, os_type):
required_build_suffix = "_upload"
required_build_name = base_workflow_name + required_build_suffix
    smoke_suffix = f"smoke_test_{pydistro}"
d = {
"name": f"{base_workflow_name}_{smoke_suffix}",
"requires": [required_build_name],
"python_version": python_version,
"cuda_version": cu_version,
}
if filter_branch:
d["filters"] = gen_filter_branch_tree(filter_branch)
smoke_name = f"smoke_test_{os_type}_{pydistro}"
if pydistro == "conda" and os_type == "linux" and cu_version != "cpu":
smoke_name += "_gpu"
return {smoke_name: d}
def indent(indentation, data_list):
return ("\n" + " " * indentation).join(yaml.dump(data_list).splitlines())
def unittest_workflows(indentation=6):
jobs = []
jobs += build_download_job(None)
for os_type in ["linux", "windows", "macos"]:
for device_type in ["cpu", "gpu"]:
if os_type == "macos" and device_type == "gpu":
continue
for i, python_version in enumerate(PYTHON_VERSIONS):
job = {
"name": f"unittest_{os_type}_{device_type}_py{python_version}",
"python_version": python_version,
"cuda_version": 'cpu' if device_type == "cpu" else "cu113",
}
if os_type != "windows":
job['requires'] = ['download_third_parties_nix']
jobs.append({f"unittest_{os_type}_{device_type}": job})
if i == 0 and os_type == "linux" and device_type == "cpu":
jobs.append({
"stylecheck": {
"name": f"stylecheck_py{python_version}",
"python_version": python_version,
"cuda_version": "cpu",
}
})
return indent(indentation, jobs)
if __name__ == "__main__":
d = os.path.dirname(__file__)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(d),
lstrip_blocks=True,
autoescape=select_autoescape(enabled_extensions=('html', 'xml')),
)
with open(os.path.join(d, 'config.yml'), 'w') as f:
f.write(env.get_template('config.yml.in').render(
build_workflows=build_workflows,
unittest_workflows=unittest_workflows,
))
f.write("\n")
|
#!/usr/bin/env python
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse
import codecs
import difflib
import fnmatch
import io
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu'
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for file in files:
if recursive and os.path.isdir(file):
for dirpath, dnames, fnames in os.walk(file):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
# os.walk() supports trimming down the dnames list
# by modifying it in-place,
# to avoid unnecessary directory listings.
dnames[:] = [
x for x in dnames
if
not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [
x for x in fpaths if not fnmatch.fnmatch(x, pattern)
]
for f in fpaths:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
else:
out.append(file)
return out
def make_diff(file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile='{}\t(original)'.format(file),
tofile='{}\t(reformatted)'.format(file),
n=3))
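# Illustrative sketch (hypothetical file content): the diff carries the file
# name in both headers plus the usual -/+ change lines from difflib.
def _example_make_diff():
    diff = make_diff('a.cpp', ['int  x;\n'], ['int x;\n'])
    assert diff[0].startswith('--- a.cpp')
    assert any(line.startswith('-int  x;') for line in diff)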
class DiffError(Exception):
def __init__(self, message, errs=None):
super(DiffError, self).__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super(UnexpectedError, self).__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file):
try:
ret = run_clang_format_diff(args, file)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError('{}: {}: {}'.format(file, e.__class__.__name__,
e), e)
def run_clang_format_diff(args, file):
try:
with io.open(file, 'r', encoding='utf-8') as f:
original = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable, file]
# Use of utf-8 to decode the process output.
#
# Hopefully, this is the correct thing to do.
#
# It's done due to the following assumptions (which may be incorrect):
# - clang-format will returns the bytes read from the files as-is,
# without conversion, and it is already assumed that the files use utf-8.
# - if the diagnostics were internationalized, they would use utf-8:
# > Adding Translations to Clang
# >
# > Not possible yet!
# > Diagnostic strings should be written in UTF-8,
# > the client can translate to the relevant code page if needed.
# > Each translation completely replaces the format string
# > for the diagnostic.
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
try:
proc = subprocess.Popen(
invocation,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf-8')
except OSError as exc:
raise DiffError(
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(invocation), exc
)
)
proc_stdout = proc.stdout
proc_stderr = proc.stderr
# hopefully the stderr pipe won't get full and block the process
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError(
"Command '{}' returned non-zero exit status {}".format(
subprocess.list2cmdline(invocation), proc.returncode
),
errs,
)
return make_diff(file, original, outs), errs
def bold_red(s):
return '\x1b[1m\x1b[31m' + s + '\x1b[0m'
def colorize(diff_lines):
def bold(s):
return '\x1b[1m' + s + '\x1b[0m'
def cyan(s):
return '\x1b[36m' + s + '\x1b[0m'
def green(s):
return '\x1b[32m' + s + '\x1b[0m'
def red(s):
return '\x1b[31m' + s + '\x1b[0m'
for line in diff_lines:
if line[:4] in ['--- ', '+++ ']:
yield bold(line)
elif line.startswith('@@ '):
yield cyan(line)
elif line.startswith('+'):
yield green(line)
elif line.startswith('-'):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = 'error:'
if use_colors:
error_text = bold_red(error_text)
print("{}: {} {}".format(prog, error_text, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--clang-format-executable',
metavar='EXECUTABLE',
help='path to the clang-format executable',
default='clang-format')
parser.add_argument(
'--extensions',
help='comma separated list of file extensions (default: {})'.format(
DEFAULT_EXTENSIONS),
default=DEFAULT_EXTENSIONS)
parser.add_argument(
'-r',
'--recursive',
action='store_true',
help='run recursively over directories')
parser.add_argument('files', metavar='file', nargs='+')
parser.add_argument(
'-q',
'--quiet',
action='store_true')
parser.add_argument(
'-j',
metavar='N',
type=int,
default=0,
help='run N clang-format jobs in parallel'
' (default number of cpus + 1)')
parser.add_argument(
'--color',
default='auto',
choices=['auto', 'always', 'never'],
help='show colored diff (default: auto)')
parser.add_argument(
'-e',
'--exclude',
metavar='PATTERN',
action='append',
default=[],
help='exclude paths matching the given glob-like pattern(s)'
' from recursive search')
args = parser.parse_args()
    # use default signal handling; like diff, return the SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == 'always':
colored_stdout = True
colored_stderr = True
elif args.color == 'auto':
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
    version_invocation = [args.clang_format_executable, "--version"]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(version_invocation), e
),
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
files = list_files(
args.files,
recursive=args.recursive,
exclude=args.exclude,
extensions=args.extensions.split(','))
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(
partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
return retcode
if __name__ == '__main__':
sys.exit(main())
|
import asyncio
import aiohttp # type: ignore
import math
import os
import datetime
import re
import boto3 # type: ignore
import json
import io
import argparse
import gzip
from cryptography.hazmat.backends import default_backend
import jwt
import requests
import time
from typing import *
BUCKET = os.getenv("bucket", "ossci-job-status")
APP_ID = int(os.environ["app_id"])
# The private key needs to maintain its newlines, get it via
# $ cat key.pem | tr '\n' '|' | pbcopy
PRIVATE_KEY = os.environ["private_key"].replace("|", "\n")
def app_headers() -> Dict[str, str]:
cert_bytes = PRIVATE_KEY.encode()
private_key = default_backend().load_pem_private_key(cert_bytes, None) # type: ignore
time_since_epoch_in_seconds = int(time.time())
payload = {
# issued at time
"iat": time_since_epoch_in_seconds,
# JWT expiration time (10 minute maximum)
"exp": time_since_epoch_in_seconds + (10 * 60),
# GitHub App's identifier
"iss": APP_ID,
}
actual_jwt = jwt.encode(payload, private_key, algorithm="RS256")
headers = {
"Authorization": f"Bearer {actual_jwt}",
"Accept": "application/vnd.github.machine-man-preview+json",
}
return headers
def jprint(obj: Any) -> None:
print(json.dumps(obj, indent=2))
def installation_id(user: str) -> int:
r_bytes = requests.get(
"https://api.github.com/app/installations", headers=app_headers()
)
r = json.loads(r_bytes.content.decode())
for item in r:
if item["account"]["login"] == user:
return int(item["id"])
raise RuntimeError(f"User {user} not found in {r}")
def user_token(user: str) -> str:
"""
Authorize this request with the GitHub app set by the 'app_id' and
'private_key' environment variables.
1. Get the installation ID for the user that has installed the app
2. Request a new token for that user
3. Return it so it can be used in future API requests
"""
    # Hardcode the installation to PyTorch so we can always get a valid installation ID
id = installation_id("pytorch")
url = f"https://api.github.com/app/installations/{id}/access_tokens"
r_bytes = requests.post(url, headers=app_headers())
r = json.loads(r_bytes.content.decode())
token = str(r["token"])
return token
if "AWS_KEY_ID" in os.environ and "AWS_SECRET_KEY" in os.environ:
# Use keys for local development
session = boto3.Session(
aws_access_key_id=os.environ.get("AWS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_KEY"),
)
else:
# In the Lambda, use permissions on the Lambda's role
session = boto3.Session()
s3 = session.resource("s3")
def compress_query(query: str) -> str:
query = query.replace("\n", "")
    query = re.sub(r"\s+", " ", query)
return query
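# Illustrative sketch: newlines are dropped and runs of whitespace collapse to
# single spaces before the query is sent.
def _example_compress_query():
    assert compress_query("query {\n  viewer { login }\n}") == "query { viewer { login }}"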
def head_commit_query(user: str, repo: str, branches: List[str]) -> str:
"""
Fetch the head commit for a list of branches
"""
def branch_part(branch: str, num: int) -> str:
return f"""
r{num}: repository(name: "{repo}", owner: "{user}") {{
ref(qualifiedName:"refs/heads/{branch}") {{
name
target {{
... on Commit {{
oid
}}
}}
}}
}}
"""
parts = [branch_part(branch, i) for i, branch in enumerate(branches)]
return "{" + "\n".join(parts) + "}"
def extract_gha(suites: List[Dict[str, Any]]) -> List[Dict[str, str]]:
jobs = []
for suite in suites:
suite = suite["node"]
if suite["workflowRun"] is None:
# If no jobs were triggered this will be empty
continue
workflow = suite["workflowRun"]["workflow"]["name"]
for run in suite["checkRuns"]["nodes"]:
conclusion = run["conclusion"]
if conclusion is None:
if run["status"].lower() == "queued":
conclusion = "queued"
elif run["status"].lower() == "in_progress":
conclusion = "pending"
else:
raise RuntimeError(f"unexpected run {run}")
jobs.append(
{
"name": f"{workflow} / {run['name']}",
"status": conclusion.lower(),
"url": run["detailsUrl"],
}
)
return jobs
def extract_status(contexts: List[Dict[str, Any]]) -> List[Dict[str, str]]:
jobs = []
for context in contexts:
jobs.append(
{
"name": context["context"],
"status": context["state"].lower(),
"url": context["targetUrl"],
}
)
return jobs
def extract_jobs(raw_commits: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
commits = []
for raw_commit in raw_commits:
if raw_commit["status"] is None:
# Will be none if no non-GHA jobs were triggered
status = []
else:
status = extract_status(raw_commit["status"]["contexts"])
gha = extract_gha(raw_commit["checkSuites"]["edges"])
jobs = status + gha
if raw_commit["author"]["user"] is None:
author = raw_commit["author"]["name"]
else:
author = raw_commit["author"]["user"]["login"]
commits.append(
{
"sha": raw_commit["oid"],
"headline": raw_commit["messageHeadline"],
"body": raw_commit["messageBody"],
"author": author,
"date": raw_commit["authoredDate"],
"jobs": jobs,
}
)
return commits
class BranchHandler:
def __init__(
self,
gql: Any,
user: str,
repo: str,
name: str,
head: str,
history_size: int,
fetch_size: int,
):
self.gql = gql
self.user = user
self.repo = repo
self.name = name
self.head = head
self.fetch_size = fetch_size
self.history_size = history_size
def write_to_s3(self, data: Any) -> None:
content = json.dumps(data, default=str)
buf = io.BytesIO()
gzipfile = gzip.GzipFile(fileobj=buf, mode="w")
gzipfile.write(content.encode())
gzipfile.close()
bucket = s3.Bucket(BUCKET)
prefix = f"v6/{self.user}/{self.repo}/{self.name.replace('/', '_')}.json"
bucket.put_object(
Key=prefix,
Body=buf.getvalue(),
ContentType="application/json",
ContentEncoding="gzip",
Expires="0",
)
print(f"Wrote {len(data)} commits from {self.name} to {prefix}")
def query(self, offset: int) -> str:
after = ""
# Cursors for history fetches are formatted like after: "<sha> <offset>", but
# the first commit isn't included, so shift the offsets down by one and omit
# "after" entirely for the first batch
if offset > 0:
after = f', after: "{self.head} {offset - 1}"'
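# e.g. with head "abc123" and offset 10 this yields ', after: "abc123 9"',
# so the batch starts at the 11th commit in the history.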
return f"""
{{
repository(name: "{self.repo}", owner: "{self.user}") {{
ref(qualifiedName:"refs/heads/{self.name}") {{
name
target {{
... on Commit {{
history(first:{self.fetch_size}{after}) {{
nodes {{
oid
messageBody
messageHeadline
author {{
name
user {{
login
}}
}}
authoredDate
checkSuites(first:100) {{
edges {{
node {{
checkRuns(first:100) {{
nodes {{
name
status
conclusion
detailsUrl
}}
}}
workflowRun {{
workflow {{
name
}}
}}
}}
}}
}}
status {{
contexts {{
context
state
targetUrl
}}
}}
}}
}}
}}
}}
}}
}}
}}
"""
def check_response(self, gql_response: Any) -> None:
# Just check that this path in the dict exists
gql_response["data"]["repository"]["ref"]["target"]["history"]["nodes"]
async def run(self) -> None:
"""
Fetch history for the branch (in batches) and merge them all together
"""
# GitHub's API errors out if you try to fetch too much data at once, so
# split the 'self.history_size' commits into batches of 'self.fetch_size'
fetches = math.ceil(self.history_size / self.fetch_size)
async def fetch(i: int) -> Any:
try:
return await self.gql.query(
self.query(offset=self.fetch_size * i), verify=self.check_response
)
except Exception as e:
print(
f"Error: {e}\nFailed to fetch {self.user}/{self.repo}/{self.name} on batch {i} / {fetches}"
)
return None
coros = [fetch(i) for i in range(fetches)]
result = await asyncio.gather(*coros)
raw_commits = []
print(f"Parsing results {self.name}")
# Merge all the batches
for r in result:
if r is None:
continue
try:
commits_batch = r["data"]["repository"]["ref"]["target"]["history"][
"nodes"
]
raw_commits += commits_batch
except Exception as e:
# Errors here are expected if the branch has fewer than 'history_size'
# commits (GitHub will just time out). There's no easy way to find
# this number ahead of time and avoid errors, but if we had that
# then we could delete this try/except.
print(f"Error: Didn't find history in commit batch: {e}\n{r}")
# Pull out the data and format it
commits = extract_jobs(raw_commits)
print(f"Writing results for {self.name} to S3")
# Store gzip'ed data to S3
self.write_to_s3(commits)
class GraphQL:
def __init__(self, session: aiohttp.ClientSession) -> None:
self.session = session
def log_rate_limit(self, headers: Any) -> None:
remaining = headers.get("X-RateLimit-Remaining")
used = headers.get("X-RateLimit-Used")
total = headers.get("X-RateLimit-Limit")
reset_timestamp = int(headers.get("X-RateLimit-Reset", 0)) # type: ignore
reset = datetime.datetime.fromtimestamp(reset_timestamp).strftime(
"%a, %d %b %Y %H:%M:%S"
)
print(
f"[rate limit] Used {used}, {remaining} / {total} remaining, reset at {reset}"
)
async def query(
self,
query: str,
verify: Optional[Callable[[Any], None]] = None,
retries: int = 5,
) -> Any:
"""
Run an authenticated GraphQL query
"""
# Remove unnecessary white space
query = compress_query(query)
if retries <= 0:
raise RuntimeError(f"Query {query[:100]} failed, no retries left")
url = "https://api.github.com/graphql"
try:
async with self.session.post(url, json={"query": query}) as resp:
self.log_rate_limit(resp.headers)
r = await resp.json()
if "data" not in r:
raise RuntimeError(r)
if verify is not None:
verify(r)
return r
except Exception as e:
print(
f"Retrying query {query[:100]}, remaining attempts: {retries - 1}\n{e}"
)
return await self.query(query, verify=verify, retries=retries - 1)
async def main(
user: str, repo: str, branches: List[str], history_size: int, fetch_size: int
) -> None:
"""
Grab a list of all the head commits for each branch, then fetch all the jobs
for the last 'history_size' commits on that branch
"""
async with aiohttp.ClientSession(
headers={
"Authorization": "token {}".format(user_token(user)),
"Accept": "application/vnd.github.machine-man-preview+json",
}
) as aiosession:
gql = GraphQL(aiosession)
print(f"Querying branches: {branches}")
heads = await gql.query(head_commit_query(user, repo, branches))
handlers = []
for head in heads["data"].values():
sha = head["ref"]["target"]["oid"]
branch = head["ref"]["name"]
handlers.append(
BranchHandler(gql, user, repo, branch, sha, history_size, fetch_size)
)
await asyncio.gather(*[h.run() for h in handlers])
def lambda_handler(event: Any, context: Any) -> None:
"""
'event' here is the payload configured from EventBridge (or set manually
via environment variables)
"""
data: Dict[str, Any] = {
"branches": None,
"user": None,
"repo": None,
"history_size": None,
"fetch_size": None,
}
for key in data.keys():
if key in os.environ:
data[key] = os.environ[key]
else:
data[key] = event.get(key)
if any(x is None for x in data.values()):
raise RuntimeError(
"Data missing from configuration, it must be set as an environment "
f"variable or as the input JSON payload in the Lambda event:\n{data}"
)
data["history_size"] = int(data["history_size"])
data["fetch_size"] = int(data["fetch_size"])
data["branches"] = data["branches"].split(",")
asyncio.run(main(**data))
# if os.getenv("DEBUG", "0") == "1":
# # For local development
# lambda_handler(
# {
# "branches": "release/1.10",
# "user": "pytorch",
# "repo": "pytorch",
# "history_size": 100,
# "fetch_size": 10,
# },
# None,
# )
parser = argparse.ArgumentParser(description="Update JSON in S3 for a branch")
parser.add_argument("--branch", required=True)
parser.add_argument("--repo", required=True)
parser.add_argument("--user", required=True)
parser.add_argument("--fetch_size", default=10)
parser.add_argument("--history_size", default=100)
args = parser.parse_args()
lambda_handler(
{
"branches": args.branch,
"user": args.user,
"repo": args.repo,
"history_size": int(args.history_size),
"fetch_size": int(args.fetch_size),
},
None,
)
|
#!/usr/bin/env python3
from pathlib import Path
import jinja2
import os
from dataclasses import dataclass
from typing import Any
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
GITHUB_DIR = REPO_ROOT / ".github"
CRONS = {
"5 minutes": "*/5 * * * *",
"1 hour": "0 * * * *",
}
@dataclass
class Branch:
branch: str
cron: str = CRONS["1 hour"]
fetch_size: int = 4
history_size: int = 100
HUD_JOBS = {
"pytorch": {
"pytorch": [
Branch(branch="master", fetch_size=2, cron=CRONS["5 minutes"]),
Branch(branch="nightly", fetch_size=2),
Branch(branch="release/1.10", fetch_size=2),
Branch(branch="viable/strict", fetch_size=2),
],
"vision": [Branch(branch="main"), Branch(branch="release/0.11")],
"audio": [Branch(branch="main"), Branch(branch="release/0.10")],
"text": [Branch(branch="main"), Branch(branch="release/0.11")],
"examples": [Branch(branch="master")],
"tutorials": [Branch(branch="master")],
"torchx": [Branch(branch="main")],
},
"PyTorchLightning": {"pytorch-lightning": [Branch(branch="master")]},
}
class CIWorkflow:
name: str
template: str
def __init__(self, name: str, template: str, **kwargs: Any) -> None:
self.name = name
self.template = template
for key, value in kwargs.items():
setattr(self, key, value)
def generate_workflow_file(self, workflow_template: jinja2.Template) -> None:
output_file_path = GITHUB_DIR / f"workflows/generated-{self.name}.yml"
with open(output_file_path, "w") as output_file:
filename = Path(workflow_template.filename).relative_to(REPO_ROOT)
output_file.write("# @generated DO NOT EDIT MANUALLY\n")
output_file.write(f"# Generated from {filename}\n")
output_file.write(workflow_template.render(self.__dict__))
output_file.write("\n")
print("Wrote", output_file_path.relative_to(REPO_ROOT))
WORKFLOWS = []
for user_name, repos in HUD_JOBS.items():
for repo_name, branches in repos.items():
for branch in branches:
WORKFLOWS.append(
CIWorkflow(
template="update_github_status.yml.j2",
repo=repo_name,
user=user_name,
branch=branch.branch,
name=f"update-github-status-{user_name}-{repo_name}-{branch.branch.replace('/', '_')}",
cron=branch.cron,
fetch_size=branch.fetch_size,
history_size=branch.history_size,
)
)
if __name__ == "__main__":
jinja_env = jinja2.Environment(
variable_start_string="!{{",
loader=jinja2.FileSystemLoader(str(GITHUB_DIR / "templates")),
undefined=jinja2.StrictUndefined,
)
# Delete the existing generated files first; this should align with the .gitattributes file description.
existing_workflows = GITHUB_DIR.glob("workflows/generated-*")
for w in existing_workflows:
try:
os.remove(w)
except Exception as e:
print(f"Error occurred when deleting file {w}: {e}")
for workflow in WORKFLOWS:
template = jinja_env.get_template(workflow.template)
workflow.generate_workflow_file(workflow_template=template)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import os.path
import shutil
import subprocess
import tarfile
import textwrap
import urllib.request
import uuid
import zipfile
from os import walk
from shutil import copyfile
import yaml
PETCTL_DIR = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# Format a multiline command into a single line by trimming white spaces
# and replacing newlines with spaces
def format_command(cmd):
return textwrap.dedent(cmd).strip().replace(os.linesep, " ")
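# e.g. format_command("""
#     kubectl create secret generic pet-blob-secret
#     --type=azure/blobfuse""")
# returns "kubectl create secret generic pet-blob-secret --type=azure/blobfuse"
# (newlines are only replaced where they match os.linesep, i.e. "\n" on Linux).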
# Runs each command in a separate process
# and returns the stdout of the last one
def run_commands(cmds):
set_kubeconfig_environment_var()
for cmd in cmds:
process = subprocess.run(
cmd,
shell=True,
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ,
)
if process.stdout:
logger.info(process.stdout)
if process.stderr:
logger.info(process.stderr)
return process.stdout
# Configures job yaml file based on user inputs
def configure_yaml(args):
SAMPLE_YAML_FILE = os.path.join(PETCTL_DIR, "config", "sample_specs.yaml")
result_yaml_file = os.path.join(PETCTL_DIR, "config", "azure-pytorch-elastic.yaml")
logger.info(f"Configuring job yaml {result_yaml_file}")
with open(SAMPLE_YAML_FILE) as f:
data = yaml.safe_load(f)
data["spec"]["parallelism"] = args.max_size
data["spec"]["template"]["spec"]["containers"][0]["env"].extend(
[
{"name": "JOB_ID", "value": str(uuid.uuid1()) + "_" + args.name},
{"name": "MIN_SIZE", "value": str(args.min_size)},
{"name": "MAX_SIZE", "value": str(args.max_size)},
]
)
with open(result_yaml_file, "w") as out:
    yaml.dump(data, out)
# Configures job yaml file with the user's storage container
def configure_yaml_storage(container_name):
yaml_file = os.path.join(PETCTL_DIR, "config/azure-pytorch-elastic.yaml")
logger.info(f"Configuring job yaml {yaml_file}")
with open(yaml_file) as f:
data = yaml.safe_load(f)
data["spec"]["template"]["spec"]["volumes"][0]["flexVolume"]["options"][
"container"
] = container_name
with open(yaml_file, "w") as out:
    yaml.dump(data, out)
# Configures job yaml file based on user docker image
def configure_yaml_docker(image_name):
yaml_file = os.path.join(PETCTL_DIR, "config/azure-pytorch-elastic.yaml")
logger.info(f"Configuring job yaml {yaml_file}")
with open(yaml_file) as f:
data = yaml.safe_load(f)
data["spec"]["template"]["spec"]["containers"][0]["image"] = image_name
with open(yaml_file, "w") as out:
    yaml.dump(data, out)
# Configures kubernetes json file based on user inputs
def configure_json(args):
KUBERNETES_JSON_FILE = os.path.join(PETCTL_DIR, "config/kubernetes.json")
result_json_file = os.path.join(PETCTL_DIR, "config/", "kubernetes.json")
logger.info(f"Configuring kubernetes specs {result_json_file}")
with open(KUBERNETES_JSON_FILE) as f:
data = json.load(f)
data["properties"]["masterProfile"]["count"] = 1
data["properties"]["agentPoolProfiles"][0]["count"] = args.min_size
data["properties"]["masterProfile"]["vmSize"] = args.master_vm
data["properties"]["agentPoolProfiles"][0]["vmSize"] = args.worker_vm
json.dump(data, open(result_json_file, "w"), indent=4)
# Download AKS engine installer script for Linux
def download_aks_engine_script():
url = (
"https://raw.githubusercontent.com/Azure/aks-engine/master/scripts/get-akse.sh"
)
logger.info("Downloading aks engine script.....")
urllib.request.urlretrieve(url, "config/get-akse.sh")
# Download AKS engine binary for Windows
def download_aks_engine_script_for_windows():
logger.info("Downloading aks engine binary.....")
url = (
"https://github.com/Azure/aks-engine/releases"
"/download/v0.47.0/aks-engine-v0.47.0-windows-amd64.zip"
)
filename, _ = urllib.request.urlretrieve(url, "config/aks.zip")
zip_file_object = zipfile.ZipFile(filename, "r")
for name in zip_file_object.namelist():
if "aks-engine.exe" in name:
zip_file_object.extract(name, "aks-engine")
copyfile("aks-engine/" + name, "aks-engine.exe")
break
# Installs AKS engine from the script/binary
def install_aks_engine():
if os.name == "nt":
download_aks_engine_script_for_windows()
else:
download_aks_engine_script()
commands = ["chmod 700 config/get-akse.sh", "./config/get-akse.sh"]
run_commands(commands)
# Download AzCopy script to upload to AzureBlobStorage
def download_azcopy_script():
logger.info("Downloading azcopy cli")
url = "https://aka.ms/downloadazcopy-v10-linux"
filename, _ = urllib.request.urlretrieve(url, "config/azcopy.tar.gz")
tar_file_object = tarfile.open(filename, "r:gz")
for member in tar_file_object.getmembers():
if member.isreg():
member.name = os.path.basename(member.name)
if "azcopy" == member.name:
tar_file_object.extract(member.name, ".")
break
# Download AzCopy script for windows
def download_azcopy_script_for_windows():
url = "https://aka.ms/downloadazcopy-v10-windows"
filename, _ = urllib.request.urlretrieve(url, "config/azcopy.zip")
zip_file_object = zipfile.ZipFile(filename, "r")
for member in zip_file_object.infolist():
if not member.is_dir():
member.filename = os.path.basename(member.filename)
if "azcopy" in member.filename:
zip_file_object.extract(member, ".")
break
"""
Helper function to upload to Azure Blob storage based on
a storage account, storage container, and SAS token
"""
def upload_to_azure_blob(args):
destination = (
    f"https://{args.account_name}.blob.core.windows.net/"
    f"{args.container_name}{args.sas_token}"
)
if os.name == "nt":
download_azcopy_script_for_windows()
commands = [
format_command(
f"""
azcopy copy "{args.source_path}"
"{destination}"
--recursive=True"""
)
]
run_commands(commands)
else:
download_azcopy_script()
commands = [
format_command(
f"""
./azcopy copy "{args.source_path}"
"{destination}"
--recursive=True"""
)
]
run_commands(commands)
configure_yaml_storage(args.container_name)
"""
Sets the KUBECONFIG environment variable to
the path of the generated kubeconfig json file
"""
def set_kubeconfig_environment_var():
if os.path.isdir("_output"):
config_path = os.path.join(
PETCTL_DIR, "_output", "azure-pytorch-elastic", "kubeconfig"
)
logger.info(f"Reading KUBECONFIG environment variable from {config_path}")
for dirpath, _dirnames, filenames in walk(config_path):
    for filename in filenames:
        if filename.endswith(".json"):
            config_path = os.path.join(dirpath, filename)
            break
if config_path.endswith(".json"):
os.environ["KUBECONFIG"] = config_path
logger.info(
f"Setting KUBECONFIG env variable {os.environ.get('KUBECONFIG')}"
)
# Create storage secret named 'pet-blob-secret'
def create_storage_secrets(args):
commands = [
format_command(
f"""
kubectl create secret generic pet-blob-secret
--from-literal accountname={args.account_name}
--from-literal accountkey={args.account_key}
--type=azure/blobfuse"""
)
]
run_commands(commands)
# Install Azure blobfuse drivers
def install_blobfuse_drivers():
commands = [
"kubectl apply -f "
+ "https://raw.githubusercontent.com/Azure/kubernetes-volume-drivers"
+ "/master/flexvolume/blobfuse/deployment/blobfuse-flexvol-installer-1.9.yaml"
]
run_commands(commands)
# Create docker image secrets given user inputs
def create_docker_image_secret(args):
configure_yaml_docker(args.image_name)
commands = [
format_command(
f"""
kubectl create secret
docker-registry pet-docker-secret
--docker-server={args.server}
--docker-username={args.username}
--docker-password={args.password}
--docker-email='[email protected]'"""
)
]
run_commands(commands)
logger.info("Docker image secret created.")
# Deploy AKS cluster
def deploy_aks_cluster(args):
logger.info("Started AKS cluster deployment. This will take some time .....")
commands = [
format_command(
f"""
aks-engine deploy -f
--subscription-id {args.subscription_id}
--dns-prefix {args.dns_prefix}
--resource-group {args.rg}
--location {args.location}
--api-model config/kubernetes.json
--client-id {args.client_id}
--client-secret {args.client_secret}
--set servicePrincipalProfile.clientId={args.client_id}
--set servicePrincipalProfile.secret={args.client_secret}"""
)
]
run_commands(commands)
# Scale the cluster up and down based on user input
def scale_cluster(args):
command = [
format_command(
f"""
aks-engine scale
--subscription-id {args.subscription_id}
--resource-group {args.rg}
--client-id {args.client_id}
--client-secret {args.client_secret}
--location {args.location}
--api-model _output/azure-pytorch-elastic/apimodel.json
--new-node-count {args.new_node_count}
--apiserver azure-pytorch-elastic.{args.location}.cloudapp.azure.com"""
)
]
run_commands(command)
def delete_resources_util():
commands = [
"kubectl config delete-cluster azure-pytorch-elastic",
"kubectl delete secret pet-blob-secret",
"kubectl delete namespace --all",
]
run_commands(commands)
if os.path.isdir("_output"):
shutil.rmtree(os.path.join(PETCTL_DIR, "_output"))
logger.info(
    "Deleted all resources; "
    "please manually delete the AKS resources from the Azure Portal."
)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse

import util
# Create Kubernetes specs and a YAML job file based on user inputs
def configure(args):
util.configure_yaml(args)
util.configure_json(args)
# Deploys a Kubernetes cluster
def setup(args):
# Install AKS Engine
util.install_aks_engine()
# Deploy an AKS cluster using kubernetes.json
util.deploy_aks_cluster(args)
# Upload code/data to Azure blob storage
def upload_storage(args):
util.upload_to_azure_blob(args)
# Create Azure blob storage secret
def storage_secret(args):
util.create_storage_secrets(args)
# Create docker image secrets
def docker_secret(args):
util.create_docker_image_secret(args)
# Scale the cluster
def scale_cluster(args):
util.scale_cluster(args)
# Submits your training job
def run_job(args):
util.install_blobfuse_drivers()
commands = [
"kubectl delete -f config/azure-pytorch-elastic.yaml",
"kubectl apply -f config/azure-pytorch-elastic.yaml",
"kubectl describe pods",
"kubectl get pods --selector app=azure-pytorch-elastic",
]
util.run_commands(commands)
# Check current status of your pods
def check_status():
commands = [
"kubectl describe pods",
"kubectl get pods --selector app=azure-pytorch-elastic",
]
util.run_commands(commands)
# Get logs of your job from each pod
def get_logs():
util.run_commands(["kubectl logs --selector app=azure-pytorch-elastic "])
# Deletes secrets and cluster
def delete_resources():
util.delete_resources_util()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(
title="actions", description="setup | configure | run job", dest="command"
)
# ---------------------------------- #
# SETUP #
# ---------------------------------- #
parser_setup = subparser.add_parser(
"setup", help="set up aks-engine, cluster and other dependencies"
)
parser_setup.add_argument(
"--dns_prefix",
type=str,
required=False,
default="azure-pytorch-elastic",
help="Dns prefix of the app",
)
parser_setup.add_argument(
"--subscription_id",
type=str,
required=True,
help="Subscription id of the cluster",
)
parser_setup.add_argument(
"--rg", type=str, required=True, help="Resource group of the cluster"
)
parser_setup.add_argument(
"--location", type=str, required=True, help="Location of the cluster"
)
parser_setup.add_argument(
"--client_id", type=str, required=True, help="Service principal client id"
)
parser_setup.add_argument(
"--client_secret",
type=str,
required=True,
help="Service Principal client secret",
)
parser_setup.set_defaults(func=setup)
# ---------------------------------- #
# CONFIGURE JOB YAML #
# ---------------------------------- #
parser_configure = subparser.add_parser("configure", help="Generate yaml job file")
parser_configure.add_argument("--name", required=True, help="config parameters")
parser_configure.add_argument(
"--min_size",
type=int,
required=False,
help="minimum number of worker hosts to continue training",
)
parser_configure.add_argument(
"--max_size",
type=int,
required=False,
help="maximum number of worker hosts to allow scaling out",
)
parser_configure.add_argument(
"--size",
type=int,
required=False,
help="set size to automatically set min_size = max_size = size",
)
parser_configure.add_argument(
"--master_vm",
type=str,
required=False,
default="Standard_DS1_v2",
help="Azure VM instance for master node",
)
parser_configure.add_argument(
"--worker_vm",
type=str,
required=False,
default="Standard_NC6s_v3",
help="Azure VM instance for worker nodes",
)
parser_configure.set_defaults(func=configure)
# ---------------------------------- #
# UPLOAD STORAGE #
# ---------------------------------- #
parser_upload_storage = subparser.add_parser(
"upload_storage", help="Upload to Azure Blob storage"
)
parser_upload_storage.add_argument(
"--account_name",
type=str,
required=True,
help="Azure Blob storage Account name",
)
parser_upload_storage.add_argument(
"--container_name",
type=str,
required=True,
help="Azure Blob storage container name",
)
parser_upload_storage.add_argument(
"--sas_token", type=str, required=True, help="Azure Blob storage SAS token"
)
parser_upload_storage.add_argument(
"--source_path", type=str, required=True, help="Path to local files"
)
parser_upload_storage.set_defaults(func=upload_storage)
# ---------------------------------- #
# SETUP SECRETS #
# ---------------------------------- #
parser_storage_secret = subparser.add_parser(
"storage_secret", help="Generate secret for Azure Blob storage"
)
parser_storage_secret.add_argument(
"--account_name",
type=str,
required=True,
help="Azure Blob storage account name",
)
parser_storage_secret.add_argument(
"--account_key", type=str, required=True, help="Azure Blob storage account key"
)
parser_storage_secret.set_defaults(func=storage_secret)
parser_docker_secret = subparser.add_parser(
"docker_secret", help="Generate secret for Docker Image"
)
parser_docker_secret.add_argument(
"--server", type=str, required=True, help="Docker server"
)
parser_docker_secret.add_argument(
"--username", type=str, required=True, help="Docker username"
)
parser_docker_secret.add_argument(
"--password", type=str, required=True, help="Docker password"
)
parser_docker_secret.add_argument(
"--image_name", type=str, required=True, help="Docker Imagename"
)
parser_docker_secret.set_defaults(func=docker_secret)
# ---------------------------------- #
# RUN JOB #
# ---------------------------------- #
parser_run_job = subparser.add_parser("run_job", help="Run your training job")
parser_run_job.set_defaults(func=run_job)
# ---------------------------------- #
# CHECK STATUS #
# ---------------------------------- #
parser_check_status = subparser.add_parser(
"check_status", help="Check status of your jobs"
)
parser_check_status.set_defaults(func=check_status)
# ---------------------------------- #
# DELETE RESOURCES #
# ---------------------------------- #
parser_delete_resources = subparser.add_parser(
"delete_resources",
help="Deletes the kubernetes cluster and all namespaces and secrets",
)
parser_delete_resources.set_defaults(func=delete_resources)
# ---------------------------------- #
# GET LOGS #
# ---------------------------------- #
parser_get_logs = subparser.add_parser(
"get_logs", help="Get logs from all your pods"
)
parser_get_logs.set_defaults(func=get_logs)
# ---------------------------------- #
# SCALE CLUSTER #
# ---------------------------------- #
parser_scale = subparser.add_parser("scale", help="Scale up/down your cluster")
parser_scale.add_argument(
"--subscription_id",
type=str,
required=True,
help="Subscription id of the cluster",
)
parser_scale.add_argument(
"--rg", type=str, required=True, help="Resource group of the cluster"
)
parser_scale.add_argument(
"--location", type=str, required=True, help="Location of the cluster"
)
parser_scale.add_argument(
"--client_id", type=str, required=True, help="Service principal client id"
)
parser_scale.add_argument(
"--client_secret",
type=str,
required=True,
help="Service Principal client secret",
)
parser_scale.add_argument(
"--new_node_count",
type=int,
required=True,
help="New node count to scale cluster to",
)
parser_scale.set_defaults(func=util.scale_cluster)
args = parser.parse_args()
# -----
# Execution order: Configure --> Setup --> Run
# -----
if args.command == "configure":
configure(args)
elif args.command == "setup":
setup(args)
elif args.command == "upload_storage":
upload_storage(args)
elif args.command == "storage_secret":
storage_secret(args)
elif args.command == "docker_secret":
docker_secret(args)
elif args.command == "run_job":
run_job(args)
elif args.command == "check_status":
check_status()
elif args.command == "delete_resources":
delete_resources()
elif args.command == "get_logs":
get_logs()
elif args.command == "scale":
scale_cluster(args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import pytorch_sphinx_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from docutils import nodes
from sphinx import addnodes
from sphinx.util.docfields import TypedField
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "1.6"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinxcontrib.katex",
"sphinx.ext.autosectionlabel",
]
# katex options
#
#
katex_options = r"""
delimiters : [
{left: "$$", right: "$$", display: true},
{left: "\\(", right: "\\)", display: false},
{left: "\\[", right: "\\]", display: true}
]
"""
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyTorch/Elastic"
copyright = "2020, PyTorch Elastic Contributors"
author = "PyTorch Elastic Contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = "v0.2.3.dev0"
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = "master"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"pytorch_project": "elastic",
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
}
html_logo = "_static/img/pytorch-logo-dark.svg"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def setup(app):
# NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
# and can be moved outside of this function (and the setup(app) function
# can be deleted).
html_css_files = [
"https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css"
]
# In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
# `add_stylesheet` (deprecated in 1.8).
add_css = getattr(
app, "add_css_file", getattr(app, "add_stylesheet", None)
) # noqa B009
for css_file in html_css_files:
add_css(css_file)
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "TorchElasticdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pytorch.tex",
"Torchelastic Documentation",
"Torch Contributors",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "Torchelastic", "Torchelastic Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Torchelastic",
"Torchelastic Documentation",
author,
"Torchelastic",
"PyTorch Elastic Training",
"Miscellaneous",
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/stable/", None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong("", fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(" (")
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = "".join(n.astext() for n in fieldtype)
typename = typename.replace("int", "python:int")
typename = typename.replace("long", "python:long")
typename = typename.replace("float", "python:float")
typename = typename.replace("type", "python:type")
par.extend(
self.make_xrefs(
self.typerolename,
domain,
typename,
addnodes.literal_emphasis,
**kw,
)
)
else:
par += fieldtype
par += nodes.Text(")")
par += nodes.Text(" -- ")
par += content
return par
fieldname = nodes.field_name("", self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item("", handle_item(fieldarg, content))
fieldbody = nodes.field_body("", bodynode)
return nodes.field("", fieldname, fieldbody)
TypedField.make_field = patched_make_field
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
For each rst file, generates a corresponding rst file
that redirects http://pytorch.org/elastic/<version>/<file_name>.html
to http://pytorch.org/elastic/latest/<file_name>.html
"""
import argparse
import glob
import os
import sys
import torchelastic
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--source_dir", required=True, help="directory where rst files are"
)
parser.add_argument("--build_dir", required=True, help="directory to drop md files")
return parser.parse_args(args[1:])
if __name__ == "__main__":
args = parse_args(sys.argv)
build_ver = torchelastic.__version__
source_dir = args.source_dir
build_dir = args.build_dir
print(f"Creating redirect files from source_dir: {source_dir} into {build_dir}")
for rst_file in glob.glob(os.path.join(source_dir, "**/*.rst"), recursive=True):
rst_relative_path = os.path.relpath(rst_file, source_dir)
md_relative_path = os.path.splitext(rst_relative_path)[0] + ".md"
html_relative_path = os.path.splitext(rst_relative_path)[0] + ".html"
md_file = os.path.join(build_dir, md_relative_path)
os.makedirs(os.path.dirname(md_file), exist_ok=True)
print(f"Creating redirect md for {rst_relative_path} --> {md_file}")
with open(md_file, "w") as f:
f.write("---\n")
f.write("layout: docs_redirect\n")
f.write("title: PyTorch | Redirect\n")
f.write(f'redirect_url: "/elastic/{build_ver}/{html_relative_path}"\n')
f.write("---\n")
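# An emitted redirect file for, e.g., train_script.rst with build_ver "0.2.0"
# would look like (illustrative values):
#
#   ---
#   layout: docs_redirect
#   title: PyTorch | Redirect
#   redirect_url: "/elastic/0.2.0/train_script.html"
#   ---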
|
#!/usr/bin/env python3
import io
import os
import pprint
import sys
import torch.distributed as dist
if __name__ == "__main__":
env_dict = {
k: os.environ[k]
for k in (
"LOCAL_RANK",
"RANK",
"GROUP_RANK",
"WORLD_SIZE",
"MASTER_ADDR",
"MASTER_PORT",
"TORCHELASTIC_RESTART_COUNT",
"TORCHELASTIC_MAX_RESTARTS",
)
}
with io.StringIO() as buff:
print("======================================================", file=buff)
print(
f"Environment variables set by the agent on PID {os.getpid()}:", file=buff
)
pprint.pprint(env_dict, stream=buff)
print("======================================================", file=buff)
print(buff.getvalue())
sys.stdout.flush()
dist.init_process_group(backend="gloo")
dist.barrier()
print(
(
f"On PID {os.getpid()}, after init process group, "
f"rank={dist.get_rank()}, world_size = {dist.get_world_size()}\n"
)
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
r"""
Source: `pytorch imagenet example <https://github.com/pytorch/examples/blob/master/imagenet/main.py>`_ # noqa B950
Modified and simplified to make the original pytorch example compatible with
torchelastic.distributed.launch.
Changes:
1. Removed ``rank``, ``gpu``, ``multiprocessing-distributed``, ``dist_url`` options.
These are obsolete parameters when using ``torchelastic.distributed.launch``.
2. Removed ``seed``, ``evaluate``, ``pretrained`` options for simplicity.
3. Removed ``resume``, ``start-epoch`` options.
Loads the most recent checkpoint by default.
4. ``batch-size`` is now per GPU (worker) batch size rather than for all GPUs.
5. Defaults ``workers`` (num data loader workers) to ``0``.
Usage
::
>>> python -m torchelastic.distributed.launch
--nnodes=$NUM_NODES
--nproc_per_node=$WORKERS_PER_NODE
--rdzv_id=$JOB_ID
--rdzv_backend=etcd
--rdzv_endpoint=$ETCD_HOST:$ETCD_PORT
main.py
--arch resnet18
--epochs 20
--batch-size 32
<DATA_DIR>
"""
import argparse
import io
import os
import shutil
import time
from contextlib import contextmanager
from datetime import timedelta
from typing import List, Tuple
import numpy
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from torch.distributed.elastic.utils.data import ElasticDistributedSampler
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD
from torch.utils.data import DataLoader
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch Elastic ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet18",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet18)",
)
parser.add_argument(
"-j",
"--workers",
default=0,
type=int,
metavar="N",
help="number of data loading workers",
)
parser.add_argument(
"--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"-b",
"--batch-size",
default=32,
type=int,
metavar="N",
help="mini-batch size (default: 32), per worker (GPU)",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--dist-backend",
default="nccl",
choices=["nccl", "gloo"],
type=str,
help="distributed backend",
)
parser.add_argument(
"--checkpoint-file",
default="/tmp/checkpoint.pth.tar",
type=str,
help="checkpoint file path, to load and save to",
)
def main():
args = parser.parse_args()
device_id = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(device_id)
print(f"=> set cuda device = {device_id}")
dist.init_process_group(
backend=args.dist_backend, init_method="env://", timeout=timedelta(seconds=10)
)
model, criterion, optimizer = initialize_model(
args.arch, args.lr, args.momentum, args.weight_decay, device_id
)
train_loader, val_loader = initialize_data_loader(
args.data, args.batch_size, args.workers
)
# resume from checkpoint if one exists;
state = load_checkpoint(
args.checkpoint_file, device_id, args.arch, model, optimizer
)
start_epoch = state.epoch + 1
print(f"=> start_epoch: {start_epoch}, best_acc1: {state.best_acc1}")
print_freq = args.print_freq
for epoch in range(start_epoch, args.epochs):
state.epoch = epoch
train_loader.batch_sampler.sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args.lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, device_id, print_freq)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, device_id, print_freq)
# remember best acc@1 and save checkpoint
is_best = acc1 > state.best_acc1
state.best_acc1 = max(acc1, state.best_acc1)
if device_id == 0:
save_checkpoint(state, is_best, args.checkpoint_file)
class State:
"""
Container for objects that we want to checkpoint. Represents the
current "state" of the worker. This object is mutable.
"""
def __init__(self, arch, model, optimizer):
self.epoch = -1
self.best_acc1 = 0
self.arch = arch
self.model = model
self.optimizer = optimizer
def capture_snapshot(self):
"""
Essentially a ``serialize()`` function, returns the state as an
object compatible with ``torch.save()``. The following should work
::
snapshot = state_0.capture_snapshot()
state_1.apply_snapshot(snapshot)
assert state_0 == state_1
"""
return {
"epoch": self.epoch,
"best_acc1": self.best_acc1,
"arch": self.arch,
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
def apply_snapshot(self, obj, device_id):
"""
The complementary function of ``capture_snapshot()``. Applies the
snapshot object that was returned by ``capture_snapshot()``.
This function mutates this state object.
"""
self.epoch = obj["epoch"]
self.best_acc1 = obj["best_acc1"]
self.model.load_state_dict(obj["state_dict"])
self.optimizer.load_state_dict(obj["optimizer"])
def save(self, f):
torch.save(self.capture_snapshot(), f)
def load(self, f, device_id):
# Map model to be loaded to specified single gpu.
snapshot = torch.load(f, map_location=f"cuda:{device_id}")
self.apply_snapshot(snapshot, device_id)
def initialize_model(
arch: str, lr: float, momentum: float, weight_decay: float, device_id: int
):
print(f"=> creating model: {arch}")
model = models.__dict__[arch]()
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
model.cuda(device_id)
cudnn.benchmark = True
model = DistributedDataParallel(model, device_ids=[device_id])
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(device_id)
optimizer = SGD(
model.parameters(), lr, momentum=momentum, weight_decay=weight_decay
)
return model, criterion, optimizer
def initialize_data_loader(
data_dir, batch_size, num_data_workers
) -> Tuple[DataLoader, DataLoader]:
traindir = os.path.join(data_dir, "train")
valdir = os.path.join(data_dir, "val")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
)
train_sampler = ElasticDistributedSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=num_data_workers,
pin_memory=True,
sampler=train_sampler,
)
val_loader = DataLoader(
datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=batch_size,
shuffle=False,
num_workers=num_data_workers,
pin_memory=True,
)
return train_loader, val_loader
def load_checkpoint(
checkpoint_file: str,
device_id: int,
arch: str,
model: DistributedDataParallel,
optimizer, # SGD
) -> State:
"""
Loads a local checkpoint (if any). Otherwise, checks to see if any of
the neighbors have a non-zero state. If so, restore the state
from the rank that has the most up-to-date checkpoint.
.. note:: when your job has access to a globally visible persistent storage
(e.g. nfs mount, S3) you can simply have all workers load
from the most recent checkpoint from such storage. Since this
example is expected to run on vanilla hosts (with no shared
storage) the checkpoints are written to local disk, hence
we have the extra logic to broadcast the checkpoint from a
surviving node.
"""
state = State(arch, model, optimizer)
if os.path.isfile(checkpoint_file):
print(f"=> loading checkpoint file: {checkpoint_file}")
state.load(checkpoint_file, device_id)
print(f"=> loaded checkpoint file: {checkpoint_file}")
# logic below is unnecessary when the checkpoint is visible on all nodes!
# create a temporary cpu pg to broadcast most up-to-date checkpoint
with tmp_process_group(backend="gloo") as pg:
rank = dist.get_rank(group=pg)
# get rank that has the largest state.epoch
epochs = torch.zeros(dist.get_world_size(), dtype=torch.int32)
epochs[rank] = state.epoch
dist.all_reduce(epochs, op=dist.ReduceOp.SUM, group=pg)
t_max_epoch, t_max_rank = torch.max(epochs, dim=0)
max_epoch = t_max_epoch.item()
max_rank = t_max_rank.item()
# max_epoch == -1 means no one has checkpointed; return the base state
if max_epoch == -1:
print("=> no workers have checkpoints, starting from epoch 0")
return state
# broadcast the state from max_rank (which has the most up-to-date state)
# pickle the snapshot, convert it into a byte-blob tensor
# then broadcast it, unpickle it and apply the snapshot
print(f"=> using checkpoint from rank: {max_rank}, max_epoch: {max_epoch}")
with io.BytesIO() as f:
torch.save(state.capture_snapshot(), f)
raw_blob = numpy.frombuffer(f.getvalue(), dtype=numpy.uint8)
blob_len = torch.tensor(len(raw_blob))
dist.broadcast(blob_len, src=max_rank, group=pg)
print(f"=> checkpoint broadcast size is: {blob_len}")
if rank != max_rank:
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
blob = torch.zeros(blob_len.item(), dtype=torch.uint8)
else:
blob = torch.as_tensor(raw_blob, dtype=torch.uint8)
dist.broadcast(blob, src=max_rank, group=pg)
print("=> done broadcasting checkpoint")
if rank != max_rank:
with io.BytesIO(blob.numpy()) as f:
snapshot = torch.load(f)
state.apply_snapshot(snapshot, device_id)
# wait till everyone has loaded the checkpoint
dist.barrier(group=pg)
print("=> done restoring from previous checkpoint")
return state
@contextmanager
def tmp_process_group(backend):
cpu_pg = dist.new_group(backend=backend)
try:
yield cpu_pg
finally:
dist.destroy_process_group(cpu_pg)
def save_checkpoint(state: State, is_best: bool, filename: str):
checkpoint_dir = os.path.dirname(filename)
os.makedirs(checkpoint_dir, exist_ok=True)
# save to tmp, then commit by moving the file in case the job
# gets interrupted while writing the checkpoint
tmp_filename = filename + ".tmp"
torch.save(state.capture_snapshot(), tmp_filename)
os.rename(tmp_filename, filename)
print(f"=> saved checkpoint for epoch {state.epoch} at {filename}")
if is_best:
best = os.path.join(checkpoint_dir, "model_best.pth.tar")
print(f"=> best model found at epoch {state.epoch} saving to {best}")
shutil.copyfile(filename, best)
def train(
train_loader: DataLoader,
model: DistributedDataParallel,
criterion, # nn.CrossEntropyLoss
optimizer, # SGD,
epoch: int,
device_id: int,
print_freq: int,
):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(device_id, non_blocking=True)
target = target.cuda(device_id, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
progress.display(i)
def validate(
val_loader: DataLoader,
model: DistributedDataParallel,
criterion, # nn.CrossEntropyLoss
device_id: int,
print_freq: int,
):
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if device_id is not None:
images = images.cuda(device_id, non_blocking=True)
target = target.cuda(device_id, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(
" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
)
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name: str, fmt: str = ":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self) -> None:
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1) -> None:
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches: int, meters: List[AverageMeter], prefix: str = ""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch: int) -> None:
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches: int) -> str:
num_digits = len(str(num_batches))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
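# e.g. for num_batches=100, display(12) prints entries prefixed "[ 12/100]".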
def adjust_learning_rate(optimizer, epoch: int, lr: float) -> None:
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
learning_rate = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group["lr"] = learning_rate
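# e.g. with lr=0.1: epochs 0-29 train at 0.1, epochs 30-59 at 0.01,
# and epochs 60-89 at 0.001.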
def accuracy(output, target, topk=(1,)):
"""
Computes the accuracy over the k top predictions for the specified values of k
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(1, -1).view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
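# e.g. accuracy(output, target, topk=(1, 5)) returns two single-element
# tensors: the percentage of samples whose highest-scoring class matches the
# target, and the percentage whose target is among the top 5 predictions.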
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import time
def wait_for(msg, timeout: float = 300, interval: int = 1, print_spinner: bool = True):
"""
Usage:

    for _ in wait_for("asg to provision", timeout_sec, interval_sec):
        if check_condition():
            break
"""
spin = ["-", "/", "|", "\\", "-", "/", "|", "\\"]
idx = 0
start = time.time()
max_time = start + timeout
while True:
if print_spinner:
elapsed = time.time() - start
print(
f"Waiting for {msg}"
f" ({elapsed:03.0f}/{timeout:3.0f}s elapsed) {spin[idx]}\r",
end="",
)
sys.stdout.flush()
idx = (idx + 1) % len(spin)
if time.time() >= max_time:
raise RuntimeError(f"Timed out while waiting for: {msg}")
else:
time.sleep(interval)
yield
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import getpass
import logging
import os
import random
import string
from jinja2 import Template
from util import wait_for
log = logging.getLogger(__name__)
class CloudFormation:
def __init__(self, session):
self._session = session
self._cfn = session.client("cloudformation")
def create_specs_file(self, specs_file, s3_bucket_name, efs_id):
username = getpass.getuser()
rand = "".join(random.choices(string.ascii_uppercase + string.digits, k=5))
hash = f"{username}-{rand}"
stack_name = f"torchelastic-{hash}"
this_dir = os.path.dirname(__file__)
cfn_template = os.path.join(this_dir, "cfn/setup.yml")
sample_specs = os.path.join(this_dir, "config/sample_specs.json")
params = {
"WorkerRoleName": f"torchelastic_worker_role-{hash}",
"RendezvousRoleName": f"torchelastic_rendezvous_role-{hash}",
}
if s3_bucket_name:
params["S3BucketName"] = s3_bucket_name
if efs_id:
params["EFSFileSystemId"] = efs_id
self.create_stack(stack_name, cfn_template, **params)
for _ in wait_for(
f"cfn stack: {stack_name} to create", timeout=600, interval=2
):
status, outputs = self.describe_stack(stack_name)
if status == "CREATE_COMPLETE":
break
elif status == "CREATE_FAILED" or status.startswith("ROLLBACK_"):
# when stack creation fails cfn starts rolling the stack back
raise RuntimeError(
f"Error creating stack {stack_name}, status = {status}"
)
outputs["User"] = username
log.info(f"Writing specs file to: {specs_file}")
with open(sample_specs) as f:
specs_template = Template(f.read())
specs_template.stream(**outputs).dump(specs_file)
def describe_stack(self, stack_name):
describe_res = self._cfn.describe_stacks(StackName=stack_name)
stacks = describe_res["Stacks"]
if len(stacks) > 1:
raise RuntimeError(f"Found more than one stack with name {stack_name}")
stack_desc = stacks[0]
status = stack_desc["StackStatus"]
# cfn outputs an array of maps, each element in the array is
# a single output of the form "{OutputKey: <key>, OutputValue: <value>}"
# simplify to a map of <key>, <value> pairs
outputs = {}
if "Outputs" in stack_desc:
for cfn_output in stack_desc["Outputs"]:
key = cfn_output["OutputKey"]
value = cfn_output["OutputValue"]
outputs[key] = value
return status, outputs
def create_stack(self, stack_name, cfn_template, **params):
log.info(f"Creating cloudformation stack with template: {cfn_template}")
with open(cfn_template) as f:
template_body = f.read()
cfn_parameters = []
for key, value in params.items():
cfn_parameters.append({"ParameterKey": key, "ParameterValue": value})
res = self._cfn.create_stack(
StackName=stack_name,
TemplateBody=template_body,
Capabilities=["CAPABILITY_NAMED_IAM"],
Parameters=cfn_parameters,
)
return res["StackId"]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import getpass
import json
import logging
import os
import sys
from os.path import expanduser
from urllib.parse import urlparse
import auth
from autoscaling import AutoScalingGroup
from cloudformation import CloudFormation
from s3 import S3
log = logging.getLogger(__name__)
PETCTL_DIR = os.path.join(expanduser("~"), ".petctl")
PETCTL_CONFIG_FILE = os.path.join(PETCTL_DIR, "config")
SPECS_FILE = os.path.join(PETCTL_DIR, "specs.json")
def split_args(args, delimiter="--"):
if delimiter in args:
idx = args.index(delimiter)
if idx == (len(args) - 1):
return args, []
else:
return args[0:idx], args[idx + 1 :]
else:
return args, []
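# e.g. split_args(["run_job", "--size", "2", "--", "--epochs", "10"])
# returns (["run_job", "--size", "2"], ["--epochs", "10"]); everything after
# the "--" delimiter is passed through untouched as script args.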
def parse_arguments(args, **default_args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--specs_file",
help="see https://github.com/pytorch/elastic/blob/master/aws/README.md#create-specs-file", # noqa B950
)
parser.set_defaults(**default_args)
subparser = parser.add_subparsers(
title="actions", description="run_job | kill_job", dest="command"
)
# -----------------------------------------
# Run Job
# -----------------------------------------
parser_run_job = subparser.add_parser(
"run_job", help="runs a torchelastic job on asg"
)
parser_run_job.add_argument("--name", required=True, help="name of the job")
parser_run_job.add_argument(
"--min_size",
type=int,
required=False,
help="minimum number of worker hosts to continue training",
)
parser_run_job.add_argument(
"--max_size",
type=int,
required=False,
help="maximum number of worker hosts to allow scaling out",
)
parser_run_job.add_argument(
"--size",
type=int,
required=True,
help="number of worker hosts to start the job with",
)
parser_run_job.add_argument(
"--instance_type", required=False, help="Instance type to run the job on"
)
parser_run_job.add_argument(
dest="script_path",
help="script or script dir path (e.g. ~/script.py, s3://..., docker://)",
)
parser_run_job.set_defaults(func=run_job)
# -----------------------------------------
# Kill Job
# -----------------------------------------
parser_kill_job = subparser.add_parser(
"kill_job", help="kills a torchelastic job on asg"
)
parser_kill_job.add_argument(dest="job_name", help="name of the job to kill")
parser_kill_job.set_defaults(func=kill_job)
# -----------------------------------------
# List hosts in job
# -----------------------------------------
parser_list_hosts = subparser.add_parser(
"list_hosts", help="lists InService hosts in the job"
)
parser_list_hosts.add_argument(
dest="job_name", help="name of the job to list the hosts for"
)
parser_list_hosts.set_defaults(func=list_hosts)
# -----------------------------------------
# Upload script
# -----------------------------------------
parser_upload = subparser.add_parser("upload", help="uploads the file/dir to s3")
parser_upload.add_argument(
dest="script_path",
help="script or script dir path (e.g. ~/script.py, s3://..., docker://)",
)
parser_upload.add_argument(
dest="s3_dest",
help="s3 destination (default: s3://{s3_bucket}/{s3_prefix}/{USER}/scripts)",
)
parser_upload.set_defaults(func=upload_script)
# -----------------------------------------
# Configure
# -----------------------------------------
subparser.add_parser("configure", help="configures petctl")
# -----------------------------------------
# Setup
# -----------------------------------------
parser_setup = subparser.add_parser(
"setup", help="creates necessary aws resources and outputs a specs file"
)
parser_setup.add_argument(
"--region", default="us-west-2", help="aws region to setup on"
)
parser_setup.add_argument(
"--s3_bucket",
help="s3 bucket to use for running petctl (if empty, one is created)",
)
parser_setup.add_argument(
"--efs_id", help="efs id to use, if empty, one is created"
)
petctl_args, script_args = split_args(args[1:])
parsed = parser.parse_args(petctl_args)
parsed.script_args = script_args
return parsed
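# Example invocations accepted by the parser above (a hedged sketch; the
# job name, sizes and script path are placeholders):
#
#   petctl setup --region us-west-2
#   petctl run_job --name my_job --size 4 ~/script.py -- --epochs 10
#   petctl kill_job my_job
#   petctl list_hosts my_job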
def load_specs_json(file):
log.info(f"Loading launch specs from: {args.specs_file}")
with open(file) as f:
return json.load(f)
def run_job(session, specs_json, args):
job_name = args.name
script_args = args.script_args
rdzv_specs = specs_json["rdzv"]
worker_specs = specs_json["worker"]
script_url = urlparse(args.script_path)
scheme = script_url.scheme
if scheme == "docker":
# docker://tmp/script.py -> tmp/script.py (relative to working dir in docker)
# docker:///tmp/script.py -> /tmp/script.py (absolute path in docker)
script = script_url.netloc + script_url.path
elif scheme == "s3":
# fetch_and_run supports s3:// so just pass through
script = args.script_path
else:
s3_bucket = worker_specs["s3_bucket"]
s3_prefix = worker_specs["s3_prefix"]
script = S3(session).cp(args.script_path, s3_bucket, f"{s3_prefix}/{job_name}")
asg = AutoScalingGroup(session)
rdzv_asg_name = f"{job_name}_rdzv"
worker_asg_name = f"{job_name}_worker"
# create a single node asg to host the etcd server for rendezvous
etcd_server_hostname = asg.create_asg_sync(rdzv_asg_name, size=1, **rdzv_specs)[0]
rdzv_endpoint = f"{etcd_server_hostname}:2379"
# allow overriding instance types from cli
if args.instance_type:
worker_specs["instance_type"] = args.instance_type
worker_specs["rdzv_endpoint"] = rdzv_endpoint
worker_specs["job_name"] = job_name
worker_specs["script"] = script
worker_specs["args"] = " ".join(script_args)
worker_specs["user"] = getpass.getuser()
instance_type = worker_specs["instance_type"]
script_args_str = worker_specs["args"]
log.info(
f"\n------------------------------------------------------------------\n"
f"Starting job...\n"
f" job name : {job_name}\n"
f" instance type: {instance_type}\n"
f" size : {args.size} (min={args.min_size}, max={args.max_size})\n"
f" rdzv endpoint: {rdzv_endpoint}\n"
f" cmd : {script}\n"
f" cmd args : {script_args_str}\n"
f"------------------------------------------------------------------\n"
)
asg.create_asg(
worker_asg_name, args.size, args.min_size, args.max_size, **worker_specs
)
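# Quick sanity sketch of the scheme handling above, using only the standard
# library (urllib.parse semantics):
#
#   urlparse("docker://tmp/script.py")   # netloc="tmp", path="/script.py"
#   urlparse("docker:///tmp/script.py")  # netloc="",    path="/tmp/script.py"
#
# so `script_url.netloc + script_url.path` yields "tmp/script.py" (relative
# inside the container) and "/tmp/script.py" (absolute) respectively.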
def kill_job(session, specs_json, args):
job_name = args.job_name
log.info(f"Killing job {job_name}")
asg = AutoScalingGroup(session)
asg.delete_asg(f"{job_name}_rdzv")
asg.delete_asg(f"{job_name}_worker")
def upload_script(session, specs_json, args):
script_path = args.script_path
s3_dest = args.s3_dest
if not s3_dest:
s3_bucket = specs_json["s3_bucket"]
s3_prefix = os.path.join(specs_json["s3_prefix"], getpass.getuser())
else:
s3_bucket = urlparse(s3_dest).netloc
s3_prefix = urlparse(s3_dest).path.strip("/")
log.info(f"Uploading: {script_path} to s3://{s3_bucket}/{s3_prefix}")
s3 = S3(session)
url = s3.cp(script_path, s3_bucket, s3_prefix)
log.info(f"Finished uploading to: {url}")
def list_hosts(session, specs_json, args):
job_name = args.job_name
asg = AutoScalingGroup(session)
asgs = [f"{job_name}_rdzv", f"{job_name}_worker"]
hosts = {}
for asg_name in asgs:
instance_ids, hostnames = asg.list_hostnames(asg_name)
hosts[asg_name] = zip(instance_ids, hostnames)
print(f"\n--------------------------------------------------------------")
for asg_name in hosts:
print(f"Hosts in {asg_name}:")
for i, host in enumerate(hosts[asg_name], start=1):
instance_id = host[0]
public_dns = host[1]
print(f" {i}) {instance_id} ({public_dns})")
print(f"--------------------------------------------------------------")
print("To SSH run:")
print(f"\taws ssm start-session --target <instance_id>")
print(f"--------------------------------------------------------------")
def configure(args):
"""
Configures petctl. Writes a simple json config file indicating
the specs file to use and the aws region to the petctl config directory
(default ~/.petctl). Prompts the user to input the specs file location
and aws region.
"""
while True:
specs_file = input(
"Absolute path to specs file (e.g. /home/${USER}/specs.json): "
)
if os.path.isfile(specs_file):
break
print(f"[{specs_file}] does not exist! Provide an existing path")
while True:
region = input("Default aws region to use (e.g. us-west-2): ")
if region:
break
print("AWS region cannot be empty!")
write_config_file(region, specs_file)
log.info(f"Configuration complete. petctl config file: {PETCTL_CONFIG_FILE}")
def setup(args):
"""
Similar to config but creates AWS resources using cfn template
and based on the cfn stack output, creates the specs file for the user,
then writes petctl config.
"""
region = args.region
s3_bucket_name = args.s3_bucket
efs_id = args.efs_id
os.makedirs(PETCTL_DIR, exist_ok=True)
session = auth.get_session(region)
cfn = CloudFormation(session)
cfn.create_specs_file(SPECS_FILE, s3_bucket_name, efs_id)
write_config_file(region, SPECS_FILE)
log.info(f"Setup complete. petctl config file: {PETCTL_CONFIG_FILE}")
def write_config_file(region, specs_file):
petctl_config = {"specs_file": specs_file, "region": region}
os.makedirs(PETCTL_DIR, exist_ok=True)
with open(PETCTL_CONFIG_FILE, "w+") as f:
json.dump(petctl_config, f, indent=4)
def load_configuration():
if os.path.isfile(PETCTL_CONFIG_FILE):
with open(PETCTL_CONFIG_FILE) as f:
return json.load(f)
else:
return {}
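# Sketch of the config file written by write_config_file and read back by
# load_configuration (values are placeholders):
#
#   {
#       "specs_file": "/home/me/.petctl/specs.json",
#       "region": "us-west-2"
#   }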
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
petctl_configs = load_configuration()
args = parse_arguments(sys.argv, **petctl_configs)
if args.command == "setup":
args = parse_arguments(sys.argv)
setup(args)
elif args.command == "configure":
configure(args)
else:
        if not petctl_configs:
            log.error(
                f"{PETCTL_CONFIG_FILE} not found or is empty,"
                f" consider running: petctl setup|configure"
            )
            sys.exit(1)
        region = args.region
specs_json = load_specs_json(args.specs_file)
session = auth.get_session(region)
args.func(session, specs_json, args)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
import tarfile as tar
import tempfile
log = logging.getLogger(__name__)
class S3:
def __init__(self, session):
self._session = session
self._s3 = session.client("s3")
def cp(self, target_path, bucket, key):
"""
Uploads target_path to s3://bucket/key. If the target_path is a file
then uploads to s3://bucket/key/file_name, if the target_path is a
directory, then a tarball is created with the contents of target_path
and uploaded to s3://bucket/key/dir_name.tar.gz. The tar is created as
if created by running the command:
cd target_path && tar xzf /tmp/$(basename target_path).tar.gz *
Returns the destination s3 url
"""
target_basename = os.path.basename(target_path)
if os.path.isdir(target_path):
tmpdir = tempfile.mkdtemp(prefix="petctl_")
tar_basename = f"{target_basename}.tar.gz"
tar_file = os.path.join(tmpdir, tar_basename)
log.info(f"Compressing {target_path} into {tar_basename}")
with tar.open(tar_file, "x:gz") as f:
f.add(target_path, arcname="", recursive=True)
dest_key = f"{key}/{tar_basename}"
target_file = tar_file
else:
tmpdir = None
dest_key = f"{key}/{target_basename}"
target_file = target_path
log.info(f"Uploading {target_file} to s3://{bucket}/{dest_key}")
self._s3.upload_file(target_file, bucket, dest_key)
if tmpdir:
log.info(f"Deleting tmp dir: {tmpdir}")
shutil.rmtree(tmpdir)
return f"s3://{bucket}/{dest_key}"
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from enum import Enum, unique
from jinja2 import Template
from util import wait_for
log = logging.getLogger(__name__)
@unique
class Accelerator(Enum):
NONE = 0
GPU = 1
@classmethod
def get_accelerator(cls, instance_type):
"""
get_accelerator("p3.2xlarge") returns Accelerator.GPU
get_accelerator("i3.xlarge") returns Accelerator.NONE
"""
instance_accelerators = {
"g2": Accelerator.GPU,
"g3": Accelerator.GPU,
"g4": Accelerator.GPU,
"p2": Accelerator.GPU,
"p3": Accelerator.GPU,
}
instance_family = instance_type[0:2]
return instance_accelerators.get(instance_family, Accelerator.NONE)
@classmethod
def from_str(cls, accelerator_str):
"""
returns the enum Accelerator value from a string representation
"""
accelerators = {"none": Accelerator.NONE, "gpu": Accelerator.GPU}
return accelerators.get(accelerator_str.lower(), Accelerator.NONE)
def describe(self):
"""
Returns a string representation of the enum.
This method is intended to be used to label certain AWS
resources in their descriptions/names for informative purposes
e.g. launch template created for GPUs can be named as: torchelastic_gpu
"""
        string_rep = {Accelerator.NONE: "cpu", Accelerator.GPU: "gpu"}
return string_rep.get(self, "unknown_accelerator")
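# Behavior sketch for the helpers above (pure lookups, no AWS calls):
#
#   Accelerator.get_accelerator("p3.2xlarge")  # -> Accelerator.GPU
#   Accelerator.get_accelerator("i3.xlarge")   # -> Accelerator.NONE
#   Accelerator.from_str("GPU")                # -> Accelerator.GPU
#   Accelerator.GPU.describe()                 # -> "gpu"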
class AutoScalingGroup:
def __init__(self, session):
self._session = session
self._asg = session.client("autoscaling")
self._ec2 = session.client("ec2")
def get_user_data(self, user_data_template, **kwargs):
if os.path.isabs(user_data_template):
user_data_path = user_data_template
else:
user_data_path = os.path.join(os.path.dirname(__file__), user_data_template)
with open(user_data_path) as f:
user_data_template = Template(f.read())
user_data = user_data_template.render(**kwargs)
return user_data
def get_ami_id(self, accelerator):
"""
Use EKS optimized AMI since it has everything we need pre-installed
"""
eks_owner_id = "602401143452"
eks_amis = {
Accelerator.NONE: "amazon-eks-node-1.14-v20190927",
Accelerator.GPU: "amazon-eks-gpu-node-1.14-v20190927",
}
res = self._ec2.describe_images(
Filters=[
{"Name": "owner-id", "Values": [eks_owner_id]},
{
"Name": "name",
"Values": [eks_amis.get(accelerator, Accelerator.NONE)],
},
]
)
images = res["Images"]
        assert (
            len(images) == 1
        ), f"Expected exactly one EKS AMI in {self._session.region_name}, found {len(images)}"
return images[0]["ImageId"]
def create_launch_config(
self,
name,
instance_type,
instance_role,
user_data_template,
security_groups=None,
accelerator="gpu",
max_spot_price=None,
ebs_volume_gb=128,
**user_data_kwargs,
):
req = {
"LaunchConfigurationName": name,
"InstanceType": instance_type,
"IamInstanceProfile": instance_role,
"ImageId": self.get_ami_id(Accelerator.from_str(accelerator)),
"SecurityGroups": security_groups,
"AssociatePublicIpAddress": True,
"UserData": self.get_user_data(user_data_template, **user_data_kwargs),
"BlockDeviceMappings": [
{
"DeviceName": "/dev/xvda",
"Ebs": {
"VolumeSize": ebs_volume_gb,
"VolumeType": "gp2",
"DeleteOnTermination": True,
},
}
],
}
if max_spot_price:
req["SpotMaxPrice"] = str(max_spot_price)
log.info(f"Creating launch config: {name}")
self._asg.create_launch_configuration(**req)
def describe_launch_config(self, name):
res = self._asg.describe_launch_configurations(LaunchConfigurationNames=[name])
lcs = res["LaunchConfigurations"]
return lcs[0] if len(lcs) == 1 else None
def delete_launch_config(self, name):
if self.describe_launch_config(name):
log.info(f"Deleting asg launch config: {name}")
self._asg.delete_launch_configuration(LaunchConfigurationName=name)
def create_asg(self, name, size, min_size=None, max_size=None, **kwargs):
"""
Creates an asg. For specifications on kwargs see config/sample_specs.json
"""
if not min_size:
min_size = size
if not max_size:
max_size = size
assert min_size <= size <= max_size
kwargs["size"] = size
kwargs["min_size"] = min_size
kwargs["max_size"] = max_size
self.create_launch_config(name, **kwargs)
log.info(f"Creating autoscaling group: {name}")
self._asg.create_auto_scaling_group(
AutoScalingGroupName=name,
LaunchConfigurationName=name,
VPCZoneIdentifier=",".join(kwargs["subnets"]),
MinSize=min_size,
MaxSize=max_size,
DesiredCapacity=size,
)
def create_asg_sync(self, name, size, min_size=None, max_size=None, **kwargs):
self.create_asg(name, size, min_size, max_size, **kwargs)
_, hostnames = self.get_hostnames(name, size)
return hostnames
def describe_asg(self, name):
res = self._asg.describe_auto_scaling_groups(AutoScalingGroupNames=[name])
asgs = res["AutoScalingGroups"]
num_asgs = len(asgs)
return asgs[0] if num_asgs == 1 else None
def delete_asg(self, name):
if self.describe_asg(name):
log.info(f"Deleting autoscaling group: {name}")
self._asg.delete_auto_scaling_group(
AutoScalingGroupName=name, ForceDelete=True
)
for _ in wait_for(f"instances in {name} to terminate"):
if not self.describe_asg(name):
log.info(f"Deleted autoscaling group: {name}")
break
# launch config needs to be deleted after asg
self.delete_launch_config(name)
def list_hostnames(self, name):
return self.get_hostnames(name, 1)
def get_hostnames(self, name, size):
"""
Waits until the asg has at least <size> instances in "InService"
state and returns their public dns names.
"""
for _ in wait_for(f"autoscaling group: {name} to reach size >= {size}"):
asg_desc = self.describe_asg(name)
if not asg_desc:
                return [], []
else:
instances = asg_desc["Instances"]
ready_instance_ids = [
e["InstanceId"]
for e in instances
if e["LifecycleState"] == "InService"
]
if len(ready_instance_ids) >= size:
paginator = self._ec2.get_paginator("describe_instances")
hostnames = []
instance_ids = []
for e in paginator.paginate(InstanceIds=ready_instance_ids):
for r in e["Reservations"]:
for i in r["Instances"]:
hostnames.append(i["PublicDnsName"])
instance_ids.append(i["InstanceId"])
return instance_ids, hostnames
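# End-to-end usage sketch (every value below is a placeholder and the calls
# create real AWS resources; see config/sample_specs.json for the kwargs):
#
#   asg = AutoScalingGroup(session)
#   hostnames = asg.create_asg_sync(
#       "my_job_rdzv",
#       size=1,
#       instance_type="t3.medium",
#       instance_role="torchelastic_worker_role",
#       user_data_template="config/user_data.template",
#       security_groups=["sg-0123456789abcdef0"],
#       accelerator="none",
#       subnets=["subnet-0123456789abcdef0"],
#   )
#   asg.delete_asg("my_job_rdzv")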
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import boto3
class AwsSessionProvider:
"""
Provides AWS credentials in the form of boto3 Session.
This class may be sub-classed to provide custom methods
of getting aws_access_key_id and aws_secret_access_key.
Child classes are expected to provide overriding implementations
of the three `_get_*` methods below.
When used directly, it follows the default credential
lookup chain as documented in:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
"""
def get_session(self, region=None) -> boto3.Session:
access_key = self._get_access_key()
secret_key = self._get_secret_key()
session_token = self._get_session_token()
# either both access and secret keys are None
# or both are not None; just check one to assume
# the presence of the other
if access_key is None:
return boto3.session.Session()
else:
return boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name=region,
)
def _get_access_key(self):
"""
Returns the aws_access_key_id. Override when sub-classing.
"""
return None
def _get_secret_key(self):
"""
Returns the aws_secret_access_key. Override when sub-classing.
"""
return None
def _get_session_token(self):
"""
Returns the aws_session_token. Override when sub-classing.
"""
return None
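# Sketch of a custom provider sourcing credentials from environment
# variables (a hypothetical subclass, shown for illustration only):
#
#   import os
#
#   class EnvSessionProvider(AwsSessionProvider):
#       def _get_access_key(self):
#           return os.environ.get("MY_AWS_ACCESS_KEY_ID")
#
#       def _get_secret_key(self):
#           return os.environ.get("MY_AWS_SECRET_ACCESS_KEY")
#
#       def _get_session_token(self):
#           return os.environ.get("MY_AWS_SESSION_TOKEN")
#
#   session = EnvSessionProvider().get_session("us-west-2")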
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .session import AwsSessionProvider
def get_session(region):
return AwsSessionProvider().get_session(region)
try:
from .static_init import * # noqa: F401 F403
except ModuleNotFoundError:
pass
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributed.launcher.api import ( # noqa F401
elastic_launch,
launch_agent,
LaunchConfig,
)
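# Usage sketch of the re-exported API. The exact LaunchConfig fields vary
# across PyTorch releases, so treat this as an assumption, not a spec:
#
#   def trainer(x):
#       return x * 2
#
#   config = LaunchConfig(
#       min_nodes=1, max_nodes=1, nproc_per_node=2,
#       rdzv_backend="c10d", rdzv_endpoint="localhost:29400",
#   )
#   results = elastic_launch(config, trainer)(21)  # dict of rank -> 42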
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["LOGLEVEL"] = "INFO"
# Since the logger is initialized during the import statement,
# the log level should be set first.
from torch.distributed.run import main as run_main
def main(args=None) -> None:
run_main(args)
if __name__ == "__main__":
main()
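# Typical invocation sketch (flags follow torch.distributed.run; hedged,
# consult `python -m torch.distributed.run --help` for the full list):
#
#   python run.py --nnodes 1 --nproc_per_node 4 \
#       --rdzv_backend c10d --rdzv_endpoint localhost:29400 \
#       train.py --epochs 10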
|
from subprocess import check_output, STDOUT, CalledProcessError
import sys
import pytest
import glob
PYTHON_CODE_DIR = "python_code"
ALL_FILES = glob.glob(PYTHON_CODE_DIR + "/*.py")
@pytest.mark.parametrize('file_path', ALL_FILES)
def test_run_file(file_path):
if 'nvidia' in file_path:
        # FIXME: NVIDIA model checkpoints are on cuda
pytest.skip("temporarily disabled")
if 'pytorch_fairseq_translation' in file_path:
pytest.skip("temporarily disabled")
if 'ultralytics_yolov5' in file_path:
        # FIXME torch.nn.modules.module.ModuleAttributeError: 'autoShape' object has no attribute 'fuse'
pytest.skip("temporarily disabled")
if 'huggingface_pytorch-transformers' in file_path:
        # FIXME torch.nn.modules.module.ModuleAttributeError: 'autoShape' object has no attribute 'fuse'
pytest.skip("temporarily disabled")
if 'pytorch_fairseq_roberta' in file_path:
pytest.skip("temporarily disabled")
    # We just run the python files in a separate sub-process. We really want a
    # subprocess here because otherwise we might run into package version
    # issues: imagine script A that needs torchvision 0.9 and script B that
    # needs torchvision 0.10. If script A is run prior to script B in the same
    # process, script B will still be run with torchvision 0.9 because the only
    # "import torchvision" statement that counts is the first one, and even
    # torch.hub sys.path shenanigans can do nothing about this. By creating
    # subprocesses we're sure that all file executions are fully independent.
try:
# This is inspired (and heavily simplified) from
# https://github.com/cloudpipe/cloudpickle/blob/343da119685f622da2d1658ef7b3e2516a01817f/tests/testutils.py#L177
out = check_output([sys.executable, file_path], stderr=STDOUT)
print(out.decode())
except CalledProcessError as e:
raise RuntimeError(f"Script {file_path} errored with output:\n{e.output.decode()}")
|
valid_tags = ['vision',
'nlp',
'generative',
'audio',
'scriptable',
]
|
import argparse
import os
import glob
from urllib.request import urlopen, HTTPError
from tags import valid_tags
import yaml
import mistune
class ValidMD:
def __init__(self, filename):
self.filename = filename
self.required_user_fields = ['title', 'summary', 'image', 'author',
'tags', 'github-link', 'category']
self.optional_image_fields = ['featured_image_1', 'featured_image_2']
self.valid_tags = valid_tags
self.valid_categories = ['researchers', 'developers']
self.required_sections = ['Model Description']
self.optional_demo_link = ['demo-model-link']
def validate_tags(self, tags):
'''
Only allow tags in pre-defined set
'''
for t in tags:
if t not in self.valid_tags:
raise ValueError(
'Tag {} is not valid in {}. Valid tag set is {}'
.format(t, self.filename, self.valid_tags))
def validate_category(self, category):
'''
Only allow categories in predefined set
'''
if category not in self.valid_categories:
raise ValueError(
'Category {} is not valid in {}. Choose from {}'
.format(category, self.filename, self.valid_categories))
def validate_link(self, link):
'''
Make sure the github repo exists
'''
try:
urlopen(link)
except HTTPError:
raise ValueError('{} is not valid url in {}'
.format(link, self.filename))
def validate_image(self, image_name):
'''
Make sure reference image exists in images/
'''
images = [os.path.basename(i) for i in glob.glob('images/*')]\
+ ['pytorch-logo.png', 'no-image']
if image_name not in images:
raise ValueError('Image {} referenced in {} not found in images/'
.format(image_name, self.filename))
def validate_header(self, header):
'''
Make sure the header is in the required format
'''
assert header['layout'] == 'hub_detail'
assert header['background-class'] == 'hub-background'
assert header['body-class'] == 'hub'
for field in self.required_user_fields:
header[field] # assert that it exists
self.validate_tags(header['tags'])
self.validate_link(header['github-link'])
self.validate_image(header['image'])
self.validate_category(header['category'])
for field in self.optional_demo_link:
if field in header.keys():
self.validate_link(header[field])
for field in self.optional_image_fields:
if field in header.keys():
self.validate_image(header[field])
for k in header.keys():
if not k.endswith('-link'):
self.no_extra_colon(k, header[k])
def no_extra_colon(self, field, value):
# Jekyll doesn't build with extra colon in these fields
if ':' in str(value):
raise ValueError('Remove extra \':\' in field {} with value {} in file {}'
.format(field, value, self.filename))
def validate_markdown(self, markdown):
m = mistune.create_markdown(renderer=mistune.AstRenderer())
for block in m(markdown):
if block['type'] == 'heading':
            # we don't want a colon after section names
text_children = [c for c in block['children'] if c['type'] == 'text']
for c in text_children:
assert not c['text'].endswith(':')
if c['text'] in self.required_sections:
self.required_sections.remove(c['text'])
try:
assert len(self.required_sections) == 0
except AssertionError as e:
print("Missing required sections: {}".format(self.required_sections))
raise e
def check_markdown_file(self):
print('Checking {}...'.format(self.filename))
# separate header and markdown.
# Then, check header and markdown separately
header = []
markdown = []
header_read = False
with open(self.filename, 'r') as f:
for line in f:
if line.startswith('---'):
header_read = not header_read
continue
if header_read == True:
header += [line]
else:
markdown += [line]
        # checks that it's valid yaml
header = yaml.safe_load(''.join(header))
assert header, "Failed to parse a valid yaml header"
self.validate_header(header)
# check markdown
markdown = "".join(markdown)
self.validate_markdown(markdown)
def sanity_check():
for f in glob.glob('*.md'):
# Skip documentation
if f in ('README.md', 'CONTRIBUTING.md', 'CODE_OF_CONDUCT.md'):
continue
ValidMD(f).check_markdown_file()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', default=None, help='filename')
args = parser.parse_args()
if args.file:
ValidMD(args.file).check_markdown_file()
else:
sanity_check()
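# Sketch of a markdown front-matter header this validator accepts (values
# are placeholders; see required_user_fields above):
#
#   ---
#   layout: hub_detail
#   background-class: hub-background
#   body-class: hub
#   title: My Model
#   summary: One-line description without extra colons
#   image: no-image
#   author: Someone
#   tags: [vision]
#   github-link: https://github.com/pytorch/hub
#   category: researchers
#   ---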
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from elf.options import auto_import_options, PyOptionSpec
from rlpytorch import Model
from elfgames.go.multiple_prediction import MultiplePrediction
class Model_Policy(Model):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addBoolOption(
'bn',
'toggles batch norm',
True)
spec.addBoolOption(
'leaky_relu',
'toggles leaky ReLU',
True)
spec.addIntOption(
'num_layer',
'number of layers',
39)
spec.addIntOption(
'dim',
'model dimension',
128)
return spec
@auto_import_options
def __init__(self, option_map, params):
super().__init__(option_map, params)
self.board_size = params["board_size"]
self.num_future_actions = params["num_future_actions"]
self.num_planes = params["num_planes"]
# print("#future_action: " + str(self.num_future_actions))
# print("#num_planes: " + str(self.num_planes))
# Simple method. multiple conv layers.
self.convs = []
self.convs_bn = []
last_planes = self.num_planes
for i in range(self.options.num_layer):
conv = nn.Conv2d(last_planes, self.options.dim, 3, padding=1)
conv_bn = (nn.BatchNorm2d(self.options.dim)
if self.options.bn
else lambda x: x)
setattr(self, "conv" + str(i), conv)
self.convs.append(conv)
setattr(self, "conv_bn" + str(i), conv_bn)
self.convs_bn.append(conv_bn)
last_planes = self.options.dim
self.final_conv = nn.Conv2d(
self.options.dim, self.num_future_actions, 3, padding=1)
# Softmax as the final layer
self.softmax = nn.Softmax(dim=1)
self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU()
def forward(self, x):
s = self._var(x["s"])
for conv, conv_bn in zip(self.convs, self.convs_bn):
s = conv_bn(self.relu(conv(s)))
output = self.final_conv(s)
pis = []
d = self.board_size * self.board_size
for i in range(self.num_future_actions):
pis.append(self.softmax(output[:, i].contiguous().view(-1, d)))
return dict(pis=pis, pi=pis[0])
# Format: key, [model, method]
Models = {
"df_policy": [Model_Policy, MultiplePrediction]
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from elf import GCWrapper, ContextArgs, MoreLabels
from elf.options import auto_import_options, PyOptionSpec
import _elfgames_go_inference as go
# from server_addrs import addrs
class Loader(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'preload_sgf',
'TODO: fill this help message in',
'')
spec.addIntOption(
'preload_sgf_move_to',
'TODO: fill this help message in',
-1)
spec.addStrOption(
'mode',
'TODO: fill this help message in',
"online")
spec.addBoolOption(
'actor_only',
'TODO: fill this help message in',
False)
spec.addIntOption(
'num_reset_ranking',
'TODO: fill this help message in',
5000)
spec.addBoolOption(
'verbose',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'print_result',
'TODO: fill this help message in',
False)
spec.addIntOption(
'data_aug',
            'specify data augmentation, 0-7, -1 means random',
-1)
spec.addIntOption(
'num_games_per_thread',
('For offline mode, it is the number of concurrent games per '
'thread, used to increase diversity of games; for selfplay mode, '
'it is the number of games played at each thread, and after that '
'we need to call restartAllGames() to resume.'),
-1)
spec.addIntOption(
'num_future_actions',
'TODO: fill this help message in',
1)
spec.addIntOption(
'move_cutoff',
'Cutoff ply in replay',
-1)
spec.addBoolOption(
'black_use_policy_network_only',
'TODO: fill this help message in',
False)
spec.addIntOption(
'ply_pass_enabled',
'TODO: fill this help message in',
0)
spec.addBoolOption(
'use_mcts',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'use_df_feature',
'TODO: fill this help message in',
False)
spec.addStrOption(
'dump_record_prefix',
'TODO: fill this help message in',
'')
spec.addFloatOption(
'resign_thres',
'TODO: fill this help message in',
0.0)
spec.addBoolOption(
'following_pass',
'TODO: fill this help message in',
False)
spec.addIntOption(
'gpu',
'TODO: fill this help message in',
-1)
spec.addBoolOption(
'parameter_print',
'TODO: fill this help message in',
True)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.addIntOption(
'batchsize2',
'batch size',
-1)
spec.addFloatOption(
'eval_winrate_thres',
            'Win rate threshold for evaluation',
0.55)
spec.addIntOption(
'suicide_after_n_games',
'return after n games have finished, -1 means it never ends',
-1)
spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels)))
return spec
@auto_import_options
def __init__(self, option_map):
self.context_args = ContextArgs(option_map)
self.more_labels = MoreLabels(option_map)
def _set_params(self):
co = go.ContextOptions()
self.context_args.initialize(co)
co.job_id = os.environ.get("job_id", "local")
if self.options.parameter_print:
co.print()
opt = go.GameOptions()
opt.seed = 0
opt.mode = self.options.mode
opt.use_mcts = self.options.use_mcts
opt.use_df_feature = self.options.use_df_feature
opt.dump_record_prefix = self.options.dump_record_prefix
opt.verbose = self.options.verbose
opt.black_use_policy_network_only = \
self.options.black_use_policy_network_only
opt.data_aug = self.options.data_aug
opt.ply_pass_enabled = self.options.ply_pass_enabled
opt.num_reset_ranking = self.options.num_reset_ranking
opt.move_cutoff = self.options.move_cutoff
opt.num_games_per_thread = self.options.num_games_per_thread
opt.following_pass = self.options.following_pass
opt.resign_thres = self.options.resign_thres
opt.preload_sgf = self.options.preload_sgf
opt.preload_sgf_move_to = self.options.preload_sgf_move_to
opt.print_result = self.options.print_result
self.max_batchsize = max(
self.options.batchsize, self.options.batchsize2) \
if self.options.batchsize2 > 0 \
else self.options.batchsize
co.batchsize = self.max_batchsize
GC = go.GameContext(co, opt)
if self.options.parameter_print:
print("**** Options ****")
print(opt.info())
print("*****************")
print("Version: ", GC.ctx().version())
return co, GC, opt
def initialize(self):
co, GC, opt = self._set_params()
params = GC.getParams()
if self.options.parameter_print:
print("Mode: ", opt.mode)
print("Num Actions: ", params["num_action"])
desc = {}
if self.options.mode == "online":
desc["human_actor"] = dict(
input=["s"],
reply=["pi", "a", "V"],
batchsize=1,
)
# Used for MCTS/Direct play.
desc["actor_black"] = dict(
input=["s"],
reply=["pi", "V", "a", "rv"],
timeout_usec=10,
batchsize=co.mcts_options.num_rollouts_per_batch
)
else:
raise "No such mode: " + self.options.mode
params.update(dict(
num_group=1 if self.options.actor_only else 2,
T=self.options.T,
))
self.more_labels.add_labels(desc)
return GCWrapper(
GC,
self.max_batchsize,
desc,
num_recv=2,
gpu=(self.options.gpu
if (self.options.gpu is not None and self.options.gpu >= 0)
else None),
use_numpy=False,
params=params,
verbose=self.options.parameter_print)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from elf import GCWrapper, ContextArgs, MoreLabels
from elf.options import auto_import_options, PyOptionSpec
import _elfgames_go as go
from server_addrs import addrs
class Loader(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'preload_sgf',
'TODO: fill this help message in',
'')
spec.addIntOption(
'preload_sgf_move_to',
'TODO: fill this help message in',
-1)
spec.addBoolOption(
'actor_only',
'TODO: fill this help message in',
False)
spec.addStrListOption(
'list_files',
'Provide a list of json files for offline training',
[])
spec.addIntOption(
'port',
'TODO: fill this help message in',
5556)
spec.addStrOption(
'server_addr',
'TODO: fill this help message in',
'')
spec.addStrOption(
'server_id',
'TODO: fill this help message in',
'')
spec.addIntOption(
'q_min_size',
'TODO: fill this help message in',
10)
spec.addIntOption(
'q_max_size',
'TODO: fill this help message in',
1000)
spec.addIntOption(
'num_reader',
'TODO: fill this help message in',
50)
spec.addIntOption(
'num_reset_ranking',
'TODO: fill this help message in',
5000)
spec.addIntOption(
'client_max_delay_sec',
            'Maximum allowed delay in seconds. If the client '
            'hasn\'t responded after that, we consider it dead.',
1200)
spec.addBoolOption(
'verbose',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'keep_prev_selfplay',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'print_result',
'TODO: fill this help message in',
False)
spec.addIntOption(
'data_aug',
            'specify data augmentation, 0-7, -1 means random',
-1)
spec.addIntOption(
'ratio_pre_moves',
('how many moves to perform in each thread, before we use the '
'data to train the model'),
0)
spec.addFloatOption(
'start_ratio_pre_moves',
('how many moves to perform in each thread, before we use the '
'first sgf file to train the model'),
0.5)
spec.addIntOption(
'num_games_per_thread',
('For offline mode, it is the number of concurrent games per '
'thread, used to increase diversity of games; for selfplay mode, '
'it is the number of games played at each thread, and after that '
'we need to call restartAllGames() to resume.'),
-1)
spec.addIntOption(
'expected_num_clients',
'Expected number of clients',
-1
)
spec.addIntOption(
'num_future_actions',
'TODO: fill this help message in',
1)
spec.addIntOption(
'move_cutoff',
'Cutoff ply in replay',
-1)
spec.addStrOption(
'mode',
'TODO: fill this help message in',
'online')
spec.addBoolOption(
'black_use_policy_network_only',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'white_use_policy_network_only',
'TODO: fill this help message in',
False)
spec.addIntOption(
'ply_pass_enabled',
'TODO: fill this help message in',
0)
spec.addBoolOption(
'use_mcts',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'use_mcts_ai2',
'TODO: fill this help message in',
False)
spec.addFloatOption(
'white_puct',
            'PUCT for white when it is > 0.0. If it is -1 then we use '
            'the same puct for both sides (specified by mcts_options). '
            'A HACK to use different puct for different models. Should '
            'be replaced by a more systematic approach.',
-1.0)
spec.addIntOption(
'white_mcts_rollout_per_batch',
'white mcts rollout per batch',
-1)
spec.addIntOption(
'white_mcts_rollout_per_thread',
'white mcts rollout per thread',
-1)
spec.addBoolOption(
'use_df_feature',
'TODO: fill this help message in',
False)
spec.addStrOption(
'dump_record_prefix',
'TODO: fill this help message in',
'')
spec.addIntOption(
'policy_distri_cutoff',
'TODO: fill this help message in',
0)
spec.addFloatOption(
'resign_thres',
'TODO: fill this help message in',
0.0)
spec.addBoolOption(
'following_pass',
'TODO: fill this help message in',
False)
spec.addIntOption(
'selfplay_timeout_usec',
'TODO: fill this help message in',
0)
spec.addIntOption(
'gpu',
'TODO: fill this help message in',
-1)
spec.addBoolOption(
'policy_distri_training_for_all',
'TODO: fill this help message in',
False)
spec.addBoolOption(
'parameter_print',
'TODO: fill this help message in',
True)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.addIntOption(
'batchsize2',
'batch size',
-1)
spec.addIntOption(
'T',
'number of timesteps',
6)
spec.addIntOption(
'selfplay_init_num',
('Initial number of selfplay games to generate before training a '
'new model'),
2000)
spec.addIntOption(
'selfplay_update_num',
('Additional number of selfplay games to generate after a model '
'is updated'),
1000)
spec.addBoolOption(
'selfplay_async',
('Whether to use async mode in selfplay'),
False)
spec.addIntOption(
'eval_num_games',
('number of evaluation to be performed to decide whether a model '
'is better than the other'),
400)
spec.addFloatOption(
'eval_winrate_thres',
            'Win rate threshold for evaluation',
0.55)
spec.addIntOption(
'eval_old_model',
('If specified, then we directly switch to evaluation mode '
'between the loaded model and the old model specified by this '
'switch'),
-1)
spec.addStrOption(
'eval_model_pair',
('If specified for df_selfplay.py, then the two models will be '
'evaluated on this client'),
'')
spec.addStrOption(
'comment',
'Comment for this run',
'')
spec.addBoolOption(
'cheat_eval_new_model_wins_half',
'When enabled, in evaluation mode, when the game '
'finishes, the player with the most recent model gets 100%% '
            'win rate half of the time. '
            'This is used to test the framework.',
False)
spec.addBoolOption(
'cheat_selfplay_random_result',
            'When enabled, in selfplay mode the result of the game is random. '
            'This is used to test the framework.',
False)
spec.addIntOption(
'suicide_after_n_games',
'return after n games have finished, -1 means it never ends',
-1)
spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels)))
return spec
@auto_import_options
def __init__(self, option_map):
self.context_args = ContextArgs(option_map)
self.more_labels = MoreLabels(option_map)
def _set_params(self):
co = go.ContextOptions()
self.context_args.initialize(co)
co.job_id = os.environ.get("job_id", "local")
if self.options.parameter_print:
co.print()
opt = go.GameOptions()
opt.seed = 0
opt.list_files = self.options.list_files
if self.options.server_addr:
opt.server_addr = self.options.server_addr
else:
if self.options.server_id:
opt.server_addr = addrs[self.options.server_id]
opt.server_id = self.options.server_id
else:
opt.server_addr = ""
opt.server_id = ""
opt.port = self.options.port
opt.mode = self.options.mode
opt.use_mcts = self.options.use_mcts
opt.use_mcts_ai2 = self.options.use_mcts_ai2
opt.use_df_feature = self.options.use_df_feature
opt.dump_record_prefix = self.options.dump_record_prefix
opt.policy_distri_training_for_all = \
self.options.policy_distri_training_for_all
opt.verbose = self.options.verbose
opt.black_use_policy_network_only = \
self.options.black_use_policy_network_only
opt.white_use_policy_network_only = \
self.options.white_use_policy_network_only
opt.data_aug = self.options.data_aug
opt.ratio_pre_moves = self.options.ratio_pre_moves
opt.q_min_size = self.options.q_min_size
opt.q_max_size = self.options.q_max_size
opt.num_reader = self.options.num_reader
opt.start_ratio_pre_moves = self.options.start_ratio_pre_moves
opt.ply_pass_enabled = self.options.ply_pass_enabled
opt.num_future_actions = self.options.num_future_actions
opt.num_reset_ranking = self.options.num_reset_ranking
opt.move_cutoff = self.options.move_cutoff
opt.policy_distri_cutoff = self.options.policy_distri_cutoff
opt.num_games_per_thread = self.options.num_games_per_thread
opt.following_pass = self.options.following_pass
opt.resign_thres = self.options.resign_thres
opt.preload_sgf = self.options.preload_sgf
opt.preload_sgf_move_to = self.options.preload_sgf_move_to
opt.keep_prev_selfplay = self.options.keep_prev_selfplay
opt.expected_num_clients = self.options.expected_num_clients
opt.white_puct = self.options.white_puct
opt.white_mcts_rollout_per_batch = \
self.options.white_mcts_rollout_per_batch
opt.white_mcts_rollout_per_thread = \
self.options.white_mcts_rollout_per_thread
opt.client_max_delay_sec = self.options.client_max_delay_sec
opt.print_result = self.options.print_result
opt.selfplay_init_num = self.options.selfplay_init_num
opt.selfplay_update_num = self.options.selfplay_update_num
opt.selfplay_async = self.options.selfplay_async
opt.eval_num_games = self.options.eval_num_games
opt.eval_thres = self.options.eval_winrate_thres
opt.cheat_eval_new_model_wins_half = \
self.options.cheat_eval_new_model_wins_half
opt.cheat_selfplay_random_result = \
self.options.cheat_selfplay_random_result
self.max_batchsize = max(
self.options.batchsize, self.options.batchsize2) \
if self.options.batchsize2 > 0 \
else self.options.batchsize
co.batchsize = self.max_batchsize
GC = go.GameContext(co, opt)
if self.options.parameter_print:
print("**** Options ****")
print(opt.info())
print("*****************")
print("Version: ", GC.ctx().version())
return co, GC, opt
def initialize(self):
co, GC, opt = self._set_params()
params = GC.getParams()
if self.options.parameter_print:
print("Mode: ", opt.mode)
print("Num Actions: ", params["num_action"])
desc = {}
if self.options.mode == "online":
desc["human_actor"] = dict(
input=["s"],
reply=["pi", "a", "V"],
batchsize=1,
)
# Used for MCTS/Direct play.
desc["actor_black"] = dict(
input=["s"],
reply=["pi", "V", "a", "rv"],
timeout_usec=10,
batchsize=co.mcts_options.num_rollouts_per_batch
)
elif self.options.mode == "selfplay":
# Used for MCTS/Direct play.
desc["actor_black"] = dict(
input=["s"],
reply=["pi", "V", "a", "rv"],
batchsize=self.options.batchsize,
timeout_usec=self.options.selfplay_timeout_usec,
)
desc["actor_white"] = dict(
input=["s"],
reply=["pi", "V", "a", "rv"],
batchsize=self.options.batchsize2
if self.options.batchsize2 > 0
else self.options.batchsize,
timeout_usec=self.options.selfplay_timeout_usec,
)
desc["game_end"] = dict(
batchsize=1,
)
desc["game_start"] = dict(
batchsize=1,
input=["black_ver", "white_ver"],
reply=None
)
elif self.options.mode == "train" or \
self.options.mode == "offline_train":
desc["train"] = dict(
input=["s", "offline_a", "winner", "mcts_scores", "move_idx",
"selfplay_ver"],
reply=None
)
desc["train_ctrl"] = dict(
input=["selfplay_ver"],
reply=None,
batchsize=1
)
else:
raise "No such mode: " + self.options.mode
params.update(dict(
num_group=1 if self.options.actor_only else 2,
T=self.options.T,
))
self.more_labels.add_labels(desc)
return GCWrapper(
GC,
self.max_batchsize,
desc,
num_recv=2,
gpu=(self.options.gpu
if (self.options.gpu is not None and self.options.gpu >= 0)
else None),
use_numpy=False,
params=params,
verbose=self.options.parameter_print)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from rlpytorch import add_err
from rlpytorch.trainer import topk_accuracy
class MultiplePrediction:
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addBoolOption(
'multipred_backprop',
'Whether to backprop the total loss',
True)
return spec
@auto_import_options
def __init__(self, option_map):
self.policy_loss = nn.NLLLoss().cuda()
self.value_loss = nn.MSELoss().cuda()
def update(self, mi, batch, stats):
''' Update given batch '''
# Current timestep.
state_curr = mi["model"](batch)
total_policy_loss = None
eps = 1e-6
targets = batch["offline_a"]
if "pis" not in state_curr:
state_curr["pis"] = [state_curr["pi"]]
for i, pred in enumerate(state_curr["pis"]):
if i == 0:
prec1, prec5 = topk_accuracy(
pred.data, targets[:, i].contiguous(), topk=(1, 5))
stats["top1_acc"].feed(prec1[0])
stats["top5_acc"].feed(prec5[0])
# backward.
loss = self.policy_loss(
(pred + eps).log(), Variable(targets[:, i]))
stats["loss" + str(i)].feed(loss.data[0])
total_policy_loss = add_err(total_policy_loss, loss / (i + 1))
total_value_loss = None
if "V" in state_curr and "winner" in batch:
total_value_loss = self.value_loss(
state_curr["V"], Variable(batch["winner"]))
stats["total_policy_loss"].feed(total_policy_loss.data[0])
if total_value_loss is not None:
stats["total_value_loss"].feed(total_value_loss.data[0])
total_loss = total_policy_loss + total_value_loss
else:
total_loss = total_policy_loss
stats["total_loss"].feed(total_loss.data[0])
if self.options.multipred_backprop:
total_loss.backward()
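# Minimal, self-contained sketch of the policy-loss recipe used above:
# NLLLoss applied to the log of an (already softmaxed) prediction. Shapes
# and values are illustrative only, not tied to the Go action space.
if __name__ == "__main__":
    import torch
    torch.manual_seed(0)
    pred = torch.softmax(torch.randn(4, 9), dim=1)  # like one entry of "pis"
    targets = torch.randint(0, 9, (4,))             # like offline_a[:, i]
    eps = 1e-6
    demo_loss = nn.NLLLoss()((pred + eps).log(), targets)
    print(float(demo_loss))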
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from elf.options import auto_import_options, PyOptionSpec
from rlpytorch import Model
from elfgames.go.mcts_prediction import MCTSPrediction
class Model_PolicyValue(Model):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addBoolOption(
'bn',
'toggles batch norm',
True)
spec.addBoolOption(
'leaky_relu',
'toggles leaky ReLU',
True)
spec.addIntOption(
'num_block',
'number of blocks',
20)
spec.addIntOption(
'dim',
'model dimension',
128)
return spec
@auto_import_options
def __init__(self, option_map, params):
super().__init__(option_map, params)
self.board_size = params["board_size"]
self.num_future_actions = params["num_future_actions"]
self.num_planes = params["num_planes"]
# print("#future_action: " + str(self.num_future_actions))
# print("#num_planes: " + str(self.num_planes))
# Network structure of AlphaGo Zero
# https://www.nature.com/nature/journal/v550/n7676/full/nature24270.html
# Simple method. multiple conv layers.
self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU()
self.convs = []
last_planes = self.num_planes
self.init_conv = self._conv_layer(last_planes)
for i in range(self.options.num_block):
conv_lower = self._conv_layer()
conv_upper = self._conv_layer(relu=False)
setattr(self, "conv_lower" + str(i), conv_lower)
setattr(self, "conv_upper" + str(i), conv_upper)
self.convs.append((conv_lower, conv_upper))
self.pi_final_conv = self._conv_layer(self.options.dim, 2, 1)
self.value_final_conv = self._conv_layer(self.options.dim, 1, 1)
d = self.board_size ** 2
self.pi_linear = nn.Linear(d * 2, d)
self.value_linear1 = nn.Linear(d, 256)
self.value_linear2 = nn.Linear(256, 1)
# Softmax as the final layer
self.logsoftmax = nn.LogSoftmax(dim=1)
self.tanh = nn.Tanh()
def _conv_layer(
self,
input_channel=None,
output_channel=None,
kernel=3,
relu=True):
if input_channel is None:
input_channel = self.options.dim
if output_channel is None:
output_channel = self.options.dim
layers = []
layers.append(nn.Conv2d(
input_channel,
output_channel,
kernel,
padding=(kernel // 2),
))
if self.options.bn:
layers.append(nn.BatchNorm2d(output_channel))
if relu:
layers.append(self.relu)
return nn.Sequential(*layers)
def forward(self, x):
s = self._var(x["s"])
s = self.init_conv(s)
for conv_lower, conv_upper in self.convs:
s1 = conv_lower(s)
s1 = conv_upper(s1)
s1 = s1 + s
s = self.relu(s1)
d = self.board_size ** 2
pi = self.pi_final_conv(s)
pi = self.pi_linear(pi.view(-1, d * 2))
logpi = self.logsoftmax(pi)
pi = logpi.exp()
V = self.value_final_conv(s)
V = self.relu(self.value_linear1(V.view(-1, d)))
V = self.value_linear2(V)
V = self.tanh(V)
return dict(logpi=logpi, pi=pi, V=V)
# Format: key, [model, method]
Models = {
"df": [Model_PolicyValue, MCTSPrediction]
}
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import torch.nn as nn
import torch.distributed as dist
from elf.options import auto_import_options, PyOptionSpec
from rlpytorch import Model
from elfgames.go.mcts_prediction import MCTSPrediction
from elfgames.go.multiple_prediction import MultiplePrediction
class Block(Model):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addBoolOption(
'bn',
'toggles batch norm',
True)
spec.addBoolOption(
'leaky_relu',
'toggles leaky ReLU',
False)
spec.addFloatOption(
'bn_momentum',
'batch norm momentum (pytorch style)',
0.1)
spec.addFloatOption(
"bn_eps",
"batch norm running vars eps",
1e-5)
spec.addIntOption(
'dim',
'model dimension',
128)
return spec
@auto_import_options
def __init__(self, option_map, params):
super().__init__(option_map, params)
self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU()
self.conv_lower = self._conv_layer()
self.conv_upper = self._conv_layer(relu=False)
def _conv_layer(
self,
input_channel=None,
output_channel=None,
kernel=3,
relu=True):
if input_channel is None:
input_channel = self.options.dim
if output_channel is None:
output_channel = self.options.dim
layers = []
layers.append(nn.Conv2d(
input_channel,
output_channel,
kernel,
padding=(kernel // 2),
))
if self.options.bn:
layers.append(
nn.BatchNorm2d(output_channel,
momentum=(self.options.bn_momentum or None),
eps=self.options.bn_eps))
if relu:
layers.append(self.relu)
return nn.Sequential(*layers)
def forward(self, s):
s1 = self.conv_lower(s)
s1 = self.conv_upper(s1)
s1 = s1 + s
s = self.relu(s1)
return s
class GoResNet(Model):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_block',
'number of resnet blocks',
20)
spec.merge(Block.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map, params):
super().__init__(option_map, params)
self.blocks = []
for _ in range(self.options.num_block):
self.blocks.append(Block(option_map, params))
self.resnet = nn.Sequential(*self.blocks)
def forward(self, s):
return self.resnet(s)
class Model_PolicyValue(Model):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addBoolOption(
'bn',
'toggles batch norm',
True)
spec.addBoolOption(
'leaky_relu',
'toggles leaky ReLU',
False)
spec.addFloatOption(
'bn_momentum',
'batch norm momentum (pytorch style)',
0.1)
spec.addIntOption(
'num_block',
'number of blocks',
20)
spec.addIntOption(
'dim',
'model dimension',
128)
spec.addBoolOption(
'use_data_parallel',
'TODO: fill this in',
False)
spec.addBoolOption(
'use_data_parallel_distributed',
'TODO: fill this in',
False)
spec.addIntOption(
'dist_rank',
'TODO: fill this in',
-1)
spec.addIntOption(
'dist_world_size',
'TODO: fill this in',
-1)
spec.addStrOption(
'dist_url',
'TODO: fill this in',
'')
spec.addIntOption(
'gpu',
'which gpu to use',
-1)
spec.merge(GoResNet.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map, params):
super().__init__(option_map, params)
self.board_size = params["board_size"]
self.num_future_actions = params["num_future_actions"]
self.num_planes = params["num_planes"]
# print("#future_action: " + str(self.num_future_actions))
# print("#num_planes: " + str(self.num_planes))
# Network structure of AlphaGo Zero
# https://www.nature.com/nature/journal/v550/n7676/full/nature24270.html
# Simple method. multiple conv layers.
self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU()
last_planes = self.num_planes
self.init_conv = self._conv_layer(last_planes)
self.pi_final_conv = self._conv_layer(self.options.dim, 2, 1)
self.value_final_conv = self._conv_layer(self.options.dim, 1, 1)
d = self.board_size ** 2
# Plus 1 for pass.
self.pi_linear = nn.Linear(d * 2, d + 1)
self.value_linear1 = nn.Linear(d, 256)
self.value_linear2 = nn.Linear(256, 1)
# Softmax as the final layer
self.logsoftmax = nn.LogSoftmax(dim=1)
self.tanh = nn.Tanh()
self.resnet = GoResNet(option_map, params)
if torch.cuda.is_available() and self.options.gpu is not None:
self.init_conv.cuda(self.options.gpu)
self.resnet.cuda(self.options.gpu)
if self.options.use_data_parallel:
if self.options.gpu is not None:
self.init_conv = nn.DataParallel(
self.init_conv, output_device=self.options.gpu)
self.resnet = nn.DataParallel(
self.resnet, output_device=self.options.gpu)
self._check_and_init_distributed_model()
def _check_and_init_distributed_model(self):
if not self.options.use_data_parallel_distributed:
return
if not dist.is_initialized():
world_size = self.options.dist_world_size
url = self.options.dist_url
rank = self.options.dist_rank
# This is for SLURM's special use case
if rank == -1:
rank = int(os.environ.get("SLURM_NODEID"))
print("=> Distributed training: world size: {}, rank: {}, URL: {}".
format(world_size, rank, url))
dist.init_process_group(backend="nccl",
init_method=url,
rank=rank,
world_size=world_size)
# Initialize the distributed data parallel model
master_gpu = self.options.gpu
if master_gpu is None or master_gpu < 0:
raise RuntimeError("Distributed training requires "
"to put the model on the GPU, but the GPU is "
"not given in the argument")
# This is needed for distributed model since the distributed model
# initialization will require the model be on the GPU, even though
# the later code will put the same model on the GPU again with
# self.options.gpu, so this should be ok
# self.resnet.cuda(master_gpu)
self.init_conv = nn.parallel.DistributedDataParallel(
self.init_conv)
self.resnet = nn.parallel.DistributedDataParallel(
self.resnet)
def _conv_layer(
self,
input_channel=None,
output_channel=None,
kernel=3,
relu=True):
if input_channel is None:
input_channel = self.options.dim
if output_channel is None:
output_channel = self.options.dim
layers = []
layers.append(nn.Conv2d(
input_channel,
output_channel,
kernel,
padding=(kernel // 2)
))
if self.options.bn:
layers.append(
nn.BatchNorm2d(output_channel,
momentum=(self.options.bn_momentum or None),
eps=self.options.bn_eps))
if relu:
layers.append(self.relu)
return nn.Sequential(*layers)
def prepare_cooldown(self):
try:
for module in self.modules():
if module.__class__.__name__.startswith('BatchNorm'):
module.reset_running_stats()
except Exception as e:
print(e)
print("The module doesn't have method 'reset_running_stats', "
"skipping. Please set bn_momentum to 0.1"
"(for cooldown = 50) in this case")
def forward(self, x):
s = self._var(x["s"])
s = self.init_conv(s)
s = self.resnet(s)
d = self.board_size ** 2
pi = self.pi_final_conv(s)
pi = self.pi_linear(pi.view(-1, d * 2))
logpi = self.logsoftmax(pi)
pi = logpi.exp()
V = self.value_final_conv(s)
V = self.relu(self.value_linear1(V.view(-1, d)))
V = self.value_linear2(V)
V = self.tanh(V)
return dict(logpi=logpi, pi=pi, V=V)
# Format: key, [model, method]
Models = {
"df_pred": [Model_PolicyValue, MultiplePrediction],
"df_kl": [Model_PolicyValue, MCTSPrediction]
}
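# Standalone shape sketch of the policy/value heads defined above, written
# with plain torch so it runs without the ELF option machinery. Dims follow
# the option-spec defaults (dim=128) and a hypothetical 19x19 board:
if __name__ == "__main__":
    board_size, dim = 19, 128
    d = board_size ** 2
    s = torch.randn(2, dim, board_size, board_size)  # stand-in trunk output
    pi = nn.Linear(d * 2, d + 1)(nn.Conv2d(dim, 2, 1)(s).view(-1, d * 2))
    V = torch.tanh(nn.Linear(256, 1)(torch.relu(
        nn.Linear(d, 256)(nn.Conv2d(dim, 1, 1)(s).view(-1, d)))))
    print(pi.shape, V.shape)  # torch.Size([2, 362]) torch.Size([2, 1])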
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from torch.autograd import Variable
import elf.logging as logging
from elf.options import auto_import_options, PyOptionSpec
from rlpytorch.trainer.timer import RLTimer
class MCTSPrediction(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addBoolOption(
'backprop',
'Whether to backprop the total loss',
True)
return spec
@auto_import_options
def __init__(self, option_map):
self.policy_loss = nn.KLDivLoss().cuda()
self.value_loss = nn.MSELoss().cuda()
self.logger = logging.getIndexedLogger(
'elfgames.go.MCTSPrediction-', '')
self.timer = RLTimer()
def update(self, mi, batch, stats, use_cooldown=False, cooldown_count=0):
''' Update given batch '''
self.timer.restart()
if use_cooldown:
if cooldown_count == 0:
mi['model'].prepare_cooldown()
self.timer.record('prepare_cooldown')
# Current timestep.
state_curr = mi['model'](batch)
self.timer.record('forward')
if use_cooldown:
self.logger.debug(self.timer.print(1))
return dict(backprop=False)
targets = batch["mcts_scores"]
logpi = state_curr["logpi"]
pi = state_curr["pi"]
# backward.
# loss = self.policy_loss(logpi, Variable(targets)) * logpi.size(1)
loss = - (logpi * Variable(targets)
).sum(dim=1).mean() # * logpi.size(1)
stats["loss"].feed(float(loss))
total_policy_loss = loss
entropy = (logpi * pi).sum() * -1 / logpi.size(0)
stats["entropy"].feed(float(entropy))
stats["blackwin"].feed(
float((batch["winner"] > 0.0).float().sum()) /
batch["winner"].size(0))
total_value_loss = None
if "V" in state_curr and "winner" in batch:
total_value_loss = self.value_loss(
state_curr["V"].squeeze(), Variable(batch["winner"]))
stats["total_policy_loss"].feed(float(total_policy_loss))
if total_value_loss is not None:
stats["total_value_loss"].feed(float(total_value_loss))
total_loss = total_policy_loss + total_value_loss
else:
total_loss = total_policy_loss
stats["total_loss"].feed(float(total_loss))
self.timer.record('feed_stats')
if self.options.backprop:
total_loss.backward()
self.timer.record('backward')
self.logger.debug(self.timer.print(1))
return dict(backprop=True)
else:
self.logger.debug(self.timer.print(1))
return dict(backprop=False)
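
# A minimal, self-contained sketch (using the current torch API, with
# made-up shapes and values) of the policy loss computed in ``update``
# above: the cross-entropy between MCTS visit distributions and the
# model's log-policy, -(logpi * targets).sum(dim=1).mean().
if __name__ == '__main__':
    import torch
    logits = torch.randn(4, 9)                      # 4 positions, 9 moves
    demo_logpi = torch.log_softmax(logits, dim=1)   # model log-policy
    demo_targets = torch.softmax(torch.randn(4, 9), dim=1)  # MCTS scores
    demo_loss = -(demo_logpi * demo_targets).sum(dim=1).mean()
    print(float(demo_loss))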
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .model_base import Model
from .model_loader import ModelLoader, load_env
from .model_interface import ModelInterface
from .sampler import Sampler
from .methods import ActorCritic, RNNActorCritic
from .runner import EvalIters, EvalItersBasic, SingleProcessRun
from .trainer import Trainer, Evaluator, LSTMTrainer
from .methods import add_err, PolicyGradient, DiscountedReward, ValueMatcher
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import OrderedDict
from copy import deepcopy
from time import sleep
import torch
import torch.nn as nn
from torch.autograd import Variable
torch.backends.cudnn.benchmark = True
class Model(nn.Module):
''' Base class for an RL model, it is a wrapper for ``nn.Module``'''
def __init__(self, option_map, params):
"""Initialize model with ``args``.
Set ``step`` to ``0`` and ``volatile`` to ```false``.
``step`` records the number of times the weight has been updated.
``volatile`` indicates that the Variable should be used in
inference mode, i.e. don't save the history.
"""
super(Model, self).__init__()
self.option_map = option_map
self.params = params
self.step = 0
self.volatile = False
def clone(self, gpu=None):
"""Deep copy an existing model.
``options``, ``step`` and ``state_dict`` are copied.
Args:
            gpu(int): id of the GPU to put the model on
Returns:
Cloned model
"""
model = type(self)(self.option_map, self.params)
model.load_state_dict(deepcopy(self.state_dict()))
model.step = self.step
if gpu is not None:
model.cuda(gpu)
return model
def set_volatile(self, volatile):
"""Set model to ``volatile``.
Args:
volatile(bool): indicating that the Variable should be used in
inference mode, i.e. don't save the history.
"""
self.volatile = volatile
def _var(self, x):
''' Convert tensor x to a pytorch Variable.
Returns:
Variable for x
'''
if not isinstance(x, Variable):
return Variable(x, volatile=self.volatile)
else:
return x
def before_update(self):
"""Customized operations for each model before update.
To be extended.
"""
pass
def save(self, filename, num_trial=10):
"""Save current model, step and args to ``filename``
Args:
filename(str): filename to be saved.
num_trial(int): maximum number of retries to save a model.
"""
        # Avoid self.clone(), which would call the constructor;
        # deepcopy copies the state directly.
state_dict = deepcopy(self).cpu().state_dict()
# Note that the save might experience issues, so if we encounter
# errors, try a few times and then give up.
content = {
'state_dict': state_dict,
'step': self.step,
'options': vars(self.options),
}
for i in range(num_trial):
try:
torch.save(content, filename)
return
except BaseException:
sleep(1)
print(
"Failed to save %s after %d trials, giving up ..." %
(filename, num_trial))
def load(
self, filename,
omit_keys=[], replace_prefix=[], check_loaded_options=True):
''' Load current model, step and args from ``filename``
Args:
filename(str): model filename to load from
            omit_keys(list): list of keys to omit.
                Sometimes a model has extra keys and weights
                (e.g. due to extra tasks during training);
                they must be omitted, otherwise loading will fail.
            replace_prefix(list): list of ``(src, dst)`` pairs; state
                dict keys starting with ``src`` are renamed to start
                with ``dst``.
            check_loaded_options(bool): if ``True``, check that the
                options saved in the checkpoint match the current ones.
        '''
data = torch.load(filename)
if isinstance(data, OrderedDict):
self.load_state_dict(data)
else:
for k in omit_keys:
del data["state_dict"][k + ".weight"]
del data["state_dict"][k + ".bias"]
sd = data["state_dict"]
keys = list(sd.keys())
for key in keys:
# Should be commented out for PyTorch > 0.40
# if key.endswith("num_batches_tracked"):
# del sd[key]
# continue
for src, dst in replace_prefix:
if key.startswith(src):
# print(f"Src=\"{src}\", Dst=\"{dst}\"")
sd[dst + key[len(src):]] = sd[key]
del sd[key]
self.load_state_dict(sd)
self.step = data.get("step", 0)
self.filename = os.path.realpath(data.get("filename", filename))
if check_loaded_options:
# Ensure that for options defined in both the current model
# options and the loaded model options, the values match between
# current model and loaded model.
loaded_options = data.get('options', {})
current_options = vars(self.options)
for option_name in \
(set(loaded_options.keys()) & set(current_options.keys())):
if loaded_options[option_name] != current_options[option_name]:
raise ValueError(
f'Discrepancy between current and loaded model '
f'parameter: {option_name} '
f'loaded: {loaded_options[option_name]}, '
f'current: {current_options[option_name]}'
)
def load_from(self, model):
''' Load from an existing model. State is not deep copied.
        To deep copy the model, use ``clone``.
'''
if hasattr(model, 'option_map'):
self.option_map = model.option_map
if hasattr(model, 'params'):
self.params = deepcopy(model.params)
self.load_state_dict(model.state_dict())
self.step = model.step
def inc_step(self):
''' increment the step.
``step`` records the number of times the weight has been updated.'''
self.step += 1
def signature(self):
'''Get model's signature.
Returns:
the model's signature string, specified by step.
'''
return "Model[%d]" % self.step
def prepare_cooldown(self):
"""Prepare for "cooldown" forward passes (useful for batchnorm)."""
pass
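
# Usage sketch for the base class. ``MyModel``, ``option_map`` and
# ``params`` are placeholders; a real subclass defines the layers.
#
#   model = MyModel(option_map, params)
#   model.save("ckpt.bin")               # retried up to num_trial times
#   copy = model.clone(gpu=0)            # deep-copied weights, same step
#   model.load("ckpt.bin", omit_keys=["aux_head"])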
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import pprint
import random
import time
import torch
import warnings
from elf.options import import_options, PyOptionSpec
from elf import logging
from .model_interface import ModelInterface
from .sampler import Sampler
from .utils.fp16_utils import FP16Model
def load_module(mod):
"""Load a python module."""
module = importlib.import_module(mod)
print(module, mod)
return module
class ModelLoader(object):
"""Class to load a previously saved model."""
@classmethod
def get_option_spec(cls, model_class=None, model_idx=None):
spec = PyOptionSpec()
spec.addStrOption(
'load',
'load model',
'')
spec.addStrListOption(
'onload',
('functions to call after loading. e.g., reset,zero_first_layer. '
'These functions are specified in the model'),
[])
spec.addStrListOption(
'omit_keys',
'omitted keys when loading',
[])
spec.addStrListOption(
'replace_prefix',
'replace prefix',
[])
spec.addIntOption(
'gpu',
'which GPU to use',
-1)
spec.addBoolOption(
'check_loaded_options',
'Toggles consistency check of loaded vs. current model options.',
True)
spec.addBoolOption(
'use_fp16',
'use_fp16',
False)
spec.addFloatOption(
'load_model_sleep_interval',
('If zero, has no effect. If positive, then before loading the '
'model, we will sleep for an interval of '
'duration (secs) ~ Uniform[0, load_model_sleep_interval]'),
0.0)
if model_class is not None and hasattr(model_class, 'get_option_spec'):
spec.merge(model_class.get_option_spec())
idx_suffix = '' if model_idx is None else str(model_idx)
spec.addPrefixSuffixToOptionNames('', idx_suffix)
return spec
def __init__(self, option_map, model_class, model_idx=None, logger=None):
"""Initialize ModelLoader.
Loading will fail if extra keys are not put in ``omit_keys``
Args:
model_class(class): class name of the model
model_idx(int): index of the model to be loaded.
There may be multiple models in an
`ModelInterface` to load.
"""
import_options(
self, option_map, self.get_option_spec(model_class, model_idx))
if logger is not None:
self.logger = logger
else:
self.logger = logging.getIndexedLogger(
'rlpytorch.model_loader.ModelLoader-',
f'-model_index{model_idx}')
self.option_map_for_model = option_map.clone()
self.model_class = model_class
self.model_idx = model_idx
self._on_get_args = lambda *args, **kwargs: None
option_spec = self.get_option_spec(model_class, model_idx)
option_names = set(option_spec.getOptionNames())
model_option_spec = model_class.get_option_spec()
model_option_names = set(model_option_spec.getOptionNames())
# Here, the names in option_names are still possibly suffixed with
# the model_idx. If so, we need to remove this suffix.
model_options_to_load = {}
for option_name in option_names:
if model_idx is not None and option_name.endswith(str(model_idx)):
# This is the name without the model_idx suffix
orig_option_name = option_name[:-len(str(model_idx))]
value = getattr(self.options, option_name)
setattr(self.options, orig_option_name, value)
delattr(self.options, option_name)
if orig_option_name in model_option_names:
model_options_to_load[orig_option_name] = value
if model_options_to_load:
self.option_map_for_model.loadOptionDict(
model_options_to_load)
def load_model(self, params):
"""Actually loads the model with initialized args.
        Call onload functions if needed.
Args:
            params(dict): additional parameters to be put into args.
"""
if self.options.load_model_sleep_interval > 1e-7:
interval = random.random() * self.options.load_model_sleep_interval
self.logger.info(f'Sleeping for {interval} seconds')
time.sleep(interval + 1e-7)
# Initialize models.
model = self.model_class(self.option_map_for_model, params)
if self.options.load:
self.logger.info(f'Loading model from {self.options.load}')
if self.options.omit_keys:
self.logger.info(f'Omitting keys {self.options.omit_keys}')
if self.options.replace_prefix:
replace_prefix = [
item.split(",")
for item in self.options.replace_prefix
]
self.logger.info(
f'replace_prefix for state dict: {replace_prefix}')
else:
replace_prefix = []
model.load(
self.options.load,
omit_keys=self.options.omit_keys,
replace_prefix=replace_prefix,
check_loaded_options=self.options.check_loaded_options)
self.logger.info(
f'Finished loading model from {self.options.load}')
if self.options.onload:
for func in self.options.onload:
try:
getattr(model, func)()
                        self.logger.info(f'Called function {func!s} for model')
except BaseException:
                        self.logger.info(f'Calling function {func!s} failed!')
raise
if self.options.use_fp16:
old_step = model.step
model = FP16Model(self.option_map_for_model, params, model)
model.step = old_step
if torch.cuda.is_available() and \
self.options.gpu is not None and \
self.options.gpu >= 0:
model.cuda(self.options.gpu)
return model
def _on_get_args(self, *args, **kwargs):
warnings.warn(
('_on_get_args is deprecated, get rid of this as soon as old '
'model files are no longer needed'),
DeprecationWarning)
def load_env(
envs,
num_models=None,
overrides=None,
additional_to_load=None):
"""Load envs.
Envs will be specified as environment variables. Specifically, the
environment variables ``game``, ``model_file`` and ``model`` are
required.
``additional_to_load`` is a dict with the following format:
{'variable_name': (option_spec, callable)}
For each element in ``additional_to_load``, ``load_env`` will parse
the ``option_spec``, pass the resulting option map to ``callable``,
and store the result of ``callable`` in the return value
(under the key ``name``).
Returns:
env: dict of
``game`` : game module
``method``: Learning method used
``model_loaders``: loaders for model
"""
logger = logging.getIndexedLogger('rlpytorch.model_loader.load_env', '')
logger.info('Loading env')
game_loader_class = load_module(envs["game"]).Loader
model_file = load_module(envs["model_file"])
# TODO This is not good, need to fix.
if len(model_file.Models[envs["model"]]) == 2:
model_class, method_class = model_file.Models[envs["model"]]
sampler_class = Sampler
else:
model_class, method_class, sampler_class = \
model_file.Models[envs["model"]]
overrides = dict(overrides) if overrides else {}
overrides.update(getattr(model_file, "Overrides", {}))
option_spec = PyOptionSpec()
option_spec.merge(PyOptionSpec.fromClasses((
logging.GlobalLoggingConfigurator,
game_loader_class,
method_class,
sampler_class,
ModelInterface,
)))
if num_models is None:
option_spec.merge(ModelLoader.get_option_spec(model_class))
else:
for i in range(num_models):
option_spec.merge(
ModelLoader.get_option_spec(model_class, model_idx=i))
if additional_to_load:
for additional_option_spec, _ in additional_to_load.values():
option_spec.merge(additional_option_spec)
option_map = option_spec.parse(overrides=overrides)
global_logger_configurator = logging.GlobalLoggingConfigurator(option_map)
global_logger_configurator.configure()
pretty_option_str = pprint.pformat(option_map.getOptionDict(), width=50)
logger.info(f'Parsed options: {pretty_option_str}')
game = game_loader_class(option_map)
method = method_class(option_map)
sampler = sampler_class(option_map)
mi = ModelInterface(option_map)
# You might want multiple models loaded.
if num_models is None:
model_loaders = [ModelLoader(option_map, model_class)]
else:
model_loaders = [ModelLoader(option_map, model_class, model_idx=i)
for i in range(num_models)]
env = dict(
game=game,
method=method,
sampler=sampler,
model_loaders=model_loaders,
mi=mi,
)
if additional_to_load:
for name, (_, option_map_callable) in additional_to_load.items():
env[name] = option_map_callable(option_map)
logger.info('Finished loading env')
return env
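
# A hedged sketch of driving ``load_env`` from a dict of "environment
# variables", mirroring the docstring above. The module names are
# placeholders; any module exposing ``Loader`` / ``Models`` works.
#
#   envs = {
#       "game": "some_game_package.game",            # module with ``Loader``
#       "model_file": "some_game_package.df_model",  # module with ``Models``
#       "model": "df_kl",                            # key into ``Models``
#   }
#   env = load_env(envs)
#   model = env["model_loaders"][0].load_model({})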
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
import torch
import torch.cuda
import torch.optim
from elf.options import auto_import_options, PyOptionSpec
# All models must provide .outputs and .preprocess
# E.g., .outputs = { "Q" : self.final_linear_layer }
# .preprocess = lambda self, x: downsample(x)
class ModelInterface(object):
"""An interface for the model to receive intermediate results from
forward passes."""
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'opt_method',
'optimization method (adam or sgd)',
'adam')
spec.addFloatOption(
'lr',
'learning rate',
1e-3)
spec.addFloatOption(
'adam_eps',
'Adam epsilon',
1e-3)
spec.addFloatOption(
'momentum',
'momentum parameter',
0.9)
spec.addFloatOption(
'weight_decay',
'weight decay rate',
0.0)
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for models and optimizers.
``models`` is a dict that can contain multiple models in a
single `ModelInterface`.
For each model in ``models``, there is an optimizer in
``optimizers`` in correspondence, using ``torch.optim.Adam``.
"""
self.option_map = option_map
self.models = {}
self.old_models = deque()
self.optimizers = {}
def clone(self, gpu=None):
"""Clone the state for the model interface, including
``models`` and ``optimizers``.
Args:
            gpu(int): id of the GPU to put the model on
Returns:
cloned `ModelInterface`.
"""
mi = ModelInterface(self.option_map)
for key, model in self.models.items():
mi.models[key] = model.clone(gpu=gpu)
if key in self.optimizers:
# Same parameters.
mi.optimizers[key] = torch.optim.Adam(
mi.models[key].parameters())
new_optim = mi.optimizers[key]
old_optim = self.optimizers[key]
new_optim_params = new_optim.param_groups[0]
old_optim_params = old_optim.param_groups[0]
# Copy the parameters.
for k in new_optim_params.keys():
if k != "params":
new_optim_params[k] = old_optim_params[k]
# Copy the state
'''
new_optim.state = { }
for k, v in old_optim.state.items():
if isinstance(v, (int, float, str)):
new_optim.state[k] = v
else:
new_optim.state[k] = v.clone()
if gpu is not None:
new_optim.state[k] = new_optim.state[k].cuda(gpu)
'''
return mi
def __contains__(self, key):
return key in self.models
def add_model(
self,
key,
model,
copy=False,
cuda=False,
gpu_id=None,
opt=False,
params={}):
'''Add a model to `ModelInterface`.
Args:
key(str): key in ``self.models``.
model(`Model`): the model to be added.
copy(bool): indicate if the model needs to be deep copied.
cuda(bool): indicate if model needs to be converted to cuda.
gpu_id(int): gpu index.
opt(bool): Whether you want your model to be optimized
(weights to be updated).
            params(dict): a dict of parameters for optimizers.
        Returns:
            None if the model is successfully added.
        Raises:
            RuntimeError: if ``key`` is already present in ``self.models``.
        '''
        if key in self.models:
            raise RuntimeError(
                "ModelInterface: key[%s] is already present!" % key)
# New model.
if gpu_id is not None and gpu_id >= 0:
with torch.cuda.device(gpu_id):
self.models[key] = model.clone() if copy else model
else:
self.models[key] = model.clone() if copy else model
if cuda:
if gpu_id is not None and gpu_id >= 0:
self.models[key].cuda(gpu_id)
else:
self.models[key].cuda()
def set_default(params, ks, arg_ks=None):
if arg_ks is None:
arg_ks = [None] * len(ks)
for k, arg_k in zip(ks, arg_ks):
if arg_k is None:
arg_k = k
params[k] = params.get(k, getattr(self.options, arg_k))
curr_model = self.models[key]
if opt or len(params) > 0:
set_default(
params,
["lr", "opt_method", "adam_eps", "momentum", "weight_decay"])
method = params["opt_method"]
curr_model.train()
if method == "adam":
self.optimizers[key] = torch.optim.Adam(
curr_model.parameters(), lr=params["lr"],
betas=(0.9, 0.999), eps=params["adam_eps"],
weight_decay=params["weight_decay"])
elif method == "sgd":
self.optimizers[key] = torch.optim.SGD(
curr_model.parameters(),
lr=params["lr"],
momentum=params["momentum"],
weight_decay=params["weight_decay"])
else:
raise ValueError(
"Optimization method %s is not supported! " %
params["opt_method"])
return True
def update_model(self, key, model, save_old_model=False):
''' If the key is present, update an old model. Does not deep copy it.
If the key is not present, add it (no deep copy).
Args:
key(str): the key in ``models`` to be updated
model(`Model`): updated model
'''
# print("Updating model " + key)
if key not in self.models:
self.add_model(key, model)
return
if save_old_model:
self.old_models.append(self.models[key].clone().cpu())
if len(self.old_models) > 20:
self.old_models.popleft()
self.models[key].load_from(model)
def remove_model(self, key):
del self.models[key]
if key in self.optimizers:
del self.optimizers[key]
def average_model(self, key, model):
"""Average the model params from ``self.models[key]`` and ``model``,
and update to ``self.models[key]``.
Args:
key(str): the key in ``models``
model(Model): the model containing the parameters to update
"""
for param, other_param in zip(
self.models[key].parameters(), model.parameters()):
param.data += other_param.data.cuda(param.data.get_device())
param.data /= 2
def copy(self, dst_key, src_key):
''' Deep copy a model from src_key to dst_key in ``self.models``
Args:
dst_key(str): destination key in ``self.models``
src_key(str): source key in ``self.models``
'''
assert dst_key in self.models, \
f'ModelInterface: dst_key = {dst_key} cannot be found'
assert src_key in self.models, \
f'ModelInterface: src_key = {src_key} cannot be found'
self.update_model(dst_key, self.models[src_key].clone())
''' Usage:
record = interface(input)
Then record["Q"] will be the Q-function given the input.
'''
def zero_grad(self):
''' Zero the gradient for all ``optimizers`` '''
for k, optimizer in self.optimizers.items():
optimizer.zero_grad()
def update_weights(self):
"""For each optimizer, call before_update for all the models,
then update the weights and increment the step for the model."""
for k, optimizer in self.optimizers.items():
self.models[k].before_update()
optimizer.step()
self.models[k].inc_step()
def __getitem__(self, key):
''' Get an item associated with ``key`` from ``self.models``'''
return self.models[key]
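
# A minimal usage sketch for ``ModelInterface``. ``model`` is any
# ``Model`` subclass instance; ``loss`` is whatever the training method
# computes. Option values come from the spec above.
#
#   mi = ModelInterface(option_map)
#   mi.add_model("model", model, opt=True, params={"lr": 1e-3})
#   ...
#   mi.zero_grad()
#   loss.backward()
#   mi.update_weights()   # before_update(), optimizer.step(), inc_step()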
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from .utils import add_err
from .discounted_reward import DiscountedReward
from .policy_gradient import PolicyGradient
from .value_matcher import ValueMatcher
class RNNActorCritic(object):
"""RNN actor-critic model."""
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'value_node',
'name of the value node',
'V')
spec.merge(PyOptionSpec.fromClasses(
(PolicyGradient, DiscountedReward, ValueMatcher)
))
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization of RNNActorCritic component methods:
`PolicyGradient`, `DiscountedReward`, and `ValueMatcher`.
"""
self.discounted_reward = DiscountedReward()
self.pg = PolicyGradient()
self.value_matcher = ValueMatcher()
def update(self, mi, batch, hiddens, stats):
m = mi["model"]
value_node = self.options.value_node
T = batch["a"].size(0)
h = Variable(hiddens)
hs = []
ss = []
# Forward to compute LSTM.
for t in range(0, T - 1):
if t > 0:
                term = Variable(1.0 - batch["terminal"][t].float()).view(-1, 1)
                # Bind ``term`` as a default argument: a plain closure would
                # capture the variable by reference, so every hook would see
                # the last timestep's value when backward() finally runs.
                h.register_hook(lambda grad, term=term: grad.mul(term))
state_curr = m(batch.hist(t), h)
h = m.transition(state_curr["h"], batch["a"][t])
hs.append(h)
ss.append(state_curr)
R = ss[-1][value_node].squeeze().data
self.discounted_reward.setR(R, stats)
err = None
# Backward to compute gradient descent.
for t in range(T - 2, -1, -1):
state_curr = ss[t]
# go through the sample and get the rewards.
bht = batch.hist(t)
V = state_curr[value_node].squeeze()
R = self.discounted_reward.feed(
dict(r=batch["r"][t], terminal=batch["terminal"][t]),
stats)
err = add_err(
err,
self.pg.feed(
R - V.data,
state_curr,
bht,
stats,
old_pi_s=bht))
err = add_err(err, self.value_matcher.feed(
{value_node: V, "target": R}, stats))
stats["cost"].feed(err.data[0] / (T - 1))
err.backward()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from .policy_gradient import PolicyGradient
from .discounted_reward import DiscountedReward
from .value_matcher import ValueMatcher
from .utils import add_err
class ActorCritic(object):
"""An actor critic model."""
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'value_node',
'name of the value node',
'V')
spec.merge(PyOptionSpec.fromClasses(
(PolicyGradient, DiscountedReward, ValueMatcher)
))
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization of ActorCritic component methods:
`PolicyGradient`, `DiscountedReward`, and `ValueMatcher`.
"""
self.discounted_reward = DiscountedReward()
self.pg = PolicyGradient()
self.value_matcher = ValueMatcher()
def update(self, mi, batch, stats):
"""Actor critic model update.
Feed stats for later summarization.
Args:
            mi(`ModelInterface`): model interface used
batch(dict): batch of data. Keys in a batch:
``s``: state,
``r``: immediate reward,
``terminal``: if game is terminated
stats(`Stats`): Feed stats for later summarization.
"""
m = mi["model"]
value_node = self.options.value_node
T = batch["s"].size(0)
state_curr = m(batch.hist(T - 1))
self.discounted_reward.setR(
state_curr[value_node].squeeze().data, stats)
err = None
for t in range(T - 2, -1, -1):
bht = batch.hist(t)
state_curr = m.forward(bht)
# go through the sample and get the rewards.
V = state_curr[value_node].squeeze()
R = self.discounted_reward.feed(
dict(r=batch["r"][t], terminal=batch["terminal"][t]),
stats=stats)
policy_err = self.pg.feed(
R - V.data, state_curr, bht, stats, old_pi_s=bht)
err = add_err(err, policy_err)
err = add_err(err, self.value_matcher.feed(
{value_node: V, "target": R}, stats))
stats["cost"].feed(err.data[0] / (T - 1))
err.backward()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from .utils import average_norm_clip
class ValueMatcher(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addFloatOption(
'grad_clip_norm',
'gradient norm clipping',
0.0)
spec.addStrOption(
'value_node',
'name of the value node',
'V')
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization of value matcher.
Initialize value loss to be ``nn.SmoothL1Loss``.
"""
self.value_loss = nn.SmoothL1Loss().cuda()
def _reg_backward(self, v):
''' Register the backward hook. Clip the gradient if necessary.'''
grad_clip_norm = self.options.grad_clip_norm
if grad_clip_norm > 1e-20:
def bw_hook(grad_in):
grad = grad_in.clone()
if grad_clip_norm is not None:
average_norm_clip(grad, grad_clip_norm)
return grad
v.register_hook(bw_hook)
def feed(self, batch, stats):
"""
One iteration of value match.
nabla_w Loss(V - target)
Keys in a batch:
``V`` (variable): value
``target`` (tensor): target value.
Inputs that are of type Variable can backpropagate.
Feed to stats: predicted value and value error
Returns:
value_err
"""
V = batch[self.options.value_node]
value_err = self.value_loss(V, Variable(batch["target"]))
self._reg_backward(V)
stats["predicted_" + self.options.value_node].feed(V.data[0])
stats[self.options.value_node + "_err"].feed(value_err.data[0])
return value_err
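
# A self-contained sketch (current torch API, made-up values) of the
# value-matching loss above: SmoothL1 between predicted values and
# targets. The real ``feed`` additionally registers the gradient
# clipping hook and feeds stats.
if __name__ == '__main__':
    import torch
    demo_v = torch.tensor([0.2, -0.5, 0.9], requires_grad=True)
    demo_target = torch.tensor([0.0, -1.0, 1.0])
    demo_err = nn.SmoothL1Loss()(demo_v, demo_target)
    demo_err.backward()
    print(float(demo_err), demo_v.grad)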
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .actor_critic import ActorCritic
from .rnn_actor_critic import RNNActorCritic
from .q_learning import Q_learning
from .policy_gradient import PolicyGradient
from .discounted_reward import DiscountedReward
from .value_matcher import ValueMatcher
from .utils import add_err
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
class DiscountedReward(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addFloatOption(
'discount',
'exponential discount rate',
0.99)
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization of discounted reward."""
pass
def setR(self, R, stats):
"""Set rewards and feed to stats."""
self.R = R
stats["init_reward"].feed(R.mean())
def feed(self, batch, stats):
"""Update discounted reward and feed to stats.
Keys in a batch:
``r`` (tensor): immediate reward.
``terminal`` (tensor): whether the current game has terminated.
Feed to stats: immediate reward and accumulated reward
"""
r = batch["r"]
term = batch["terminal"]
# Compute the reward.
self.R = self.R * self.options.discount + r
# If we see any terminal signal, break the reward backpropagation
# chain.
for i, terminal in enumerate(term):
if terminal:
self.R[i] = r[i]
stats["reward"].feed(r.mean())
stats["acc_reward"].feed(self.R.mean())
return self.R
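
# A self-contained sketch (made-up values) of the recursion in ``feed``:
# R_t = discount * R_{t+1} + r_t, reset to the immediate reward wherever
# a terminal signal is seen.
if __name__ == '__main__':
    import torch
    discount = 0.99
    R = torch.tensor([1.0, 1.0])                  # bootstrap values
    steps = [  # (reward, terminal), iterated backwards in time
        (torch.tensor([0.0, 1.0]), torch.tensor([0.0, 1.0])),
        (torch.tensor([1.0, 0.0]), torch.tensor([0.0, 0.0])),
    ]
    for r, term in steps:
        R = R * discount + r
        for i, terminal in enumerate(term):
            if terminal:
                R[i] = r[i]
        print(R)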
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
def average_norm_clip(grad, clip_val):
'''
Compute the norm and clip it if necessary.
The first dimension will be batchsize.
Args:
grad(Tensor): the gradient
clip_val(float): value to clip to
'''
batchsize = grad.size(0)
avg_l2_norm = 0.0
for i in range(batchsize):
avg_l2_norm += grad[i].data.norm()
avg_l2_norm /= batchsize
if avg_l2_norm > clip_val:
# print("l2_norm: %.5f clipped to %.5f" % (avg_l2_norm, clip_val))
grad *= clip_val / avg_l2_norm
def accumulate(acc, new):
    ''' Accumulate values from ``new`` into ``acc``, key by key.
Args:
acc(dict): the dict to accumulate to
new(dict): new dict entry
Returns:
A new dict containing the accumulated sums of each key.
'''
ret = {k: new[k] if a is None else a + new[k]
for k, a in acc.items() if k in new}
ret.update({k: v for k, v in new.items() if not (k in acc)})
return ret
def add_err(overall_err, new_err):
''' Add ``new_err`` to ``overall_err``
Args:
overall_err(float): summed overall error
new_err(float): new error
'''
if overall_err is None:
return new_err
else:
overall_err += new_err
return overall_err
def add_stats(stats, key, value):
''' Feed ``value`` to ``stats[key]``'''
if stats:
stats[key].feed(value)
def check_terminals(has_terminal, batch):
''' Check if the environment sent a terminal signal '''
    # Block backpropagation if we go past a terminal node.
for i, terminal in enumerate(batch["terminal"]):
if terminal:
has_terminal[i] = True
def check_terminals_anyT(has_terminal, batch, T):
''' Check if any of ``batch[t], t <= T`` is terminal'''
for t in range(T):
check_terminals(has_terminal, batch[t])
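
# A self-contained sketch (made-up values) of ``average_norm_clip``:
# gradients are rescaled so the average per-sample L2 norm does not
# exceed ``clip_val``.
if __name__ == '__main__':
    import torch
    g = torch.ones(4, 10) * 3.0       # avg per-sample norm ~ 9.49
    average_norm_clip(g, clip_val=1.0)
    print(float(g[0].norm()))         # ~ 1.0 after clipping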
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from .utils import accumulate, add_err, average_norm_clip
class PolicyGradient(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addFloatOption(
'entropy_ratio',
'the entropy ratio we put on PolicyGradient',
0.01)
spec.addFloatOption(
'grad_clip_norm',
'gradient norm clipping',
0.0)
spec.addFloatOption(
'min_prob',
            'minimal probability used in training',
1e-6)
spec.addFloatOption(
'ratio_clamp',
'maximum importance sampling ratio',
10.0)
spec.addStrListOption(
'policy_action_nodes',
            'comma-separated pairs of policy node and action node',
['pi,a'])
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialize the policy gradient.
Specifically, initialize policy loss to be an ``nn.NLLLoss`` and
parse ``policy_action_nodes``.
"""
self.policy_loss = nn.NLLLoss().cuda()
self.policy_action_nodes = []
for node_specifier in self.options.policy_action_nodes:
policy, _, action = node_specifier.partition(",")
self.policy_action_nodes.append((policy, action))
def _compute_one_policy_entropy_err(self, pi, a):
"""Compute policy error and entropy error for one.
Pass in ``min_prob`` to avoid ``Nan`` in logrithms.
Returns:
dict of
``logpi``: log policy
``policy_err``: polict error
``entropy_err``: entropy error
"""
batchsize = a.size(0)
        # Add a small constant inside the log for numerical stability.
        logpi = (pi + self.options.min_prob).log()
        # TODO Seems that logpi.clone() won't create a fresh hook list.
        # See https://github.com/pytorch/pytorch/issues/2601
        logpi2 = (pi + self.options.min_prob).log()
# Get policy. N * #num_actions
policy_err = self.policy_loss(logpi, a)
entropy_err = (logpi2 * pi).sum() / batchsize
return dict(
logpi=logpi,
policy_err=policy_err,
entropy_err=entropy_err)
def _compute_policy_entropy_err(self, pi, a):
"""Compute policy error and entropy error for a batch.
        ``min_prob`` is added inside the logarithm to avoid ``NaN``.
        Returns:
            dict of
            ``logpi``: log policy
            ``policy_err``: policy error
``entropy_err``: entropy error
"""
errs = {}
if isinstance(pi, list):
            # Action map; we need to compute the error one by one.
for i, pix in enumerate(pi):
for j, pixy in enumerate(pix):
                    errs = accumulate(
                        errs,
                        self._compute_one_policy_entropy_err(
                            pixy, a[:, i, j]))
else:
errs = self._compute_one_policy_entropy_err(pi, a)
return errs
def _reg_backward(self, v, pg_weights):
"""Register the backward hook. Clip the gradient if necessary."""
def bw_hook(grad_in):
grad_clip_norm = self.options.grad_clip_norm
# this works only on pytorch 0.2.0
grad = grad_in.mul(pg_weights.view(-1, 1))
# import pdb
# pdb.set_trace()
if grad_clip_norm > 1e-20:
average_norm_clip(grad, grad_clip_norm)
return grad
v.register_hook(bw_hook)
def feed(self, Q, pi_s, actions, stats, old_pi_s=dict()):
"""One iteration of policy gradient.
rho nabla_w log p_w(a|s) Q + entropy_ratio * nabla H(pi(.|s))
Args:
Q(tensor): estimated return
actions(tensor): action
pi_s(variable): policy
old_pi_s(tensor, optional): old policy, in order to
get importance factor.
If you specify multiple policies, then all the log prob of these
policies are added, and their importance factors are multiplied.
Feed to stats: policy error and nll error
"""
# We need to set it beforehand.
# Note that the samples we collect might be off-policy, so we need
# to do importance sampling.
pg_weights = Q.clone()
policy_err = None
entropy_err = None
log_pi_s = []
for pi_node, a_node in self.policy_action_nodes:
pi = pi_s[pi_node]
a = actions[a_node].squeeze()
if pi_node in old_pi_s:
old_pi = old_pi_s[pi_node].squeeze()
# Cap it.
clamped_ratios = torch.clamp(
pi.data.div(old_pi), max=self.options.ratio_clamp)
coeff = clamped_ratios.gather(1, a.view(-1, 1)).squeeze()
pg_weights.mul_(coeff)
# There is another term (to compensate clamping), but we omit
# it for now.
# Compute policy gradient error:
errs = self._compute_policy_entropy_err(pi, Variable(a))
policy_err = add_err(policy_err, errs["policy_err"])
entropy_err = add_err(entropy_err, errs["entropy_err"])
log_pi_s.append(errs["logpi"])
stats["nll_" + pi_node].feed(errs["policy_err"].data[0])
stats["entropy_" + pi_node].feed(errs["entropy_err"].data[0])
for log_pi in log_pi_s:
self._reg_backward(log_pi, Variable(pg_weights))
if len(self.policy_action_nodes) > 1:
stats["total_nll"].feed(policy_err.data[0])
stats["total_entropy"].feed(entropy_err.data[0])
return policy_err + entropy_err * self.options.entropy_ratio
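
# A self-contained sketch (current torch API, made-up numbers) of the
# importance-weighting step in ``feed`` above: the per-sample ratio
# pi / old_pi is clamped at ``ratio_clamp`` and gathered at the taken
# action.
if __name__ == '__main__':
    demo_pi = torch.tensor([[0.7, 0.3], [0.1, 0.9]])
    demo_old_pi = torch.tensor([[0.5, 0.5], [0.8, 0.2]])
    demo_a = torch.tensor([0, 1])
    ratios = torch.clamp(demo_pi / demo_old_pi, max=10.0)
    coeff = ratios.gather(1, demo_a.view(-1, 1)).squeeze()
    print(coeff)   # tensor([1.4000, 4.5000])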
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from .discounted_reward import DiscountedReward
from .utils import add_err
class Q_learning(object):
"""A Q-learning model."""
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'a_node',
'action node',
'a')
spec.addStrOption(
'q_node',
'Q node',
'Q')
spec.merge(DiscountedReward.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization of q learning."""
self.discounted_reward = DiscountedReward(option_map)
self.q_loss = nn.SmoothL1Loss().cuda()
def update(self, mi, batch, stats):
        ''' Q-learning model update.
Feed stats for later summarization.
Args:
            mi(`ModelInterface`): model interface used
batch(dict): batch of data. Keys in a batch:
``s``: state,
``r``: immediate reward,
``terminal``: if game is terminated
stats(`Stats`): Feed stats for later summarization.
'''
m = mi["model"]
        Q_node = self.options.q_node
a_node = self.options.a_node
T = batch["s"].size(0)
state_curr = m(batch.hist(T - 1))
        Q = state_curr[Q_node].squeeze().data
        # max(1) returns (values, indices); bootstrap from the max value.
        V = Q.max(1)[0]
self.discounted_reward.setR(V, stats)
err = None
for t in range(T - 2, -1, -1):
bht = batch.hist(t)
state_curr = m.forward(bht)
# go through the sample and get the rewards.
Q = state_curr[Q_node].squeeze()
a = state_curr[a_node].squeeze()
R = self.discounted_reward.feed(
dict(r=batch["r"][t], terminal=batch["terminal"][t]),
stats=stats)
# Then you want to match Q value here.
# Q: batchsize * #action.
Q_sel = Q.gather(1, a.view(-1, 1)).squeeze()
            err = add_err(err, self.q_loss(Q_sel, Variable(R)))
stats["cost"].feed(err.data[0] / (T - 1))
err.backward()
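
# A self-contained sketch (made-up numbers) of the Q-value selection in
# ``update`` above: ``gather`` picks each row's Q value at the taken
# action before matching it against the discounted return.
if __name__ == '__main__':
    import torch
    demo_q = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    demo_a = torch.tensor([1, 0])
    demo_q_sel = demo_q.gather(1, demo_a.view(-1, 1)).squeeze()
    print(demo_q_sel)   # tensor([2., 3.])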
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .single_process import SingleProcessRun
# from .multi_process import MultiProcessRun
from .eval_iters import EvalIters, EvalItersBasic
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from ..stats import Stats
class EvalItersBasic(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_eval',
'number of games to evaluate',
500)
spec.addBoolOption(
'tqdm',
'toggle tqdm visualization',
False)
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for Evaluation."""
self.count = 0
def add_count(self):
self.count += 1
def update_count(self, n):
self.count = n
def iters(self):
        ''' Loop until ``num_eval`` games have been counted.
        If ``tqdm`` is enabled, also visualize a progress bar.
        '''
if self.options.tqdm:
import tqdm
tq = tqdm.tqdm(total=self.options.num_eval, ncols=50)
while self.count < self.options.num_eval:
old_n = self.count
yield old_n
diff = self.count - old_n
tq.update(diff)
tq.close()
else:
while self.count < self.options.num_eval:
yield self.count
class EvalIters(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_eval',
'number of games to evaluate',
500)
spec.addBoolOption(
'tqdm',
'toggle tqdm visualization',
False)
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for Evaluation."""
self.stats = Stats(option_map, "eval")
self.eval_iter_basic = EvalItersBasic(option_map)
def _on_get_args(self, _):
self.stats.reset()
def iters(self):
        ''' Loop until ``num_eval`` games have completed.
        If ``tqdm`` is enabled, also visualize a progress bar.
        Print a stats summary at the end.
        '''
if self.options.tqdm:
import tqdm
tq = tqdm.tqdm(total=self.options.num_eval, ncols=50)
while self.stats.count_completed() < self.options.num_eval:
old_n = self.stats.count_completed()
yield old_n
diff = self.stats.count_completed() - old_n
tq.update(diff)
tq.close()
else:
while self.stats.count_completed() < self.options.num_eval:
yield self.stats.count_completed()
self.stats.print_summary()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import tqdm
from elf.options import auto_import_options, PyOptionSpec
from .parameter_server import SharedData
class MultiProcessRun(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_minibatch',
'number of minibatches',
5000)
spec.addIntOption(
'num_episode',
'number of episodes',
10000)
spec.addIntOption(
'num_process',
'number of processes',
2)
spec.addBoolOption(
'tqdm',
'toggle tqdm visualization',
False)
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for MultiProcessRun."""
pass
def setup(self, GC, mi, remote_init, remote_process,
episode_start=None, episode_summary=None, args=None):
''' Setup for MultiProcessRun.
Args:
GC(`GameContext`): Game Context
mi(`ModelInterface`): ModelInterface
remote_init(func): Callbacks for remote initialization,
used in SharedData
remote_process(func): Callbacks for remote process,
used in SharedData
episode_start(func): operations to perform before each episode
            episode_summary(func): operations to summarize after each episode
args(dict): Additional arguments for class `SharedData`
'''
self.GC = GC
self.episode_start = episode_start
self.episode_summary = episode_summary
self.remote_init = remote_init
self.remote_process = remote_process
self.shared_data = \
SharedData(self.options.num_process, mi, GC.inputs[1][0],
cb_remote_initialize=remote_init,
cb_remote_batch_process=remote_process, args=args)
self.total_train_count = 0
self.success_train_count = 0
def _train(self, batch):
# Send to remote for remote processing.
# TODO Might have issues when batch is on GPU.
self.total_train_count += 1
success = self.shared_data.send_batch(batch)
if success:
self.success_train_count += 1
def run(self):
"""Main training loop. Initialize Game Context and looping the
required episodes.
Call episode_start and episode_summary before and after each episode
if necessary.
Visualize with a progress bar if ``tqdm`` is set.
Print training stats after each episode.
In the end, print summary for game context and stop it.
"""
self.GC.reg_callback("train", self._train)
self.GC.start()
for k in range(self.options.num_episode):
if self.episode_start is not None:
self.episode_start(k)
if self.options.tqdm:
iterator = tqdm.trange(self.options.num_minibatch, ncols=50)
else:
iterator = range(self.options.num_minibatch)
for i in iterator:
self.GC.run()
if self.episode_summary is not None:
self.episode_summary(k)
        print("Train stat: (%.2f) %d/%d" % (
            float(self.success_train_count) / self.total_train_count,
            self.success_train_count,
            self.total_train_count))
self.GC.stop()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# XXX hack fix path
import os
import random
import sys
import torch.multiprocessing as _mp
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'elf'))
import utils_elf
mp = _mp.get_context('spawn')
'''
Usage:
In process main function, run the following and then
you get a shared model.
if rank == 0:
model = build_model(with_cuda)
else:
model = None
model = param_server.sync_model(rank, model)
'''
class Cond:
''' Wrapper for `Condition` class from torch multiprocessing'''
def __init__(self):
self.cond = mp.Condition()
def wait(self):
self.cond.acquire()
self.cond.wait()
self.cond.release()
def wait_noblock(self):
self.cond.acquire()
self.cond.wait(0)
self.cond.release()
def notify(self):
self.cond.acquire()
self.cond.notify()
self.cond.release()
class ParameterServer(object):
''' ParameterServer to handle updates in the model concurrently '''
def __init__(self, n_processes):
''' Initialization.
Args:
n_processes: number of processes.
'''
self.queue = mp.Queue()
self.n_processes = n_processes
self.barrier = mp.Barrier(n_processes)
# For update signal.
self.send_done = Cond()
self.recv_done = Cond()
def __getstate__(self):
return (
self.queue,
self.barrier,
self.n_processes,
self.send_done,
self.recv_done)
def __setstate__(self, state):
self.queue, self.barrier, self.n_processes, \
self.send_done, self.recv_done = \
state
def server_send_model(self, mi):
"""Send the model to others and starts to wait.
Finish waiting if all client receives the model.
Args:
mi(`ModelInterface`): model interface to send
"""
assert mi is not None
for i in range(self.n_processes - 1):
self.queue.put(mi)
self._server_shared_mi = mi
self.barrier.wait()
def client_receive_model(self):
"""Receive model from the queue.
Finish waiting if all client receives the model.
Returns:
`ModelInterface` shared in clients.
"""
mi = self.queue.get()
# clone the gradients to break the sharing
for _, model in mi.models.items():
for param in model.parameters():
if param.grad is not None:
param._grad = param.grad.clone()
self.barrier.wait()
self._client_shared_mi = mi
return self._client_shared_mi
def server_update_model(self, key, new_mi, noblock=False):
''' Update shared model in the server, wait until all clients receive.
Args:
key(str): the key in ``models`` to update
new_mi(`ModelInterface`): new model interface to update
noblock(bool): indicates if updating models block other threads.
Default is blocking.
'''
# if recv is not done, skip it.
if noblock:
try:
self.recv_done.wait_noblock()
except BaseException:
# The recv is not done yet. Cannot send.
return False
else:
self.recv_done.wait()
self._server_shared_mi.update_model(key, new_mi)
# Then wait until other people have received.
self.send_done.notify()
return True
def client_refresh_model(self, gpu=None, skip=False):
''' Clone updated shared model from the server.
Args:
gpu(int): gpu index
skip(bool): if we skip this model.
Will return ``None`` if set to ``True``
Returns:
refreshed model.
'''
# First wait until we are synced up.
self.send_done.wait()
if not skip:
mi = self._client_shared_mi.clone(gpu=gpu)
else:
mi = None
self.recv_done.notify()
return mi
class SharedData:
def __init__(self, total_process, mi, batch_template,
cb_remote_initialize=None,
cb_remote_batch_process=None,
args=None):
''' Initialize `SharedData` class with a few hooks
Args:
total_process: number of processes
mi: ModelInterface
            batch_template: template batch used to allocate the shared
                pinned-memory copies
cb_remote_initialize: Callbacks for remote Initialization
cb_remote_batch_process: Callbacks for remote process
args: additional arguments
'''
self.server = ParameterServer(total_process)
self.cb_remote_initialize = cb_remote_initialize
self.cb_remote_batch_process = cb_remote_batch_process
self.args = args
# def get_gpu_id(i): return i + 1
def get_gpu_id(i): return 0
# Share only training batches.
shared_batches = []
cvs_send = []
cvs_recv = []
qs = []
for i in range(total_process - 1):
# gpu_id = get_gpu_id(i)
# shared_batches.append(
# cpu2gpu(all_batches[train_idx][0], gpu=gpu_id))
shared_batches.append(utils_elf.pin_clone(batch_template))
qs.append(mp.Queue(1))
qs[-1].put(shared_batches[i])
cvs_send.append(Cond())
cvs_recv.append(Cond())
self.cvs_send = cvs_send
self.cvs_recv = cvs_recv
self.shared_batches = shared_batches
self.qs = qs
self.b = mp.Barrier(total_process)
        self.optimizers = [
            mp.Process(target=self.process_main, args=(i, get_gpu_id(i)))
            for i in range(total_process - 1)]
for optimizer in self.optimizers:
optimizer.start()
# Wait until all models have received the shared memory.
self.b.wait()
self.server.server_send_model(mi)
def process_main(self, i, gpu_id):
''' Main process. Transportation between cpu and gpu.
Args:
i(int): process id
gpu_id(int): gpu id
'''
batch = self.qs[i].get()
self.b.wait()
batch_gpu = utils_elf.cpu2gpu(batch, gpu=gpu_id)
mi = self.server.client_receive_model()
context = self.cb_remote_initialize(mi, gpu_id, self.args)
print(
"[%d] Context initialization completed, gpu_id = %d.. " %
(i, gpu_id))
# Ready.
self.cvs_send[i].notify()
while True:
self.cvs_recv[i].wait()
utils_elf.transfer_cpu2gpu(batch, batch_gpu, non_blocking=True)
self.cvs_send[i].notify()
self.cb_remote_batch_process(context, batch_gpu)
def send_batch(self, batch):
''' Send batch to a cpu process
Args:
batch(dict): batch data
'''
process_idx = random.randint(0, len(self.shared_batches) - 1)
try:
self.cvs_send[process_idx].wait_noblock()
utils_elf.transfer_cpu2cpu(batch, self.shared_batches[process_idx])
self.cvs_recv[process_idx].notify()
return True
except Exception as e:
# print("Failed to send batch to %d" % process_idx)
# print(type(e))
# print(e.args)
# print(e)
return False
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
from elf.options import auto_import_options, PyOptionSpec
class SingleProcessRun(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_minibatch',
'number of minibatches',
5000)
spec.addIntOption(
'num_cooldown',
'Last #minibatches to refresh running mean/std for batchnorm '
'in addition to the training stage',
0)
spec.addIntOption(
'num_episode',
'number of episodes',
10000)
spec.addBoolOption(
'tqdm',
'toggle tqdm visualization',
False)
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for SingleProcessRun."""
pass
def setup(self, GC, episode_start=None, episode_summary=None,
after_start=None, before_stop=None):
''' Setup for SingleProcessRun.
Args:
GC(`GameContext`): Game Context
episode_start(func): operations to perform before each episode
episode_summary(func): operations to summarize after each episode
            after_start(func): operations called after GC.start() but
                before the main loop.
            before_stop(func): operations called right before GC.stop().
        '''
self.GC = GC
self.episode_summary = episode_summary
self.episode_start = episode_start
self.after_start = after_start
self.before_stop = before_stop
def run(self):
"""Main training loop. Initialize Game Context and looping the
required episodes.
Call episode_start and episode_summary before and after each episode
if necessary.
Visualize with a progress bar if ``tqdm`` is set.
Print training stats after each episode.
In the end, print summary for game context and stop it.
"""
self.GC.start()
if self.after_start is not None:
self.after_start()
for k in range(self.options.num_episode):
if self.episode_start is not None:
self.episode_start(k)
if self.options.tqdm:
import tqdm
tq = tqdm.tqdm(total=self.options.num_minibatch, ncols=50)
else:
tq = None
self.episode_counter = 0
while self.episode_counter < self.options.num_minibatch:
old_counter = self.episode_counter
                # Save the counter before GC.run(): a callback inside
                # GC.run() may set the counter itself, in which case the
                # progress delta is computed against the old value.
self.GC.run()
self.episode_counter += 1
diff = self.episode_counter - old_counter
if tq is not None:
if diff < 0:
print(f'Diff negative: {old_counter} -> '
f'{self.episode_counter}')
tq = tqdm.tqdm(
total=self.options.num_minibatch, ncols=50)
tq.update(self.episode_counter)
else:
tq.update(diff)
if self.options.num_cooldown > 0:
print(f'Starting {self.options.num_cooldown} cooldown passes')
self.cooldown_counter = 0
while self.cooldown_counter < self.options.num_cooldown:
self.GC.run(
use_cooldown=True,
cooldown_count=self.cooldown_counter)
self.cooldown_counter += 1
if self.episode_summary is not None:
self.episode_summary(k)
if self.before_stop is not None:
self.before_stop()
self.GC.stop()
def set_episode_counter(self, counter):
self.episode_counter = counter
def inc_episode_counter(self, delta):
self.episode_counter += delta
def run_multithread(self):
''' Start training in a multithreaded environment '''
def train_thread():
for i in range(self.options.num_episode):
for k in range(self.options.num_minibatch):
if self.episode_start is not None:
self.episode_start(k)
if k % 500 == 0:
print(
"Receive minibatch %d/%d" %
(k, self.options.num_minibatch))
self.GC.runGroup("train")
# Print something.
self.episode_summary(i)
def actor_thread():
while True:
self.GC.runGroup("actor")
self.GC.start()
# Start the two threads.
train_th = threading.Thread(target=train_thread)
actor_th = threading.Thread(target=actor_thread)
train_th.start()
actor_th.start()
train_th.join()
actor_th.join()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# From https://code.activestate.com/recipes/577504/
from __future__ import print_function
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
def total_size(o, handlers={}, obj_handlers={}, verbose=False):
""" Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
def dict_handler(d): return chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
# estimate sizeof object without __sizeof__
default_size = getsizeof(0)
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = 0
for typ, handler in obj_handlers.items():
if isinstance(o, typ):
s = handler(o)
break
if s == 0:
s = getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
# Example call
if __name__ == '__main__':
d = dict(a=1, b=2, c=3, d=[4, 5, 6, 7], e='a string of chars')
print(total_size(d, verbose=True))
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .hist_states import HistState
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import queue
from collections import defaultdict, Counter
from datetime import datetime
import numpy as np
import torch
import torch.multiprocessing as _mp
import msgpack
import msgpack_numpy
from .size_utils import total_size
mp = _mp.get_context('spawn')
msgpack_numpy.patch()
def dumps(obj):
return msgpack.dumps(obj, use_bin_type=True)
def loads(buf):
return msgpack.loads(buf)
def npimg_convert(img):
img = img.astype('float32') / 255.0
img = np.transpose(img, (2, 0, 1))
return img
def check_done_flag(done_flag):
if done_flag is not None:
with done_flag.get_lock():
return done_flag.value
return False
def islambda(v):
def LAMBDA(): return 0
return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__
def queue_get(q, done_flag=None, fail_comment=None):
if done_flag is None:
return q.get()
done = False
while not done:
try:
return q.get(True, 0.01)
except queue.Empty:
if fail_comment is not None:
print(fail_comment)
if check_done_flag(done_flag):
done = True
# Return
return None
def queue_put(q, item, done_flag=None, fail_comment=None):
if done_flag is None:
q.put(item)
return True
done = False
while not done:
try:
q.put(item, True, 0.01)
return True
except queue.Full:
if fail_comment is not None:
print(fail_comment)
if check_done_flag(done_flag):
done = True
return False
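
# A hedged usage sketch for the queue helpers above: with a shared
# ``done_flag`` (an mp.Value('b', ...)), both helpers poll every 10 ms
# and bail out once the flag is set instead of blocking forever.
#
#   done = mp.Value('b', False)
#   queue_put(q, item, done_flag=done, fail_comment="queue full")
#   item = queue_get(q, done_flag=done)   # None once done is set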
class Switch:
def __init__(self, val=True):
self.val = mp.Value("b", val)
self.lock = mp.Lock()
def get(self):
with self.lock:
return self.val.value
def set(self, v):
with self.lock:
self.val.value = v
class Timer:
def __init__(self):
self.reset()
def __call__(self, name):
self.curr_name = name
return self
def __enter__(self):
self.before[self.curr_name] = datetime.now()
def __exit__(self, t, value, traceback):
after = datetime.now()
elapsed = (after - self.before[self.curr_name]).total_seconds() * 1000
self.records[self.curr_name][0] += elapsed
self.records[self.curr_name][1] += 1
def summary(self):
rets = []
for name, record in self.records.items():
cumtime, count = record
aver_time = float(cumtime) / count
rets.append("[%s] %.3f ms [%d]" % (name, aver_time, count))
return rets
def reset(self):
self.records = defaultdict(lambda: [0, 0])
self.before = {}
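
# Usage sketch for ``Timer``: time named sections with ``with`` and read
# per-section averages from ``summary()``. ``model`` and ``batch`` are
# placeholders.
#
#   timer = Timer()
#   with timer("forward"):
#       out = model(batch)
#   print("\n".join(timer.summary()))   # e.g. "[forward] 3.142 ms [12]"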
class CategoryCounter:
def __init__(self, name=None):
self.name = name
self.reset()
def reset(self):
self.counter = Counter()
def feed(self, data):
for v in data:
self.counter[v] += 1
def summary(self, info=""):
n = sum(self.counter.values())
prompt = "[%s] n = %d " % (info, n)
if n > 0:
return prompt + "\n" + \
"\n".join([" \"%s\": %d (%.2lf%%)" % (k, v, 100.0 * v / n)
for k, v in self.counter.items()])
else:
return prompt
class DelayedStats:
def __init__(self, prefix, max_delay=5):
''' self.entries[key][t] gives the value of key at time t '''
self.prefix = prefix
self.max_delay = max_delay
self.reset()
def reset(self):
# self.entries[key][t_id] -> value
self.entries = defaultdict(dict)
        self.predicted_entries = [
            defaultdict(dict) for _ in range(self.max_delay)]
        self.baseline_entries = [
            defaultdict(dict) for _ in range(self.max_delay)]
def feed(self, ts, ids, curr, pred_diff, curr_cb=None, diff_cb=None):
"""Check keys in curr and pred, if there is any key starts with 'fa_',
collect them and compare against each other.
For example (suppose we are at time t):
num_unit_T2: predicted difference: curr["num_unit"] (at time t + 2)
- curr["num_unit"] (at time t).
"""
# curr[key][i] -> value, ids[i] -> id
for k, v in curr.items():
if not k.startswith(self.prefix):
continue
key = k[len(self.prefix):]
history = self.entries[key]
history.update({
str(t) + "_" + str(d): (v[i] if not curr_cb else curr_cb(v[i]))
for i, (t, d) in enumerate(zip(ts, ids))
})
for k, v in pred_diff.items():
if not k.startswith(self.prefix):
continue
key = k[len(self.prefix):]
idx = key.rfind("_")
delay = int(key[idx + 2:])
if delay >= self.max_delay:
continue
key = key[:idx]
# Save it
history = self.predicted_entries[delay][key]
history.update({
str(t + delay) + "_" + str(d): (
self.entries[key][str(t) + "_" + str(d)] +
(v[i] if not diff_cb else diff_cb(v[i]))
) for i, (t, d) in enumerate(zip(ts, ids))
})
history2 = self.baseline_entries[delay][key]
history2.update({
str(t + delay) + "_" + str(d):
self.entries[key][str(t) + "_" + str(d)]
for t, d in zip(ts, ids)
})
def _compare_history(self, h1, h2):
summation = 0
counter = 0
# h1[t_id] -> val
for t_id, v1 in h1.items():
if not (t_id in h2):
continue
v2 = h2[t_id]
summation += (v1 - v2) ** 2
counter += 1
return summation / (counter + 1e-8), counter
def summary(self, info=""):
for k, v in self.entries.items():
for i in range(1, self.max_delay):
# Difference
avgMSE, counter = self._compare_history(
self.predicted_entries[i][k], v)
avgMSE_bl, counter = self._compare_history(
self.baseline_entries[i][k], v)
print(
"[%s][%s_T%d] RMS: %.4lf, Baseline: %.4lf [cnt=%d]" %
(info, k, i, math.sqrt(avgMSE), math.sqrt(avgMSE_bl),
counter)
)
def print_dict(prompt, d, func=str, tight=False):
dem = ", " if tight else "\n"
print(prompt, end='')
if not tight:
print("")
print(dem.join(["%s: %s" % (k, func(d[k])) for k in sorted(d.keys())]))
if not tight:
print("")
def print_dict2(prompt, d1, d2, func=lambda x, y: str(x) + "_" + str(y)):
print(prompt)
items = []
for k in sorted(d1.keys()):
if not (k in d2):
continue
v1 = d1[k]
v2 = d2[k]
items.append("%s: %s" % (k, func(v1, v2)))
print("\n".join(items))
print("")
def is_viskey(k):
return k.startswith("_") or k.startswith("fa_")
def get_avg_str(l):
return ", ".join(["[%d]: %.2lf [cnt=%d]" % (i, math.sqrt(
v / (c + 1e-10)), c) for i, (v, c) in enumerate(zip(l[::2], l[1::2]))])
def get_avg_str2(l, l_bl):
items = []
for i, (v1, c1, v2, c2) in enumerate(
zip(l[::2], l[1::2], l_bl[::2], l_bl[1::2])):
r1 = math.sqrt(v1 / (c1 + 1e-10))
r2 = math.sqrt(v2 / (c2 + 1e-10))
items.append("[%d]: %.2lf=%.2lf/%.2lf(%d)" %
(i, r1 / (r2 + 1e-10), r1, r2, c1))
return ", ".join(items)
class ForwardTracker:
def __init__(self, max_delay=6):
# prediction[key][t] -> value
self.max_delay = max_delay
self.sum_sqr_err = defaultdict(lambda: [0] * (2 * self.max_delay))
self.sum_sqr_err_bl = defaultdict(lambda: [0] * (2 * self.max_delay))
self.reset()
def reset(self):
self.prediction = defaultdict(
lambda: defaultdict(lambda: {
"pred": [0] * self.max_delay,
"baseline": [0] * self.max_delay
})
)
def feed(self, batch_states, curr_batch, forwarded):
# Dump all entries with _, and with fa_
if curr_batch is None and forwarded is None:
            state_info = {
                k: v for k, v in batch_states[0].items() if is_viskey(k)}
print_dict("[batch states]: ", state_info, tight=True)
return
        batch_info = {
            k: v if isinstance(v, (int, float, str)) else v[0]
            for k, v in curr_batch.items() if is_viskey(k)}
fd_info = {k: v.data[0] for k, v in forwarded.items() if is_viskey(k)}
t0 = batch_info["_seq"]
additional_info = {}
used_fd_info = defaultdict(lambda: [0] * self.max_delay)
for k, v in batch_info.items():
pred = self.prediction[k]
# If there is prediction of the current value, also show them.
if t0 in pred:
cp = pred[t0]
                # Also compute the error.
for delay, p in enumerate(cp["pred"]):
self.sum_sqr_err[k][2 * delay] += (p - v) ** 2
self.sum_sqr_err[k][2 * delay + 1] += 1
for delay, p in enumerate(cp["baseline"]):
self.sum_sqr_err_bl[k][2 * delay] += (p - v) ** 2
self.sum_sqr_err_bl[k][2 * delay + 1] += 1
additional_info[k + "_pred"] = ", ".join([
"[%d] %.2f" % (delay, p)
for delay, p in enumerate(cp["pred"]) if delay != 0
])
additional_info[k + "_bl"] = ", ".join([
"[%d] %.2f" % (delay, p)
for delay, p in enumerate(cp["baseline"]) if delay != 0
])
del pred[t0]
for t in range(1, self.max_delay):
k_f = k + "_T" + str(t)
if not (k_f in fd_info):
continue
predictions = pred[t0 + t]
predictions["pred"][t] = fd_info[k_f] + v
predictions["baseline"][t] = v
used_fd_info[k][t] = fd_info[k_f]
batch_info.update(additional_info)
used_fd_info = {
k: ", ".join([
"[%d] %.2f" % (i, vv) for i, vv in enumerate(v) if i != 0
])
for k, v in used_fd_info.items()
}
# print("--------------")
# print_dict2("[statistics]:", self.sum_sqr_err, self.sum_sqr_err_bl,
# func=get_avg_str2)
# print_dict("[batch after _make_batch]: ", batch_info)
# print_dict("[state_curr after forward]: ", used_fd_info)
class SeqStats:
def __init__(self, name="seq", seq_limits=None):
# Stats.
self.stats_seq = Counter()
self.clear_stats()
self.name = name
if seq_limits is None:
self.limits = [
1,
100,
200,
300,
400,
500,
600,
700,
800,
900,
1000,
1200,
1400,
1600,
1800,
2000,
2500,
3000,
4000,
5000,
float("inf")]
else:
self.limits = seq_limits
if not np.isinf(self.limits[-1]):
self.limits.append(float("inf"))
def feed(self, seqs):
for seq_num in seqs:
bin_idx = None
for i, limit in enumerate(self.limits[1:]):
if int(seq_num) < limit:
bin_idx = i
break
if seq_num > self.max_seq:
self.max_seq = seq_num
if seq_num < self.min_seq:
self.min_seq = seq_num
name = "[" + str(self.limits[bin_idx]) + ", " + \
str(self.limits[bin_idx + 1]) + ")"
self.stats_seq[name] += 1
def print_stats(self, reset=False):
total_counts = sum(self.stats_seq.values())
if total_counts > 0:
print(
"Distribution of %s [min = %d / max = %d / #count = %d]:" %
(self.name, self.min_seq, self.max_seq, total_counts))
s = ""
for r in sorted(self.stats_seq.keys(),
key=lambda x: float(x.split(",")[0][1:])):
s += "%s: %d [%.2lf%%]\n" % (
r,
self.stats_seq[r],
100.0 * self.stats_seq[r] / total_counts)
print(s)
else:
print(
"Distribution of %s [#count = %d]:" %
(self.name, total_counts))
if reset:
self.clear_stats()
def clear_stats(self):
self.stats_seq.clear()
self.max_seq = 0
self.min_seq = float('inf')
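# Hypothetical SeqStats usage: sequence lengths are counted into the
# half-open bins defined by `limits`, then printed as a histogram.
def _seqstats_demo():
    stats = SeqStats(name="game_len")
    stats.feed([5, 150, 150, 2400])
    stats.print_stats(reset=True)  # bins like "[1, 100)", "[100, 200)", ...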
def agent2sender(agent_name):
return agent_name[:-5].encode('ascii')
def sender2agent(sender, i):
return sender + "-%04d" % i
def npimgs2cudatensor(imgs):
imgs = torch.from_numpy(imgs)
imgs = imgs.float().div(255)
imgs = imgs.transpose(0, 1).transpose(0, 2).contiguous()
    imgs = imgs.cuda()  # .cuda() returns a copy; it is not in-place
return imgs
def print_binary(m):
# Print a binary matrix.
if len(m.size()) != 2:
print("Err! cannot print matrix of size " + str(m.size()))
return
s = ""
for i in range(m.size(0)):
        for j in range(m.size(1)):
if m[i, j] != 0:
s += "x"
else:
s += "."
s += "\n"
print(s)
def get_total_size(o):
def get_tensor_size(t):
return t.numel() * t.element_size()
tensor_objects = [
torch.ByteTensor,
torch.FloatTensor,
torch.DoubleTensor,
torch.IntTensor,
torch.LongTensor,
torch.cuda.ByteTensor,
torch.cuda.FloatTensor,
torch.cuda.DoubleTensor,
torch.cuda.IntTensor,
torch.cuda.LongTensor,
]
obj_handlers = {obj: get_tensor_size for obj in tensor_objects}
return total_size(o, obj_handlers=obj_handlers)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict, deque
class HistState:
def __init__(self, T, init_state_func=None):
self.hs = defaultdict(lambda: deque())
self.T = T
self.init_state_func = init_state_func
def preprocess(self, ids, seqs):
for id, seq in zip(ids, seqs):
q = self.hs[id]
if seq == 0:
# clear the queue (which might contain old states of the last
# game)
q.clear()
if self.init_state_func is not None:
q.append(self.init_state_func())
def feed(self, ids, hiddens):
'''
h[0] is the oldest element (left-most),
h[-1] is the newest element (right-most)
'''
for id, h in zip(ids, hiddens):
q = self.hs[id]
# Put the newest element from the right.
q.append(h)
# Pop the oldest element from the left.
if len(q) > self.T:
q.popleft()
def _get_batch(self, ids, t, default=None):
list_output = False
if default is None:
templ = self.hs[ids[0]][t]
if isinstance(templ, (dict, list)):
data = []
list_output = True
else:
data = templ.clone().resize_(len(ids), templ.size(0))
else:
data = default.clone()
for i, id in enumerate(ids):
if id in self.hs:
if not list_output:
data[i, :] = self.hs[id][t]
else:
data.append(self.hs[id][t])
return data
def newest(self, ids, t, default=None):
return self._get_batch(ids, -t - 1, default=default)
def oldest(self, ids, t, default=None):
return self._get_batch(ids, t, default=default)
def map(self, ids, func):
hs = self.hs[ids[0]][0].clone()
hs.resize_(len(ids), *list(hs.size()))
for t in range(self.T):
# Collect the data.
for i, id in enumerate(ids):
if t < len(self.hs[id]):
hs[i, :] = self.hs[id][t]
output = func(hs)
# Update the state.
for id, h in zip(ids, output):
if t < len(self.hs[id]):
self.hs[id][t] = h
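# A minimal sketch of the HistState round trip (assumes torch, which the
# original callers use for the per-game hidden states).
def _hist_state_demo():
    import torch
    hs = HistState(T=2, init_state_func=lambda: torch.zeros(4))
    ids, seqs = ["g0", "g1"], [0, 0]
    hs.preprocess(ids, seqs)  # seq == 0 resets each game's queue
    hs.feed(ids, [torch.ones(4), torch.ones(4) * 2])
    print(hs.newest(ids, 0))  # [2, 4] tensor holding the newest states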
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from rlpytorch import Model
def apply_nonrecursive(module, fn):
"""Applies a given function only to parameters and buffers of a module.
Adapted from torch.nn.Module._apply.
"""
for param in module._parameters.values():
if param is not None:
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for key, buf in module._buffers.items():
if buf is not None:
module._buffers[key] = fn(buf)
return module
class FP16Model(Model):
def __init__(self, option_map, params, model):
super().__init__(option_map, params)
self.model = model.float()
for module in model.modules():
if not isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
apply_nonrecursive(
module, lambda t: t.half() if t.is_floating_point() else t)
def forward(self, input):
fp16_input = input.half()
return self.model(fp16_input)
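# Sketch of apply_nonrecursive on a single layer: only the parameters and
# buffers of that exact module are converted, not those of its children.
def _apply_nonrecursive_demo():
    layer = torch.nn.Linear(4, 4)
    apply_nonrecursive(
        layer, lambda t: t.half() if t.is_floating_point() else t)
    print(layer.weight.dtype)  # torch.float16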
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .sampler import Sampler
from .sample_methods import sample_multinomial, epsilon_greedy
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import sys
def uniform_multinomial(batchsize, num_action, use_cuda=True):
''' Sample with uniform probability.
Args:
batchsize(int): batch size
num_action(int): total number of actions to sample
use_cuda(bool): indicates if tensor is put on cuda
'''
# [TODO] Make the type more friendly
if use_cuda:
uniform_p = torch.cuda.FloatTensor(num_action).fill_(1.0 / num_action)
else:
uniform_p = torch.FloatTensor(num_action).fill_(1.0 / num_action)
return uniform_p.multinomial(batchsize, replacement=True)
def sample_with_check(probs, greedy=False):
"""multinomial sampling with out of bound check
Args:
probs(tensor): probability to sample from
greedy(bool): if ``True``, pick the action with maximum probability,
otherwise sample from it.
"""
num_action = probs.size(1)
if greedy:
_, actions = probs.max(1)
return actions
while True:
actions = probs.multinomial(1)[:, 0]
cond1 = (actions < 0).sum()
cond2 = (actions >= num_action).sum()
if cond1 == 0 and cond2 == 0:
return actions
print(
"Warning! sampling out of bound! cond1 = %d, cond2 = %d" %
(cond1, cond2))
print("prob = ")
print(probs)
print("action = ")
print(actions)
print("condition1 = ")
print(actions < 0)
print("condition2 = ")
print(actions >= num_action)
print("#actions = ")
print(num_action)
sys.stdout.flush()
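# Hedged usage sketch for sample_with_check: draw actions from a softmax
# policy, or take the argmax when greedy.
def _sample_with_check_demo():
    probs = torch.nn.functional.softmax(torch.randn(8, 5), dim=1)
    sampled = sample_with_check(probs, greedy=False)
    greedy = sample_with_check(probs, greedy=True)
    print(sampled.tolist(), greedy.tolist())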
def sample_eps_with_check(probs, epsilon, greedy=False):
"""multinomial sampling with out of bound check,
with at least ``epsilon`` probability
Args:
probs(tensor): probability to sample from
epsilon(float): Minimum probability in sampling
greedy(bool): if ``True``, pick the action with maximum probability,
otherwise sample from it.
"""
# actions = self.sample_policy(state_curr[self.sample_node].data, args)
actions = sample_with_check(probs, greedy=greedy)
if epsilon > 1e-10:
num_action = probs.size(1)
batchsize = probs.size(0)
probs = probs.data if isinstance(
probs, torch.autograd.Variable) else probs
rej_p = probs.new().resize_(2)
rej_p[0] = 1 - epsilon
rej_p[1] = epsilon
rej = rej_p.multinomial(batchsize, replacement=True).byte()
uniform_p = probs.new().resize_(num_action).fill_(1.0 / num_action)
uniform_sampling = uniform_p.multinomial(batchsize, replacement=True)
actions[rej] = uniform_sampling[rej]
return actions
def sample_multinomial(state_curr, args, node="pi", greedy=False):
''' multinomial sampling
Args:
state_curr(dict): current state containing all data
args(dict): customized arguments for sampling. `epsilon` is used
node(str): name string for policy, default is "pi"
greedy(bool): if ``True``, pick the action with maximum probability,
otherwise sample from it.
Returns:
A list of actions using multinomial sampling.
'''
if isinstance(state_curr[node], list):
# Action map
probs = state_curr[node]
rx = len(probs)
ry = len(probs[0])
batchsize = probs[0][0].size(0)
actions = [np.zeros((rx, ry), dtype='int32') for i in range(batchsize)]
for i, actionx_prob in enumerate(probs):
for j, action_prob in enumerate(actionx_prob):
this_action = sample_eps_with_check(
action_prob.data, args.epsilon, greedy=greedy)
for k in range(batchsize):
actions[k][i, j] = this_action[k]
return actions
else:
probs = state_curr[node].data
return sample_eps_with_check(probs, args.epsilon, greedy=greedy)
def epsilon_greedy(state_curr, args, node="pi"):
''' epsilon greedy sampling
Args:
state_curr(dict): current state containing all data
args(dict): customized arguments for sampling. `epsilon` is used
node(str): name string for policy, default is "pi"
Returns:
A list of actions using epsilon greedy sampling.
'''
return sample_multinomial(state_curr, args, node=node, greedy=True)
def original_distribution(state_curr, args, node="pi"):
    ''' Return the original probability distribution as-is.
Args:
state_curr(dict): current state containing all data
args(dict): customized arguments for sampling. `epsilon` is used
node(str): name string for policy, default is "pi"
Returns:
A list of original probabilities.
'''
probs = state_curr[node].data
batchsize = probs.size(0)
# Return a list of list.
return [list(probs[i]) for i in range(batchsize)]
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from .sample_methods import sample_multinomial, epsilon_greedy
class Sampler(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'sample_policy',
'choices of epsilon-greedy, multinomial, or uniform',
'epsilon-greedy')
spec.addBoolOption(
'store_greedy',
('if enabled, picks maximum-probability action; '
'otherwise, sample from distribution'),
False)
spec.addFloatOption(
'epsilon',
'used in epsilon-greedy',
0.0)
spec.addStrListOption(
'sample_nodes',
'nodes to be sampled and saved',
['pi,a'])
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for Sampler."""
self.sample_nodes = []
for nodes in self.options.sample_nodes:
policy, action = nodes.split(",")
self.sample_nodes.append((policy, action))
def sample(self, state_curr):
"""Sample an action from distribution using a certain sample method
Args:
state_curr(dict): current state containing all data
"""
# TODO: This only handles epsilon_greedy and multinomial for now. Add
# uniform and original_distribution?
sampler = (epsilon_greedy
if self.options.store_greedy
else sample_multinomial)
actions = {}
for pi_node, a_node in self.sample_nodes:
actions[a_node] = sampler(state_curr, self.options, node=pi_node)
actions[pi_node] = state_curr[pi_node].data
return actions
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .stats import EvalCount, RewardCount, WinRate, Stats
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import import_options, PyOptionSpec
class EvalCount(object):
''' Eval Count. Run games and record required stats.'''
def __init__(self):
# All previous ids.
self.ids = {}
# id for old models.
# If this variable is set, then do not count win_rate of ids_exclude.
self.ids_exclude = set()
self.summary_count = 0
self.num_terminal = 0
def reset(self):
pass
def _on_terminal(self, id, record):
pass
def reset_on_new_model(self):
self.reset()
self.ids_exclude.update(self.ids.keys())
self.ids = dict()
def feed(self, id, *args, **kwargs):
# Game is running, not reaching terminal yet.
# Register a game id.
if id not in self.ids:
self.ids[id] = 0
self.ids[id] = self._on_game(id, self.ids[id], *args, **kwargs)
def count_completed(self):
return self.num_terminal
def terminal(self, id):
        # If this game id ended and is in the exclude list, skip it;
        # it does not count toward the number of games completed.
if id in self.ids_exclude:
self.ids_exclude.remove(id)
if id in self.ids:
del self.ids[id]
return
if id in self.ids:
self._on_terminal(id, self.ids[id])
# This game is over, remove game id if it is already in ids
del self.ids[id]
self.num_terminal += 1
# else:
# This should only happen when seq=0
# print("id=%s seq=%d, winner=%d" % (id, seq, winner))
def summary(self):
ret = self._summary()
self.reset()
self.num_terminal = 0
self.summary_count += 1
return ret
def print_summary(self):
summary = self.summary()
for k, v in summary.items():
print("%s: %s" % (str(k), str(v)))
def feed_batch(self, batch, hist_idx=0):
ids = batch["id"][hist_idx]
last_terminals = batch["last_terminal"][hist_idx]
last_r = batch["last_r"][hist_idx]
for batch_idx, (id, last_terminal) in enumerate(
zip(ids, last_terminals)):
self.feed(id, last_r[batch_idx])
if last_terminal:
self.terminal(id)
class RewardCount(EvalCount):
''' Class to accumulate rewards achieved'''
def __init__(self):
super(RewardCount, self).__init__()
self.reset()
def reset(self):
self.n = 0
self.sum_reward = 0
def _on_terminal(self, id, record):
self.sum_reward += record
self.n += 1
def _on_game(self, id, record, reward, seq=None):
return record + reward
def _summary(self):
str_reward = "[%d] Reward: %.2f/%d" % (
self.summary_count,
float(self.sum_reward) / (self.n + 1e-10),
self.n
)
return dict(str_reward=str_reward)
class WinRate(EvalCount):
''' Class to accumulate game results to win rate'''
def __init__(self):
super(WinRate, self).__init__()
self.total_win_count = 0
self.total_lose_count = 0
self.summary_count = 0
self.highest_win_rate = -1.0
        self.highest_win_rate_idx = -1
        # Initialize win/lose counters for the current interval
        # (mirrors RewardCount.__init__).
        self.reset()
def reset(self):
self.win_count = 0
self.lose_count = 0
def _on_game(self, id, record, final_reward, seq=None):
if final_reward > 0.5:
self.win_count += 1
self.total_win_count += 1
elif final_reward < -0.5:
self.lose_count += 1
self.total_lose_count += 1
def _summary(self):
total = self.win_count + self.lose_count
win_rate = self.win_count / (total + 1e-10)
new_record = False
if win_rate > self.highest_win_rate:
self.highest_win_rate = win_rate
self.highest_win_rate_idx = self.summary_count
new_record = True
str_win_rate = (
f'[{self.summary_count}] Win rate: {win_rate:.3f} '
f'[{self.win_count}/{self.lose_count}/{total}], '
f'Best win rate: {self.highest_win_rate:.3f} '
f'[{self.highest_win_rate_idx}]'
)
total = self.total_win_count + self.total_lose_count
str_acc_win_rate = "Accumulated win rate: %.3f [%d/%d/%d]" % (
self.total_win_count / (total + 1e-10),
self.total_win_count, self.total_lose_count, total
)
return dict(
new_record=new_record,
count=self.summary_count,
best_win_rate=self.highest_win_rate,
str_win_rate=str_win_rate,
str_acc_win_rate=str_acc_win_rate,
)
    # NOTE: reset() assigns instance attributes named ``win_count`` and
    # ``lose_count``, which would shadow same-named methods; use distinct
    # accessor names instead.
    def get_win_count(self): return self.total_win_count
    def get_lose_count(self): return self.total_lose_count
    def total_winlose_count(self):
        return self.total_win_count + self.total_lose_count
    def winlose_count(self):
        return self.win_count + self.lose_count
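# Illustrative WinRate bookkeeping through the EvalCount interface: feed
# the final reward per game id, then summarize.
def _win_rate_demo():
    wr = WinRate()
    wr.reset()  # make sure the per-interval counters exist
    for game_id, reward in [("g0", 1.0), ("g1", -1.0), ("g2", 1.0)]:
        wr.feed(game_id, reward)
        wr.terminal(game_id)
    print(wr.summary()["str_win_rate"])  # win rate 0.667 over 3 games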
class Stats(EvalCount):
@classmethod
def get_option_spec(cls, stats_name=''):
spec = PyOptionSpec()
spec.addStrOption(
stats_name + '_stats',
'type of stat to report (rewards or winrate)',
'')
return spec
def __init__(self, option_map, stats_name=''):
"""Initialization for Stats."""
import_options(self, option_map, self.get_option_spec(stats_name))
self.name = stats_name + "_stats"
self.collector = None
self.stats_name = getattr(self.options, self.name)
if self.stats_name == "rewards":
self.collector = RewardCount()
elif self.stats_name == "winrate":
self.collector = WinRate()
else:
self.collector = None
print("Stats: Name " + str(self.stats_name) + " is not known!")
# raise ValueError(
# "Name " + str(self.stats_name) + " is not known!")
def is_valid(self):
return self.collector is not None
def feed(self, id, *args, **kwargs):
self.collector.feed(id, *args, **kwargs)
def count_completed(self):
return self.collector.count_completed()
def reset_on_new_model(self):
self.collector.reset_on_new_model()
def terminal(self, id):
return self.collector.terminal(id)
def reset(self):
self.collector.reset()
def summary(self):
return self.collector.summary()
def print_summary(self):
self.collector.print_summary()
def feed_batch(self, batch, hist_idx=0):
return self.collector.feed_batch(batch, hist_idx=hist_idx)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import time
from collections import defaultdict
class RLTimer:
''' A customized timer class'''
def __init__(self):
self.overall_counts = defaultdict(int)
self.last_overall_mark = defaultdict(lambda: -1)
self.restart()
def restart(self):
self.start_time = time.time()
self.curr_time = datetime.now()
self.durations = defaultdict(lambda: dict(duration=0, counter=0))
def record(self, name):
curr_time = datetime.now()
self.durations[name]["duration"] += (curr_time -
self.curr_time).total_seconds()
self.durations[name]["counter"] += 1
self.overall_counts[name] += 1
self.curr_time = curr_time
def print(self, nstep):
final_time = time.time()
total_duration = (final_time - self.start_time) * 1000.0 / nstep
s = ", ".join(
"%s: %.3f ms" %
(name,
d["duration"] *
1000.0 /
d["counter"]) for name,
d in self.durations.items())
return "Total: %.3f ms. " % total_duration + s
def printInterval(self, name, nstep, callback):
if self.checkPeriodicCondition(name, nstep):
callback(self)
self.restart()
self.updatePeriodicCondition(name)
def checkPeriodicCondition(self, name, nstep):
curr_count = self.overall_counts[name]
last_count = self.last_overall_mark[name]
return curr_count > last_count and curr_count % nstep == 0
def updatePeriodicCondition(self, name):
self.last_overall_mark[name] = self.overall_counts[name]
def getPeriodicValue(self, name):
return self.overall_counts[name]
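# Minimal RLTimer sketch: record named spans inside a loop, then report
# the average per-step cost (note that print() returns a string).
def _rl_timer_demo():
    timer = RLTimer()
    for _ in range(5):
        timer.record("forward")
        timer.record("backward")
    print(timer.print(nstep=5))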
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .utils import ModelSaver, MultiCounter, topk_accuracy
from .trainer import Trainer, Evaluator
from .lstm_trainer import LSTMTrainer
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import torch
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from ..stats import Stats
from ..utils import HistState
from .utils import ModelSaver, MultiCounter
class LSTMTrainer(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'freq_update',
'frequency of model update',
1)
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.addIntOption(
'gpu',
'which GPU to use',
-1)
spec.addIntOption(
'T',
'number of timestamps',
6)
spec.addStrOption(
'parsed_args',
'dummy option',
'')
spec.merge(Stats.get_option_spec('trainer'))
spec.merge(ModelSaver.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map, verbose=False):
self.stats = Stats(option_map, "trainer")
self.saver = ModelSaver(option_map)
self.counter = MultiCounter()
# [TODO] Hard coded now, need to fix.
num_hiddens = 13 * 25
gpu = self.options.gpu
assert gpu is not None and gpu >= 0
def init_state():
return torch.FloatTensor(num_hiddens).cuda(gpu).zero_()
self.hs = HistState(self.options.T, init_state)
self.stats.reset()
def episode_start(self, i):
pass
def actor(self, batch):
self.counter.inc("actor")
ids = batch["id"][0]
seqs = batch["seq"][0]
self.hs.preprocess(ids, seqs)
hiddens = Variable(self.hs.newest(ids, 0))
m = self.mi["actor"]
m.set_volatile(True)
state_curr = m(batch.hist(0), hiddens)
m.set_volatile(False)
reply_msg = self.sampler.sample(state_curr)
reply_msg["rv"] = self.mi["actor"].step
next_hiddens = m.transition(state_curr["h"], reply_msg["a"])
self.hs.feed(ids, next_hiddens.data)
self.stats.feed_batch(batch)
return reply_msg
def train(self, batch):
self.counter.inc("train")
mi = self.mi
ids = batch["id"][0]
T = batch["s"].size(0)
hiddens = self.hs.newest(ids, T - 1)
mi.zero_grad()
self.rl_method.update(mi, batch, hiddens, self.counter.stats)
mi.update_weights()
if self.counter.counts["train"] % self.options.freq_update == 0:
mi.update_model("actor", mi["model"])
def episode_summary(self, i):
prefix = "[%s][%d] Iter" % (
str(datetime.now()), self.options.batchsize) + "[%d]: " % i
print(prefix)
if self.counter.counts["train"] > 0:
self.saver.feed(self.mi["model"])
print(
"Command arguments:", ' '.join(map(str, self.options.parsed_args)))
self.counter.summary(global_counter=i)
print("")
self.stats.print_summary()
if self.stats.count_completed() > 10000:
self.stats.reset()
def setup(self, rl_method=None, mi=None, sampler=None):
self.rl_method = rl_method
self.mi = mi
self.sampler = sampler
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import defaultdict, deque, Counter
from datetime import datetime
from elf.options import auto_import_options, PyOptionSpec
class SymLink(object):
def __init__(self, sym_prefix, latest_k=5):
self.sym_prefix = sym_prefix
self.latest_k = latest_k
self.latest_files = deque()
def feed(self, filename):
self.latest_files.appendleft(filename)
if len(self.latest_files) > self.latest_k:
self.latest_files.pop()
for k, name in enumerate(self.latest_files):
symlink_file = self.sym_prefix + str(k)
try:
if os.path.exists(symlink_file):
os.unlink(symlink_file)
os.symlink(name, symlink_file)
except BaseException:
print(
"Build symlink %s for %s failed, skipped" %
(symlink_file, name))
class ModelSaver(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'record_dir',
'directory to record in',
'./record')
spec.addStrOption(
'save_prefix',
'prefix of savefiles',
'save')
spec.addStrOption(
'save_dir',
'directory for savefiles',
os.environ.get('save', './'))
spec.addStrOption(
'latest_symlink',
'name for latest model symlink',
'latest')
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
return spec
@auto_import_options
def __init__(self, option_map):
self.save = (self.options.num_games == self.options.batchsize)
if self.save and not os.path.exists(self.options.record_dir):
os.mkdir(self.options.record_dir)
if not os.path.exists(self.options.save_dir):
os.mkdir(self.options.save_dir)
self.symlinker = SymLink(
os.path.join(
self.options.save_dir,
self.options.latest_symlink))
def feed(self, model):
basename = self.options.save_prefix + "-%d.bin" % model.step
print("Save to " + self.options.save_dir)
filename = os.path.join(self.options.save_dir, basename)
print("Filename = " + filename)
model.save(filename)
# Create a symlink
self.symlinker.feed(basename)
class ValueStats(object):
def __init__(self, name=None):
self.name = name
self.reset()
def feed(self, v):
self.summation += v
if v > self.max_value:
self.max_value = v
self.max_idx = self.counter
if v < self.min_value:
self.min_value = v
self.min_idx = self.counter
self.counter += 1
def summary(self, info=None):
info = "" if info is None else info
name = "" if self.name is None else self.name
if self.counter > 0:
try:
return "%s%s[%d]: avg: %.5f, min: %.5f[%d], max: %.5f[%d]" % (
info, name, self.counter, self.summation / self.counter,
self.min_value, self.min_idx, self.max_value, self.max_idx
)
except BaseException:
return "%s%s[Err]:" % (info, name)
else:
return "%s%s[0]" % (info, name)
def reset(self):
self.counter = 0
self.summation = 0.0
self.max_value = -1e38
self.min_value = 1e38
self.max_idx = None
self.min_idx = None
def topk_accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
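# Quick sanity check for topk_accuracy; torch is assumed importable here
# (the trainers above already depend on it).
def _topk_accuracy_demo():
    import torch
    output = torch.tensor([[0.1, 0.7, 0.2],
                           [0.8, 0.1, 0.1]])
    target = torch.tensor([1, 2])
    top1, top2 = topk_accuracy(output, target, topk=(1, 2))
    print(top1.item(), top2.item())  # 50.0 100.0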
class MultiCounter(object):
def __init__(self, verbose=False):
self.last_time = None
self.verbose = verbose
self.counts = Counter()
self.stats = defaultdict(lambda: ValueStats())
self.total_count = 0
def inc(self, key):
if self.verbose:
print("[MultiCounter]: %s" % key)
self.counts[key] += 1
self.total_count += 1
def reset(self):
for k in sorted(self.stats.keys()):
self.stats[k].reset()
self.counts = Counter()
self.total_count = 0
self.last_time = datetime.now()
def summary(self, global_counter=None):
this_time = datetime.now()
if self.last_time is not None:
print(
"[%d] Time spent = %f ms" %
(global_counter,
(this_time - self.last_time).total_seconds() * 1000))
for key, count in self.counts.items():
print("%s: %d/%d" % (key, count, self.total_count))
for k in sorted(self.stats.keys()):
v = self.stats[k]
print(v.summary(info=str(global_counter) + ":" + k))
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from datetime import datetime
from elf.options import auto_import_options, import_options, PyOptionSpec
from ..stats import Stats
from .timer import RLTimer
from .utils import ModelSaver, MultiCounter
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'elf'))
# import torch.multiprocessing as _mp
# mp = _mp.get_context('spawn')
class Evaluator(object):
@classmethod
def get_option_spec(cls, name='eval'):
spec = PyOptionSpec()
spec.addStrListOption(
'keys_in_reply',
'keys in reply',
[])
spec.addIntOption(
'num_minibatch',
'number of minibatches',
5000)
spec.addStrListOption(
'parsed_args',
'dummy option',
'')
spec.merge(Stats.get_option_spec(name))
return spec
def __init__(
self,
option_map,
name='eval',
stats=True,
verbose=False,
actor_name="actor"):
"""Initialization for Evaluator."""
import_options(self, option_map, self.get_option_spec(name))
if stats:
self.stats = Stats(option_map, name)
else:
self.stats = None
if self.stats is not None and not self.stats.is_valid():
self.stats = None
self.name = name
self.actor_name = actor_name
self.verbose = verbose
self.keys_in_reply = set(self.options.keys_in_reply)
def episode_start(self, i):
''' Called before each episode. Reset ``actor_count`` to 0.
Args:
i(int): index in the minibatch
'''
self.actor_count = 0
def actor(self, batch):
"""Actor.
        Get the model, forward the batch to obtain a distribution,
        sample from it, and act.
        Reply the message to the game engine.
Args:
batch(dict): batch data
Returns:
            reply_msg(dict):
                ``pi``: policy, ``a``: action,
                ``V``: value, ``rv``: reply version
                (stamped with the model step)
"""
if self.verbose:
print("In Evaluator[%s]::actor" % self.name)
# actor model.
m = self.mi[self.actor_name]
m.set_volatile(True)
state_curr = m.forward(batch)
m.set_volatile(False)
if self.sampler is not None:
reply_msg = self.sampler.sample(state_curr)
else:
reply_msg = dict(pi=state_curr["pi"].data)
if self.stats is not None:
self.stats.feed_batch(batch)
if "rv" in self.keys_in_reply:
reply_msg["rv"] = self.mi[self.actor_name].step
if "V" in self.keys_in_reply:
reply_msg["V"] = state_curr["V"].data
self.actor_count += 1
return reply_msg
def episode_summary(self, i):
''' Called after each episode. Print stats and summary
Args:
i(int): index in the minibatch
'''
print(
"[%s] actor count: %d/%d" %
(self.name,
self.actor_count,
self.options.num_minibatch))
if self.stats is not None:
self.stats.print_summary()
if self.stats.count_completed() > 10000:
self.stats.reset()
def setup(self, mi=None, sampler=None):
        ''' Setup `ModelInterface` and `Sampler`, and reset stats.
Args:
mi(`ModelInterface`)
            sampler(`Sampler`)
'''
self.mi = mi
self.sampler = sampler
if self.stats is not None:
self.stats.reset()
class Trainer(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'freq_update',
'frequency of model update',
1)
spec.addBoolOption(
'save_first',
'save first model',
False)
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.merge(Evaluator.get_option_spec('trainer'))
spec.merge(ModelSaver.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map, verbose=False, actor_name="actor"):
"""Initialization for Trainer."""
self.timer = RLTimer()
self.verbose = verbose
self.last_time = None
self.evaluator = Evaluator(
option_map,
'trainer',
verbose=verbose,
actor_name=actor_name)
self.saver = ModelSaver(option_map)
self.counter = MultiCounter(verbose=verbose)
        self.just_updated = False
def actor(self, batch):
"""Actor.
        Get the model, forward the batch to obtain a distribution,
        sample from it, and act.
        Reply the message to the game engine.
Args:
batch(dict): batch data
Returns:
reply_msg(dict):
                ``pi``: policy, ``a``: action, ``V``: value,
                ``rv``: reply version (stamped with the model step)
"""
self.counter.inc("actor")
return self.evaluator.actor(batch)
def train(self, batch, *args, **kwargs):
''' Trainer.
Get the model, forward the batch and update the weights.
Args:
batch(dict): batch data
'''
mi = self.evaluator.mi
self.counter.inc("train")
self.timer.record("batch_train")
mi.zero_grad()
res = self.rl_method.update(mi, batch,
self.counter.stats, *args, **kwargs)
if res["backprop"]:
mi.update_weights()
self.timer.record("compute_train")
if self.counter.counts["train"] % self.options.freq_update == 0:
# Update actor model
# print("Update actor model")
# Save the current model.
if "actor" in mi:
mi.update_model("actor", mi["model"])
                self.just_updated = True
def episode_reset(self):
''' Reset stats '''
self.counter.reset()
self.timer.restart()
def episode_start(self, i):
''' Called before each episode.
Args:
i(int): index in the minibatch
'''
self.evaluator.episode_start(i)
def episode_summary(self, i, save=True):
"""Called after each episode. Print stats and summary.
Also print arguments passed in.
Args:
i(int): index in the minibatch
"""
prefix = "[%s][%d] Iter" % (
str(datetime.now()), self.options.batchsize) + "[%d]: " % i
print(prefix)
if self.counter.counts["train"] > 0 and save:
self.saver.feed(self.evaluator.mi["model"])
print(
"Command arguments:", ' '.join(map(str, self.options.parsed_args)))
self.counter.summary(global_counter=i)
print("")
self.evaluator.episode_summary(i)
self.episode_reset()
return self.evaluator.mi["model"].step
def setup(self, rl_method=None, mi=None, sampler=None):
''' Setup `RLMethod`, ModelInterface` and `Sampler`
Args:
rl_method(RLmethod)
mi(`ModelInterface`)
            sampler(`Sampler`)
'''
self.rl_method = rl_method
self.evaluator.setup(mi=mi, sampler=sampler)
if self.options.save_first:
print("Save first: ")
self.saver.feed(self.evaluator.mi["model"])
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
from collections import defaultdict
import numpy as np
import torch
class Allocator(object):
    ''' Allocates shared-memory tensors/arrays that back batch data'''
torch_types = {
"int32_t": torch.IntTensor,
"int64_t": torch.LongTensor,
"float": torch.FloatTensor,
"unsigned char": torch.ByteTensor,
"char": torch.ByteTensor
}
numpy_types = {
"int32_t": 'i4',
'int64_t': 'i8',
'float': 'f4',
'unsigned char': 'byte',
'char': 'byte'
}
@staticmethod
def _alloc(p, gpu, use_numpy=True):
name = p.field().name()
type_name = p.field().type_name()
sz = p.field().sz().vec()
print(name, type_name, sz)
if not use_numpy:
v = Allocator.torch_types[type_name](*sz)
if gpu is not None:
with torch.cuda.device(gpu):
v = v.pin_memory()
v.fill_(1)
# Return pointer, size and byte_stride
strides = [i * v.element_size() for i in v.stride()]
p.set(v.data_ptr(), strides)
else:
v = np.zeros(sz, dtype=Allocator.numpy_types[type_name])
v[:] = 1
# Return pointer, size and byte_size
p.set(v.ctypes.data, v.strides)
return name, v
@staticmethod
def spec2batches(ctx, batchsize, spec, gpu, use_numpy=False, num_recv=1):
batch_spec = []
name2idx = defaultdict(lambda: list())
idx2name = dict()
for name, v in spec.items():
print("%s: %s" % (name, v))
            # TODO: this might not be good since it mutates the input spec.
if "input" not in v or v["input"] is None:
v["input"] = []
if "reply" not in v or v["reply"] is None:
v["reply"] = []
this_batchsize = v.get("batchsize", batchsize)
keys = list(set(v["input"] + v["reply"]))
print("SharedMem: \"%s\", keys: %s" % (name, str(keys)))
smem_opts = ctx.createSharedMemOptions(name, this_batchsize)
smem_opts.setTimeout(v.get("timeout_usec", 0))
for _ in range(num_recv):
smem = ctx.allocateSharedMem(smem_opts, keys)
spec = dict((
Allocator._alloc(smem[field], gpu, use_numpy=use_numpy)
for field in keys
))
# Split spec.
spec_input = {key: spec[key] for key in v["input"]}
spec_reply = {key: spec[key] for key in v["reply"]}
batch_spec.append(dict(input=spec_input, reply=spec_reply))
idx = smem.getSharedMemOptions().idx()
name2idx[name].append(idx)
idx2name[idx] = name
return batch_spec, name2idx, idx2name
def tensor_slice(t, dim, b, e=None):
if e is None:
e = b + 1
if dim == 0:
return t[b:e]
elif dim == 1:
return t[:, b:e]
elif dim == 2:
return t[:, :, b:e]
else:
raise ValueError("unsupported %d in tensor_slice" % dim)
class Batch:
def __init__(self, _GC=None, _batchdim=0, _histdim=None, **kwargs):
'''Initialize `Batch` class.
Pass in a dict and wrap it into ``self.batch``
'''
self.GC = _GC
self.batchdim = _batchdim
self.histdim = _histdim
self.batch = kwargs
def empty_copy(self):
batch = Batch()
batch.GC = self.GC
batch.batchdim = self.batchdim
batch.histdim = self.histdim
return batch
def first_k(self, batchsize):
batch = self.empty_copy()
        batch.batch = {
            k: tensor_slice(v, self.batchdim, 0, batchsize)
            for k, v in self.batch.items()}
return batch
def __getitem__(self, key):
'''Get a key from batch. Can be either ``key`` or ``last_key``
Args:
key(str): key name. e.g. if ``r`` is passed in,
will search for ``r`` or ``last_r``
'''
if key in self.batch:
return self.batch[key]
else:
key_with_last = "last_" + key
if key_with_last in self.batch:
return self.batch[key_with_last][1:]
else:
raise KeyError(
"Batch(): specified key: %s or %s not found!" %
(key, key_with_last))
def add(self, key, value):
'''Add key=value in Batch.
This is used when you want to send additional state to the
learning algorithm, e.g., hidden state collected from the
previous iterations.
'''
self.batch[key] = value
return self
def __contains__(self, key):
return key in self.batch or "last_" + key in self.batch
def setzero(self):
''' Set all tensors in the batch to 0 '''
for _, v in self.batch.items():
v[:] = 0
def copy_from(self, src):
''' copy all keys and values from another dict or `Batch` object
Args:
src(dict or `Batch`): batch data to be copied
'''
this_src = src if isinstance(src, dict) else src.batch
key_assigned = {k: False for k in self.batch.keys()}
keys_extra = []
for k, v in this_src.items():
# Copy it down to cpu.
if k not in self.batch:
keys_extra.append(k)
continue
bk = self.batch[k]
key_assigned[k] = True
if v is None:
continue
if isinstance(v, list) and bk.numel() == len(v):
bk = bk.view(-1)
for i, vv in enumerate(v):
bk[i] = vv
elif isinstance(v, (int, float)):
bk.fill_(v)
else:
                try:
                    bk[:] = v.squeeze_()
                except BaseException:
                    raise ValueError(
                        "copy_from: cannot copy value of key '%s' "
                        "into the batch" % k)
# Check whether there is any key missing.
keys_missing = [
k for k, assigned in key_assigned.items() if not assigned]
return keys_extra, keys_missing
def hist(self, hist_idx, key=None):
'''
return batch history.
Args:
            hist_idx(int): hist_idx=1 means going back in time by
                one step, etc.
key(str): if None, return all key's history,
otherwise just return that key's history
'''
if self.histdim is None:
raise ValueError("No histdim information for the batch")
if key is None:
new_batch = self.empty_copy()
new_batch.batch = {
k: tensor_slice(v, self.histdim, hist_idx)
for k, v in self.batch.items()
}
return new_batch
else:
return tensor_slice(self[key], self.histdim, hist_idx)
def half(self):
'''transfer batch data to fp16'''
new_batch = self.empty_copy()
new_batch.batch = {k: v.half()
for k, v in self.batch.items()}
return new_batch
def cpu2gpu(self, gpu, non_blocking=True):
''' transfer batch data to gpu '''
# For each time step
new_batch = self.empty_copy()
new_batch.batch = {k: v.cuda(gpu, non_blocking=non_blocking)
for k, v in self.batch.items()}
return new_batch
def cpu2cpu(self, gpu, non_blocking=True):
        ''' clone batch data on cpu (the ``gpu`` argument is unused) '''
# For each time step
new_batch = self.empty_copy()
new_batch.batch = {k: v.clone() for k, v in self.batch.items()}
return new_batch
def transfer_cpu2gpu(self, batch_gpu, non_blocking=True):
''' transfer batch data to gpu '''
# For each time step
for k, v in self.batch.items():
batch_gpu[k].copy_(v, non_blocking=non_blocking)
def transfer_cpu2cpu(self, batch_dst, non_blocking=True):
''' transfer batch data to cpu '''
# For each time step
for k, v in self.batch.items():
batch_dst[k].copy_(v)
def pin_clone(self):
''' clone and pin memory for faster transportations to gpu '''
batch = self.empty_copy()
batch.batch = {k: v.clone().pin_memory()
for k, v in self.batch.items()}
return batch
def to_numpy(self):
''' convert batch data to numpy format '''
return {
k: (v.numpy() if not isinstance(v, np.ndarray) else v)
for k, v in self.batch.items()
}
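# Illustrative Batch lookups: plain keys resolve directly, while missing
# keys fall back to "last_" entries with the first history step dropped.
def _batch_demo():
    b = Batch(_batchdim=0, r=torch.zeros(4), last_terminal=torch.zeros(5))
    print(b["r"].shape)         # torch.Size([4])
    print(b["terminal"].shape)  # torch.Size([4]), via "last_terminal"[1:]
    print("terminal" in b)      # True, through the same fallback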
class GCWrapper:
def __init__(
self,
GC,
batchsize,
spec,
batchdim=0,
histdim=None,
use_numpy=False,
gpu=None,
params=dict(),
verbose=True,
num_recv=1):
        '''Initialize GCWrapper
        Parameters:
            GC(C++ class): Game Context
            batchsize(int): default batch size
            spec(dict): descriptions of input and reply entries
                per callback key. A detailed explanation can be found
                in :doc:`wrapper-python`, the Python interface of the
                wrapper.
            use_numpy(boolean): whether to use numpy arrays
                (instead of PyTorch tensors)
            gpu(int): gpu to use.
            params(dict): additional parameters
        '''
# TODO Make a unified argument server and remove ``params``
self.batches, self.name2idx, self.idx2name = Allocator.spec2batches(
GC.ctx(), batchsize, spec,
use_numpy=use_numpy, gpu=gpu, num_recv=num_recv)
self.batchdim = batchdim
self.histdim = histdim
self.gpu = gpu
self.params = params
self.GC = GC
self._cb = {}
def reg_has_callback(self, key):
return key in self.name2idx
def reg_callback_if_exists(self, key, cb):
if self.reg_has_callback(key):
self.reg_callback(key, cb)
return True
else:
return False
def reg_callback(self, key, cb):
'''Set callback function for key
Parameters:
key(str): the key used to register the callback function.
If the key is not present in the descriptions,
return ``False``.
cb(function): the callback function to be called.
The callback function has the signature
``cb(input_batch, input_batch_gpu, reply_batch)``.
'''
if key not in self.name2idx:
raise ValueError("Callback[%s] is not in the specification" % key)
if cb is None:
print("Warning: Callback[%s] is registered to None" % key)
for idx in self.name2idx[key]:
# print("Register " + str(cb) + " at idx: %d" % idx)
self._cb[idx] = cb
return True
def _makebatch(self, key_array):
return Batch(
_GC=self.GC,
_batchdim=self.batchdim,
_histdim=self.histdim,
**key_array)
def _call(self, smem, *args, **kwargs):
idx = smem.getSharedMemOptions().idx()
# print("smem idx: %d, label: %s" % (idx, self.idx2name[idx]))
# print(self.name2idx)
if idx not in self._cb:
raise ValueError("smem.idx[%d] is not in callback functions" % idx)
if self._cb[idx] is None:
return
batchsize = smem.effective_batchsize()
assert batchsize > 0
picked = self._makebatch(self.batches[idx]["input"]).first_k(batchsize)
if self.gpu is not None:
picked = picked.cpu2gpu(self.gpu)
        # Attach the shared-memory handle so callers can reach the
        # underlying state directly if they need to.
picked.smem = smem
picked.batchsize = batchsize
picked.max_batchsize = smem.getSharedMemOptions().batchsize()
# Get the reply array
if self.batches[idx]["reply"] is not None:
sel_reply = self._makebatch(
self.batches[idx]["reply"]).first_k(batchsize)
else:
sel_reply = None
reply = self._cb[idx](picked, *args, **kwargs)
# If reply is meaningful, send them back.
if isinstance(reply, dict) and sel_reply is not None:
if self.gpu is not None:
with torch.cuda.device(self.gpu):
keys_extra, keys_missing = sel_reply.copy_from(reply)
else:
keys_extra, keys_missing = sel_reply.copy_from(reply)
if len(keys_extra) > 0:
raise ValueError(
"Receive extra keys %s from reply!" %
str(keys_extra))
if len(keys_missing) > 0:
raise ValueError(
"Missing keys %s absent in reply!" %
str(keys_missing))
def _check_callbacks(self):
# Check whether all callbacks are assigned properly.
for key, indices in self.name2idx.items():
for idx in indices:
if idx not in self._cb:
raise ValueError(
("GCWrapper.start(): No callback function "
"for key = %s and idx = %d") %
(key, idx))
def run(self, *args, **kwargs):
        '''Wait for a batch from an arbitrary collector key.
Samples in a returned batch are always from the same group,
but the group key of the batch may be arbitrary.
'''
# print("before wait")
smem = self.GC.ctx().wait()
# print("before calling")
self._call(smem, *args, **kwargs)
# print("before_step")
self.GC.ctx().step()
def start(self):
'''Start all game environments'''
self._check_callbacks()
self.GC.ctx().start()
def stop(self):
'''Stop all game environments.
:func:`start()` cannot be called again after :func:`stop()`
has been called.
'''
self.GC.ctx().stop()
def reg_sig_int(self):
import signal
def signal_handler(s, frame):
print('Detected Ctrl-C!')
self.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
class ContextArgs(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.addIntOption(
'T',
'number of timesteps',
6)
spec.addIntOption(
'mcts_threads',
'number of MCTS threads',
0)
spec.addIntOption(
'mcts_rollout_per_batch',
'Batch size for mcts rollout',
1)
spec.addIntOption(
'mcts_rollout_per_thread',
            'number of rollouts per MCTS thread',
1)
spec.addBoolOption(
'mcts_verbose',
'enables mcts verbosity',
False)
spec.addBoolOption(
'mcts_verbose_time',
'enables mcts verbosity for time stats',
False)
spec.addBoolOption(
'mcts_persistent_tree',
'use persistent tree in MCTS',
False)
spec.addBoolOption(
'mcts_use_prior',
'use prior in MCTS',
False)
spec.addIntOption(
'mcts_virtual_loss',
'"virtual" number of losses for MCTS edges',
0)
spec.addStrOption(
'mcts_pick_method',
'criterion for mcts node selection',
'most_visited')
spec.addFloatOption(
'mcts_puct',
'prior weight',
1.0)
spec.addFloatOption(
'mcts_epsilon',
'for exploration enhancement, weight of randomization',
0.0)
spec.addFloatOption(
'mcts_alpha',
'for exploration enhancement, alpha term in gamma distribution',
0.0)
spec.addBoolOption(
"mcts_unexplored_q_zero",
'set all unexplored node to have Q value zero',
False)
spec.addBoolOption(
"mcts_root_unexplored_q_zero",
'set unexplored child of root node to have Q value zero',
False)
return spec
@auto_import_options
def __init__(self, option_map):
pass
def initialize(self, co):
options = self.options
mcts = co.mcts_options
co.num_games = options.num_games
co.batchsize = options.batchsize
co.T = options.T
mcts.num_threads = options.mcts_threads
mcts.num_rollouts_per_thread = options.mcts_rollout_per_thread
mcts.num_rollouts_per_batch = options.mcts_rollout_per_batch
mcts.verbose = options.mcts_verbose
mcts.verbose_time = options.mcts_verbose_time
mcts.virtual_loss = options.mcts_virtual_loss
mcts.pick_method = options.mcts_pick_method
mcts.persistent_tree = options.mcts_persistent_tree
mcts.root_epsilon = options.mcts_epsilon
mcts.root_alpha = options.mcts_alpha
mcts.alg_opt.use_prior = options.mcts_use_prior
mcts.alg_opt.c_puct = options.mcts_puct
mcts.alg_opt.unexplored_q_zero = options.mcts_unexplored_q_zero
mcts.alg_opt.root_unexplored_q_zero = \
options.mcts_root_unexplored_q_zero
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# C++ imports
from _elf import *
# Other imports
from .context_utils import ContextArgs
from .more_labels import MoreLabels
from .utils_elf import GCWrapper, Batch
from .zmq_util import ZMQSender, ZMQReceiver
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import zmq
class ZMQCtx:
def __init__(self):
pass
def __enter__(self):
pass
    def __exit__(self, ty, value, tb):
        # Swallow any exception (e.g. a send/recv timeout) so callers
        # fall through to their failure return value.
        return True
class ZMQSender:
def __init__(self, addr, identity, send_timeout=0, recv_timeout=0):
self.ctx = zmq.Context()
self.ctx.setsockopt(zmq.IPV6, 1)
self.sender = self.ctx.socket(zmq.DEALER)
self.sender.identity = identity.encode('ascii')
# self.sender.set_hwm(10000)
if send_timeout > 0:
self.sender.SNDTIMEO = send_timeout
if recv_timeout > 0:
self.sender.RCVTIMEO = recv_timeout
self.sender.connect(addr)
def Send(self, msg, copy=False):
with ZMQCtx():
self.sender.send(msg, copy=copy)
return True
return False
def Receive(self):
with ZMQCtx():
return self.sender.recv()
return None
class ZMQReceiver:
def __init__(self, addr, timeout=0):
self.ctx = zmq.Context()
self.ctx.setsockopt(zmq.IPV6, 1)
self.receiver = self.ctx.socket(zmq.ROUTER)
# self.receiver.set_hwm(10000)
if timeout > 0:
self.receiver.RCVTIMEO = timeout
self.receiver.bind(addr)
def Send(self, identity, msg):
with ZMQCtx():
self.receiver.send_multipart([identity, msg])
return True
return False
def Receive(self):
# return identity, msg
with ZMQCtx():
identity, msg = self.receiver.recv_multipart()
# print(identity)
# print(msg)
return identity, msg
return None, None
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
class MoreLabels(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrListOption(
'additional_labels',
'add additional labels in the batch; e.g. id, seq, last_terminal',
[])
return spec
@auto_import_options
def __init__(self, option_map):
pass
def add_labels(self, desc):
if self.options.additional_labels:
for _, v in desc.items():
v["input"].extend(self.options.additional_labels)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .import_options import auto_import_options, import_options
from .py_option_map import PyOptionMap
from .py_option_spec import PyOptionSpec
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import sys
import elf
from _elf import _options
# We can get rid of this and just eval() the type name.
# Depends on how safe we want to be.
_typename_to_type = {
'str': str,
'int': int,
'float': float,
'bool': bool,
}
class PyOptionSpec(_options.OptionSpec):
"""Override C++ OptionSpec with additional bells and whistles."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def getArgparseOptions(self):
return json.loads(self.getPythonArgparseOptionsAsJSONString())
def toArgparser(self):
"""Creates an ArgumentParser from a PyOptionSpec."""
parser = argparse.ArgumentParser()
parser_options = self.getArgparseOptions()
for parser_option in parser_options:
if 'type' in parser_option['kwargs']:
parser_option['kwargs']['type'] = \
_typename_to_type[parser_option['kwargs']['type']]
parser.add_argument(
*parser_option["args"],
**parser_option["kwargs"])
return parser
def parse(self, args=None, overrides=None):
"""Given a PyOptionSpec, parses the command line parameters
        (``sys.argv``) and returns the resulting PyOptionMap.
``args`` can override ``sys.argv`` and ``overrides`` can override
any parsed items.
"""
parser = self.toArgparser()
arg_namespace = parser.parse_args(args=args)
if overrides:
for k, v in overrides.items():
setattr(arg_namespace, k, v)
arg_namespace.parsed_args = list(sys.argv if args is None else args)
option_map = elf.options.PyOptionMap(self)
option_map.loadOptionDict(vars(arg_namespace))
return option_map
@classmethod
def fromClasses(cls, classes):
option_spec = cls()
for c in classes:
option_spec.merge(c.get_option_spec())
return option_spec
def clone(self):
return PyOptionSpec(self)
def __deepcopy__(self, memo):
return self.clone()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import elf
from _elf import _options
class PyOptionMap(_options.OptionMap):
"""Override C++ OptionMap with additional bells and whistles."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def getPyOptionSpec(self):
return elf.options.PyOptionSpec(super().getOptionSpec())
def loadOptionDict(self, option_dict):
return self.loadJSONString(json.dumps(option_dict))
def getOptionDict(self):
return json.loads(self.getJSONString())
def get(self, option_name):
return json.loads(self.getAsJSONString(option_name))
def storeIntoNamespace(self, namespace, option_spec=None):
"""Stores the parameters from a PyOptionMap into a namespace."""
if option_spec is None:
option_spec = self.getPyOptionSpec()
option_names = option_spec.getOptionNames()
for name in option_names:
setattr(namespace, name, self.get(name))
def clone(self):
return PyOptionMap(self)
def __deepcopy__(self, memo):
return self.clone()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def import_options(obj, option_map, option_spec, namespace=None):
"""Stores the parameters from a PyOptionMap into ``obj.options``."""
if namespace is None:
setattr(obj, 'options', argparse.Namespace())
namespace = obj.options
if option_spec is None:
option_spec = option_map.getPyOptionSpec()
option_map.storeIntoNamespace(namespace, option_spec)
def auto_import_options(fn):
"""This decorator applies to __init__ methods where the first argument
is a PyOptionMap.
It copies each required argument (as specified by the class's
``get_option_spec()``) from the PyOptionMap into the object namespace
of ``self.options`` (i.e. ``self.options.blah``).
"""
def call(self, option_map, *args, **kwargs):
import_options(self, option_map, self.get_option_spec())
return fn(self, option_map, *args, **kwargs)
return call
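# Self-contained illustration of the decorator contract. The real option
# map is a PyOptionMap; this stub mimics only the storeIntoNamespace hook
# that import_options relies on, and the spec object is a placeholder.
class _StubOptionMap:
    def storeIntoNamespace(self, namespace, option_spec):
        namespace.batchsize = 128

class _DemoComponent:
    @classmethod
    def get_option_spec(cls):
        return object()  # forwarded to the map, unused by the stub

    @auto_import_options
    def __init__(self, option_map):
        print(self.options.batchsize)  # -> 128, copied by the decorator

# _DemoComponent(_StubOptionMap()) prints 128.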
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from . import LoggerLevel, set_level
class GlobalLoggingConfigurator(object):
"""Global configurator for logging."""
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'loglevel',
('Global log level. Choose from '
             'trace, debug, info, warning, error, critical, or off.'),
'info')
return spec
@auto_import_options
def __init__(self, option_map):
pass
def configure(self):
loglevel = LoggerLevel.from_str(self.options.loglevel)
assert loglevel != LoggerLevel.invalid
set_level(loglevel)
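# Usage sketch (illustrative only; assumes an option map parsed elsewhere in
# the framework):
#
#     configurator = GlobalLoggingConfigurator(option_map)
#     configurator.configure()  # applies options.loglevel globally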
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Let's import C++ logging functions/classes as-is.
from _elf._logging import *
from .configuration import GlobalLoggingConfigurator
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import traceback
from collections import Counter
def move2xy(v):
if v.lower() == "pass":
return -1, -1
x = ord(v[0].lower()) - ord('a')
# Skip 'i'
if x >= 9:
x -= 1
y = int(v[1:]) - 1
return x, y
def xy2move(x, y):
if x == -1 and y == -1:
return "pass"
if x >= 8:
x += 1
return chr(x + 65) + str(y + 1)
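# Quick round-trip examples for the two helpers above (the column letter 'I'
# is skipped by Go convention, so board x == 8 maps to 'J'):
#
#     assert move2xy('pass') == (-1, -1)
#     assert move2xy('J3') == (8, 2)
#     assert xy2move(*move2xy('J3')) == 'J3'
#     assert xy2move(-1, -1) == 'pass'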
def plot_plane(v):
s = ""
for j in range(v.size(1)):
for i in range(v.size(0)):
if v[i, v.size(1) - 1 - j] != 0:
s += "o "
else:
s += ". "
s += "\n"
print(s)
def topk_accuracy2(batch, state_curr, topk=(1,)):
pi = state_curr["pi"]
import torch
if isinstance(pi, torch.autograd.Variable):
pi = pi.data
score, indices = pi.sort(dim=1, descending=True)
maxk = max(topk)
topn_count = [0] * maxk
for ind, gt in zip(indices, batch["offline_a"][0]):
for i in range(maxk):
if ind[i] == gt[0]:
topn_count[i] += 1
for i in range(maxk):
topn_count[i] /= indices.size(0)
return [topn_count[i - 1] for i in topk]
class GoConsole:
    def __init__(self, GC, evaluator):
        self.exit = False
        self.GC = GC
        self.board_size = GC.params["board_size"]
        self.evaluator = evaluator
        self.last_move_idx = None
        # Initialize here so the "check" command works before any
        # "check2end" command has run.
        self.check_stats = Counter()
def move2action(self, v):
if v.lower() == "pass":
return self.board_size ** 2
x, y = move2xy(v)
return x * self.board_size + y
def action2move(self, a):
if a == self.board_size ** 2:
return "pass"
x = a // self.board_size
y = a % self.board_size
return xy2move(x, y)
def check(self, batch):
reply = self.evaluator.actor(batch)
topk = topk_accuracy2(batch, reply, topk=(1, 2, 3, 4, 5))
for i, v in enumerate(topk):
self.check_stats[i] += v
if sum(topk) == 0:
self.check_stats[-1] += 1
def actor(self, batch):
reply = self.evaluator.actor(batch)
return reply
def showboard(self, batch):
print(batch.GC.getGame(0).showBoard())
def prompt(self, prompt_str, batch):
if self.last_move_idx is not None:
curr_move_idx = batch["move_idx"][0][0]
if curr_move_idx - self.last_move_idx == 1:
self.check(batch)
self.last_move_idx = curr_move_idx
return
else:
n = sum(self.check_stats.values())
print("#Move: " + str(n))
accu = 0
for i in range(5):
accu += self.check_stats[i]
print("Top %d: %.3f" % (i, accu / n))
self.last_move_idx = None
self.showboard(batch)
# Ask user to choose
while True:
if getattr(self, "repeat", 0) > 0:
self.repeat -= 1
cmd = self.repeat_cmd
else:
cmd = input(prompt_str)
items = cmd.split()
            if len(items) < 1:
                print("Invalid input")
                continue
c = items[0]
reply = dict(pi=None, a=None, V=0)
try:
if c == 'p':
reply["a"] = self.move2action(items[1])
return reply
elif c == 'c':
reply = self.evaluator.actor(batch)
return reply
elif c == "s":
channel_id = int(items[1])
plot_plane(batch["s"][0][0][channel_id])
elif c == "a":
reply = self.evaluator.actor(batch)
if "pi" in reply:
score, indices = reply["pi"].squeeze().sort(
dim=0, descending=True)
first_n = int(items[1])
for i in range(first_n):
print("%s: %.3f" %
(self.action2move(indices[i]), score[i]))
else:
print("No key \"pi\"")
elif c == "check":
print("Top %d" % self.check(batch))
elif c == 'check2end':
self.check_stats = Counter()
self.check(batch)
self.last_move_idx = batch["move_idx"][0][0]
if len(items) == 2:
self.repeat = int(items[1])
self.repeat_cmd = "check2end_cont"
return
elif c == "check2end_cont":
if not hasattr(self, "check_stats"):
self.check_stats = Counter()
self.check(batch)
self.last_move_idx = batch["move_idx"][0][0]
return
elif c == "aug":
print(batch["aug_code"][0][0])
elif c == "show":
self.showboard(batch)
elif c == "dbg":
import pdb
pdb.set_trace()
elif c == 'offline_a':
if "offline_a" in batch:
for i, offline_a in \
enumerate(batch["offline_a"][0][0]):
print(
"[%d]: %s" %
(i, self.action2move(offline_a)))
else:
print("No offline_a available!")
elif c == "exit":
self.exit = True
return reply
else:
print("Invalid input: " + cmd + ". Please try again")
except Exception as e:
print("Something wrong! " + str(e))
'''
elif c == "u":
batch.GC.undoMove(0)
self.showboard(batch)
elif c == "h":
handicap = int(items[1])
batch.GC.applyHandicap(0, handicap)
self.showboard(batch)
'''
class GoConsoleGTP:
def on_protocol_version(self, batch, items, reply):
return True, "2"
def on_clear_board(self, batch, items, reply):
reply["a"] = self.actions["clear"]
return True, reply
def on_name(self, batch, items, reply):
return True, "DF2"
def on_komi(self, batch, items, reply):
# For now we just fix komi number.
if items[1] != "7.5":
return False, "We only support 7.5 komi for now"
return True, None
def on_boardsize(self, batch, items, reply):
if items[1] != str(self.board_size):
return (
False,
"We only support %dx%d board for now" % (
self.board_size, self.board_size)
)
return True, None
def on_genmove(self, batch, items, reply):
ret, msg = self.check_player(batch, items[1][0])
if ret:
reply["a"] = self.actions["skip"]
return True, reply
else:
return False, msg
def on_play(self, batch, items, reply):
ret, msg = self.check_player(batch, items[1][0])
if ret:
reply["a"] = self.move2action(items[2])
return True, reply
else:
return False, msg
def on_showboard(self, batch, items, reply):
self.showboard(batch)
return True, None
def on_final_score(self, batch, items, reply):
final_score = self.get_final_score(batch)
if final_score > 0:
return True, "B+%.1f" % final_score
else:
return True, "W+%.1f" % (-final_score)
def on_version(self, batch, items, reply):
return True, "1.0"
def on_exit(self, batch, items, reply):
self.exit = True
return True, reply
def on_quit(self, batch, items, reply):
return self.on_exit(batch, items, reply)
def on_list_commands(self, batch, items, reply):
msg = "\n".join(self.commands.keys())
return True, msg
def __init__(self, GC, evaluator):
self.exit = False
self.GC = GC
self.board_size = GC.params["board_size"]
self.evaluator = evaluator
self.actions = {
"skip": GC.params["ACTION_SKIP"],
"pass": GC.params["ACTION_PASS"],
"resign": GC.params["ACTION_RESIGN"],
"clear": GC.params["ACTION_CLEAR"]
}
self.last_cmd = ""
self.commands = {
key[3:]: func
for key, func in inspect.getmembers(
self, predicate=inspect.ismethod)
if key.startswith("on_")
}
def move2action(self, v):
if v.lower() in self.actions:
return self.actions[v.lower()]
x, y = move2xy(v)
return x * self.board_size + y
def actor(self, batch):
reply = self.evaluator.actor(batch)
return reply
def action2move(self, a):
x = a // self.board_size
y = a % self.board_size
return xy2move(x, y)
def showboard(self, batch):
print(batch.GC.getGame(0).showBoard())
def get_next_player(self, batch):
return batch.GC.getGame(0).getNextPlayer()
def get_last_move(self, batch):
return batch.GC.getGame(0).getLastMove()
def get_final_score(self, batch):
return batch.GC.getGame(0).getLastScore()
def check_player(self, batch, player):
board_next_player = self.get_next_player(batch)
if player.lower() != board_next_player.lower():
return (
False,
("Specified next player %s is not the same as the "
"next player %s on the board") % (
player, board_next_player
)
)
else:
return True, None
def print_msg(self, ret, msg):
print("\n%s %s\n\n" % (("=" if ret else "?"), msg))
def prompt(self, prompt_str, batch):
# Show last command results.
if self.last_cmd == "play" or self.last_cmd == "clear_board":
self.print_msg(True, "")
elif self.last_cmd == "genmove":
self.print_msg(True, self.get_last_move(batch))
self.last_cmd = ""
while True:
cmd = input(prompt_str)
items = cmd.split()
if len(items) < 1:
self.print_msg(False, "Invalid input")
continue
c = items[0]
reply = dict(pi=None, a=None, V=0)
try:
ret, msg = self.commands[c](batch, items, reply)
self.last_cmd = c
if not ret:
self.print_msg(False, msg)
else:
if isinstance(msg, dict):
return msg
elif isinstance(msg, str):
self.print_msg(True, msg)
else:
self.print_msg(True, "")
except Exception:
print(traceback.format_exc())
self.print_msg(False, "Invalid command")
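# The command table in __init__ above is built by reflection: every method
# named on_<command> becomes the handler for GTP command <command>. A
# standalone sketch of the same technique (class and method names are
# hypothetical):
#
#     import inspect
#
#     class Dispatcher:
#         def on_ping(self):
#             return 'pong'
#
#         def __init__(self):
#             self.commands = {
#                 name[3:]: fn
#                 for name, fn in inspect.getmembers(self, inspect.ismethod)
#                 if name.startswith('on_')
#             }
#
#     print(Dispatcher().commands['ping']())  # -> pong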
|
#!/usr/bin/env python
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import torch
from console_lib import GoConsoleGTP
from rlpytorch import Evaluator, load_env
def main():
print('Python version:', sys.version)
print('PyTorch version:', torch.__version__)
    print('CUDA version:', torch.version.cuda)
print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
additional_to_load = {
'evaluator': (
Evaluator.get_option_spec(),
lambda object_map: Evaluator(object_map, stats=None)),
}
# Set game to online model.
env = load_env(
os.environ,
overrides={
'num_games': 1,
'greedy': True,
'T': 1,
'model': 'online',
'additional_labels': ['aug_code', 'move_idx'],
},
additional_to_load=additional_to_load)
evaluator = env['evaluator']
GC = env["game"].initialize()
model_loader = env["model_loaders"][0]
model = model_loader.load_model(GC.params)
mi = env['mi']
mi.add_model("model", model)
mi.add_model("actor", model)
mi["model"].eval()
mi["actor"].eval()
console = GoConsoleGTP(GC, evaluator)
def human_actor(batch):
return console.prompt("", batch)
def actor(batch):
return console.actor(batch)
def train(batch):
console.prompt("DF Train> ", batch)
evaluator.setup(sampler=env["sampler"], mi=mi)
GC.reg_callback_if_exists("actor_black", actor)
GC.reg_callback_if_exists("human_actor", human_actor)
GC.reg_callback_if_exists("train", train)
GC.start()
GC.GC.getClient().setRequest(
mi["actor"].step, -1, env['game'].options.resign_thres, -1)
evaluator.episode_start(0)
while True:
GC.run()
if console.exit:
break
GC.stop()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import time
import re
from datetime import datetime
import torch
from rlpytorch import \
Evaluator, load_env, ModelInterface
class Stats(object):
def __init__(self):
self.total_batchsize = 0
self.total_sel_batchsize = 0
self.actor_count = 0
def feed(self, batch):
self.total_sel_batchsize += batch.batchsize
self.total_batchsize += batch.max_batchsize
self.actor_count += 1
if self.total_sel_batchsize >= 500000:
print(datetime.now())
batch_usage = self.total_sel_batchsize / self.total_batchsize
print(f'Batch usage: '
f'{self.total_sel_batchsize}/{self.total_batchsize} '
f'({100.0 * batch_usage:.2f}%)')
wr = batch.GC.getClient().getGameStats().getWinRateStats()
win_rate = (100.0 * wr.black_wins / wr.total_games
if wr.total_games > 0
else 0.0)
print(f'B/W: {wr.black_wins}/{wr.white_wins}. '
                  f'Black winrate: {win_rate:.2f} ({wr.total_games})')
self.total_sel_batchsize = 0
self.total_batchsize = 0
print('Actor count:', self.actor_count)
name_matcher = re.compile(r"save-(\d+)")
def extract_ver(model_loader):
name = os.path.basename(model_loader.options.load)
m = name_matcher.match(name)
return int(m.group(1))
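# e.g. a loader whose options.load ends in ".../save-1500.bin" yields
# extract_ver(...) == 1500, since name_matcher captures the digits after
# "save-" in the basename.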
def reload_model(model_loader, params, mi, actor_name, args):
model = model_loader.load_model(params)
if actor_name not in mi:
mi.add_model(actor_name, model, cuda=(args.gpu >= 0), gpu_id=args.gpu)
else:
mi.update_model(actor_name, model)
mi[actor_name].eval()
def reload(mi, model_loader, params, args, root, ver, actor_name):
if model_loader.options.load is None or model_loader.options.load == "":
print('No previous model loaded, loading from', root)
real_path = os.path.join(root, "save-" + str(ver) + ".bin")
else:
this_root = os.path.dirname(model_loader.options.load)
real_path = os.path.join(this_root, "save-" + str(ver) + ".bin")
if model_loader.options.load != real_path:
model_loader.options.load = real_path
reload_model(model_loader, params, mi, actor_name, args)
else:
print('Warning! Same model, skip loading', real_path)
def main():
print('Python version:', sys.version)
print('PyTorch version:', torch.__version__)
    print('CUDA version:', torch.version.cuda)
print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
# Set game to online model.
actors = ["actor_black", "actor_white"]
additional_to_load = {
("eval_" + actor_name): (
Evaluator.get_option_spec(name="eval_" + actor_name),
lambda object_map, actor_name=actor_name: Evaluator(
object_map, name="eval_" + actor_name,
actor_name=actor_name, stats=None)
)
for i, actor_name in enumerate(actors)
}
additional_to_load.update({
("mi_" + name): (ModelInterface.get_option_spec(), ModelInterface)
for name in actors
})
env = load_env(
os.environ, num_models=2, overrides={'actor_only': True},
additional_to_load=additional_to_load)
GC = env["game"].initialize()
stats = [Stats(), Stats()]
for i in range(len(actors)):
actor_name = actors[i]
stat = stats[i]
e = env["eval_" + actor_name]
print(f'register {actor_name} for e = {e!s}')
e.setup(sampler=env["sampler"], mi=env["mi_" + actor_name])
def actor(batch, e, stat):
reply = e.actor(batch)
stat.feed(batch)
return reply
GC.reg_callback(actor_name,
lambda batch, e=e, stat=stat: actor(batch, e, stat))
root = os.environ.get("root", "./")
print(f'Root: "{root}"')
args = env["game"].options
loop_end = False
def game_start(batch):
print("In game start")
vers = [int(batch["black_ver"][0]), int(batch["white_ver"][0])]
# Use the version number to load models.
for model_loader, ver, actor_name in zip(
env["model_loaders"], vers, actors):
if ver >= 0:
while True:
try:
reload(
env["mi_" + actor_name], model_loader, GC.params,
args, root, ver, actor_name)
break
except BaseException:
import traceback
traceback.print_exc()
time.sleep(10)
def game_end(batch):
nonlocal loop_end
wr = batch.GC.getClient().getGameStats().getWinRateStats()
win_rate = (100.0 * wr.black_wins / wr.total_games
if wr.total_games > 0 else 0.0)
        print(f'{datetime.now()!s} B/W: {wr.black_wins}/{wr.white_wins}. '
              f'Black winrate: {win_rate:.2f} ({wr.total_games})')
if args.suicide_after_n_games > 0 and \
wr.total_games >= args.suicide_after_n_games:
print(f'#suicide_after_n_games: {args.suicide_after_n_games}, '
f'total_games: {wr.total_games}')
loop_end = True
GC.reg_callback_if_exists("game_start", game_start)
GC.reg_callback_if_exists("game_end", game_end)
GC.start()
if args.eval_model_pair:
if args.eval_model_pair.find(",") >= 0:
black, white = args.eval_model_pair.split(",")
else:
black = extract_ver(env["model_loaders"][0])
white = extract_ver(env["model_loaders"][1])
# Force them to reload in the future.
for model_loader, actor_name in zip(env["model_loaders"], actors):
reload_model(model_loader, GC.params,
env["mi_" + actor_name], actor_name, args)
# We just use one thread to do selfplay.
GC.GC.getClient().setRequest(
int(black), int(white), env['game'].options.resign_thres, 1)
for actor_name in actors:
env["eval_" + actor_name].episode_start(0)
while not loop_end:
GC.run()
GC.stop()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import re
import time
import torch
from rlpytorch import load_env, SingleProcessRun, Trainer
matcher = re.compile(r"save-(\d+).bin")
def main():
print('Python version:', sys.version)
print('PyTorch version:', torch.__version__)
    print('CUDA version:', torch.version.cuda)
print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
additional_to_load = {
'trainer': (
Trainer.get_option_spec(),
lambda option_map: Trainer(option_map)),
'runner': (
SingleProcessRun.get_option_spec(),
lambda option_map: SingleProcessRun(option_map)),
}
env = load_env(os.environ, additional_to_load=additional_to_load)
trainer = env['trainer']
runner = env['runner']
GC = env["game"].initialize()
model_loader = env["model_loaders"][0]
model = model_loader.load_model(GC.params)
env["mi"].add_model("model", model, opt=True)
keep_prev_selfplay = env["game"].options.keep_prev_selfplay
model_ver = 0
model_filename = model_loader.options.load
if isinstance(model_filename, str) and model_filename != "":
realpath = os.path.realpath(model_filename)
m = matcher.match(os.path.basename(realpath))
if m:
model_ver = int(m.group(1))
eval_old_model = env["game"].options.eval_old_model
if eval_old_model >= 0:
GC.GC.getServer().setEvalMode(model_ver, eval_old_model)
else:
GC.GC.getServer().setInitialVersion(model_ver)
selfplay_ver = model_ver
root = os.environ["save"]
print(f'Root: "{root}"')
print(f'Keep prev_selfplay: {keep_prev_selfplay!s}')
def train(batch, *args, **kwargs):
# Check whether the version match.
if keep_prev_selfplay or \
(batch["selfplay_ver"] != selfplay_ver).sum() == 0:
trainer.train(batch, *args, **kwargs)
else:
print(f'Get batch whose selfplay ver is different from '
f'{selfplay_ver}, skipping')
runner.inc_episode_counter(-1)
def train_ctrl(batch, *args, **kwargs):
nonlocal selfplay_ver
old_selfplay_ver = selfplay_ver
selfplay_ver = int(batch["selfplay_ver"][0])
print(
f'Train ctrl: selfplay_ver: {old_selfplay_ver} -> {selfplay_ver}')
GC.GC.getServer().waitForSufficientSelfplay(selfplay_ver)
# Reload old models.
real_path = os.path.join(root, "save-" + str(selfplay_ver) + ".bin")
model_loader.options.load = real_path
while True:
try:
model = model_loader.load_model(GC.params)
break
except BaseException:
time.sleep(10)
env["mi"].remove_model("model")
env["mi"].add_model("model", model, opt=True)
trainer.episode_reset()
runner.set_episode_counter(-1)
GC.reg_callback("train", train)
GC.reg_callback("train_ctrl", train_ctrl)
if GC.reg_has_callback("actor"):
args = env["game"].options
env["mi"].add_model(
"actor",
model,
copy=True,
cuda=(args.gpu >= 0),
gpu_id=args.gpu)
GC.reg_callback("actor", trainer.actor)
trainer.setup(
sampler=env["sampler"],
mi=env["mi"],
rl_method=env["method"])
def episode_summary(i):
nonlocal selfplay_ver
ver = trainer.episode_summary(i)
# This might block (when evaluation does not catch up with training).
GC.GC.getServer().notifyNewVersion(selfplay_ver, ver)
offline_training = (env["game"].options.mode == "offline_train")
def after_start():
nonlocal selfplay_ver
if not offline_training:
print("About to wait for sufficient selfplay")
GC.GC.getServer().waitForSufficientSelfplay(selfplay_ver)
runner.setup(GC, after_start=after_start,
episode_summary=episode_summary,
episode_start=trainer.episode_start)
runner.run()
if __name__ == '__main__':
main()
|
addrs = {
"myserver": "[XXX.XXX.XXX.XXX]",
}
|
import os
import re
import sys
from pathlib import Path
import argparse
import torch
import platform
import importlib
import subprocess
import torch._dynamo
import torch.nn as nn
import torch.nn.functional as F
gpu_arch_ver = os.getenv("MATRIX_GPU_ARCH_VERSION")
gpu_arch_type = os.getenv("MATRIX_GPU_ARCH_TYPE")
channel = os.getenv("MATRIX_CHANNEL")
stable_version = os.getenv("MATRIX_STABLE_VERSION")
package_type = os.getenv("MATRIX_PACKAGE_TYPE")
target_os = os.getenv("TARGET_OS")
is_cuda_system = gpu_arch_type == "cuda"
NIGHTLY_ALLOWED_DELTA = 3
MODULES = [
{
"name": "torchvision",
"repo": "https://github.com/pytorch/vision.git",
"smoke_test": "./vision/test/smoke_test.py",
"extension": "extension",
"repo_name": "vision",
},
{
"name": "torchaudio",
"repo": "https://github.com/pytorch/audio.git",
"smoke_test": "./audio/test/smoke_test/smoke_test.py --no-ffmpeg",
"extension": "_extension",
"repo_name": "audio",
},
]
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 1)
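        # 9216 = 64 channels * 12 * 12 spatial positions: a 1x28x28 input
        # shrinks to 26x26 and 24x24 after the two 3x3 convolutions, then to
        # 12x12 after the 2x2 max-pool applied in forward().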
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
output = self.fc1(x)
return output
def check_version(package: str) -> None:
# only makes sense to check nightly package where dates are known
if channel == "nightly":
check_nightly_binaries_date(package)
else:
if not torch.__version__.startswith(stable_version):
raise RuntimeError(
f"Torch version mismatch, expected {stable_version} for channel {channel}. But its {torch.__version__}"
)
def check_nightly_binaries_date(package: str) -> None:
from datetime import datetime, timedelta
format_dt = '%Y%m%d'
    date_t_str = re.findall(r"dev\d+", torch.__version__)
date_t_delta = datetime.now() - datetime.strptime(date_t_str[0][3:], format_dt)
if date_t_delta.days >= NIGHTLY_ALLOWED_DELTA:
raise RuntimeError(
f"the binaries are from {date_t_str} and are more than {NIGHTLY_ALLOWED_DELTA} days old!"
)
if(package == "all"):
for module in MODULES:
imported_module = importlib.import_module(module["name"])
module_version = imported_module.__version__
date_m_str = re.findall("dev\d+", module_version)
date_m_delta = datetime.now() - datetime.strptime(date_m_str[0][3:], format_dt)
print(f"Nightly date check for {module['name']} version {module_version}")
if date_m_delta.days > NIGHTLY_ALLOWED_DELTA:
raise RuntimeError(
f"Expected {module['name']} to be less then {NIGHTLY_ALLOWED_DELTA} days. But its {date_m_delta}"
)
def test_cuda_runtime_errors_captured() -> None:
    cuda_exception_missed = True
try:
print("Testing test_cuda_runtime_errors_captured")
torch._assert_async(torch.tensor(0, device="cuda"))
torch._assert_async(torch.tensor(0 + 0j, device="cuda"))
except RuntimeError as e:
if re.search("CUDA", f"{e}"):
print(f"Caught CUDA exception with success: {e}")
cuda_exception_missed = False
else:
raise e
    if cuda_exception_missed:
        raise RuntimeError("Expected a CUDA RuntimeError, but none was raised!")
def smoke_test_cuda(package: str, runtime_error_check: str) -> None:
if not torch.cuda.is_available() and is_cuda_system:
raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.")
    if package == 'all' and is_cuda_system:
        for module in MODULES:
            imported_module = importlib.import_module(module["name"])
            # TBD for vision: move extension module to private so it will
            # be _extension.
version = "N/A"
if module["extension"] == "extension":
version = imported_module.extension._check_cuda_version()
else:
version = imported_module._extension._check_cuda_version()
print(f"{module['name']} CUDA: {version}")
if torch.cuda.is_available():
if torch.version.cuda != gpu_arch_ver:
raise RuntimeError(
f"Wrong CUDA version. Loaded: {torch.version.cuda} Expected: {gpu_arch_ver}"
)
print(f"torch cuda: {torch.version.cuda}")
# todo add cudnn version validation
print(f"torch cudnn: {torch.backends.cudnn.version()}")
print(f"cuDNN enabled? {torch.backends.cudnn.enabled}")
    # torch.compile is only validated on Linux; the release channel
    # additionally requires Python < 3.11.
    if (sys.platform == "linux" or sys.platform == "linux2") and sys.version_info < (3, 11, 0) and channel == "release":
        smoke_test_compile()
    elif (sys.platform == "linux" or sys.platform == "linux2") and channel != "release":
        smoke_test_compile()
if(runtime_error_check == "enabled"):
test_cuda_runtime_errors_captured()
def smoke_test_conv2d() -> None:
import torch.nn as nn
print("Testing smoke_test_conv2d")
# With square kernels and equal stride
m = nn.Conv2d(16, 33, 3, stride=2)
# non-square kernels and unequal stride and with padding
m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
# non-square kernels and unequal stride and with padding and dilation
basic_conv = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
input = torch.randn(20, 16, 50, 100)
output = basic_conv(input)
if is_cuda_system:
print("Testing smoke_test_conv2d with cuda")
conv = nn.Conv2d(3, 3, 3).cuda()
x = torch.randn(1, 3, 24, 24).cuda()
with torch.cuda.amp.autocast():
out = conv(x)
supported_dtypes = [torch.float16, torch.float32, torch.float64]
for dtype in supported_dtypes:
print(f"Testing smoke_test_conv2d with cuda for {dtype}")
conv = basic_conv.to(dtype).cuda()
input = torch.randn(20, 16, 50, 100, device="cuda").type(dtype)
output = conv(input)
def smoke_test_linalg() -> None:
print("Testing smoke_test_linalg")
A = torch.randn(5, 3)
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
U.shape, S.shape, Vh.shape
torch.dist(A, U @ torch.diag(S) @ Vh)
U, S, Vh = torch.linalg.svd(A)
U.shape, S.shape, Vh.shape
torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh)
A = torch.randn(7, 5, 3)
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
torch.dist(A, U @ torch.diag_embed(S) @ Vh)
if is_cuda_system:
supported_dtypes = [torch.float32, torch.float64]
for dtype in supported_dtypes:
print(f"Testing smoke_test_linalg with cuda for {dtype}")
A = torch.randn(20, 16, 50, 100, device="cuda").type(dtype)
torch.linalg.svd(A)
def smoke_test_compile() -> None:
supported_dtypes = [torch.float16, torch.float32, torch.float64]
def foo(x: torch.Tensor) -> torch.Tensor:
return torch.sin(x) + torch.cos(x)
for dtype in supported_dtypes:
print(f"Testing smoke_test_compile for {dtype}")
x = torch.rand(3, 3, device="cuda").type(dtype)
x_eager = foo(x)
x_pt2 = torch.compile(foo)(x)
print(torch.allclose(x_eager, x_pt2))
# Reset torch dynamo since we are changing mode
torch._dynamo.reset()
dtype = torch.float32
torch.set_float32_matmul_precision('high')
print(f"Testing smoke_test_compile with mode 'max-autotune' for {dtype}")
x = torch.rand(64, 1, 28, 28, device="cuda").type(torch.float32)
model = Net().to(device="cuda")
x_pt2 = torch.compile(model, mode="max-autotune")(x)
def smoke_test_modules():
cwd = os.getcwd()
for module in MODULES:
if module["repo"]:
if not os.path.exists(f"{cwd}/{module['repo_name']}"):
print(f"Path does not exist: {cwd}/{module['repo_name']}")
subprocess.check_output(f"git clone --depth 1 {module['repo']}", stderr=subprocess.STDOUT, shell=True)
try:
smoke_test_command = f"python3 {module['smoke_test']}"
if target_os == 'windows':
smoke_test_command = f"python {module['smoke_test']}"
output = subprocess.check_output(
smoke_test_command, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
f"Module {module['name']} FAIL: {exc.returncode} Output: {exc.output}"
)
else:
print("Output: \n{}\n".format(output))
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--package",
help="Package to include in smoke testing",
type=str,
choices=["all", "torchonly"],
default="all",
)
parser.add_argument(
"--runtime-error-check",
help="No Runtime Error check",
type=str,
choices=["enabled", "disabled"],
default="enabled",
)
options = parser.parse_args()
print(f"torch: {torch.__version__}")
check_version(options.package)
smoke_test_conv2d()
smoke_test_linalg()
if options.package == "all":
smoke_test_modules()
smoke_test_cuda(options.package, options.runtime_error_check)
if __name__ == "__main__":
main()
|
r"""
It's used to check basic CNN features with cuda.
For example, it would throw an exception if some components are missing
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class SimpleCNN(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
self.pool = nn.MaxPool2d(2, 2)
def forward(self, inputs):
output = self.pool(F.relu(self.conv(inputs)))
output = output.view(1)
return output
# Mock one infer
device = torch.device("cuda:0")
net = SimpleCNN().to(device)
net_inputs = torch.rand((1, 1, 5, 5), device=device)
outputs = net(net_inputs)
print(outputs)
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.1)
# Mock one step training
label = torch.full((1,), 1.0, dtype=torch.float, device=device)
loss = criterion(outputs, label)
loss.backward()
optimizer.step()
|
r"""
It's used to check basic rnn features with cuda.
For example, it would throw an exception if some components are missing
"""
import torch
import torch.nn as nn
device = torch.device("cuda:0")
rnn = nn.RNN(10, 20, 2).to(device)
inputs = torch.randn(5, 3, 10).to(device)
h0 = torch.randn(2, 3, 20).to(device)
output, hn = rnn(inputs, h0)
|
# Logic copied from PEP 513
def is_manylinux1_compatible():
# Only Linux, and only x86-64 / i686
from distutils.util import get_platform
if get_platform() not in ["linux-x86_64", "linux-i686"]:
return False
# Check for presence of _manylinux module
try:
import _manylinux
return bool(_manylinux.manylinux1_compatible)
except (ImportError, AttributeError):
# Fall through to heuristic check below
pass
# Check glibc version. CentOS 5 uses glibc 2.5.
return have_compatible_glibc(2, 5)
def have_compatible_glibc(major, minimum_minor):
import ctypes
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return False
# Call gnu_get_libc_version, which returns a string like "2.5".
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
# Parse string and check against requested version.
version = [int(piece) for piece in version_str.split(".")]
assert len(version) == 2
if major != version[0]:
return False
if minimum_minor > version[1]:
return False
return True
import sys
if is_manylinux1_compatible():
print("%s is manylinux1 compatible" % (sys.executable,))
sys.exit(0)
else:
print("%s is NOT manylinux1 compatible" % (sys.executable,))
sys.exit(1)
|
# cf. https://github.com/pypa/manylinux/issues/53
GOOD_SSL = "https://google.com"
BAD_SSL = "https://self-signed.badssl.com"
import sys
print("Testing SSL certificate checking for Python:", sys.version)
if (sys.version_info[:2] < (2, 7)
        or (sys.version_info[0] >= 3 and sys.version_info[:2] < (3, 4))):
print("This version never checks SSL certs; skipping tests")
sys.exit(0)
if sys.version_info[0] >= 3:
from urllib.request import urlopen
EXC = OSError
else:
from urllib import urlopen
EXC = IOError
print("Connecting to %s should work" % (GOOD_SSL,))
urlopen(GOOD_SSL)
print("...it did, yay.")
print("Connecting to %s should fail" % (BAD_SSL,))
try:
urlopen(BAD_SSL)
# If we get here then we failed:
print("...it DIDN'T!!!!!11!!1one!")
sys.exit(1)
except EXC:
print("...it did, yay.") |
#!/usr/bin/env python
import argparse
import time
from os import path, makedirs
from datetime import datetime
from collections import defaultdict
from typing import Iterator, List, Type, Dict, Set, TypeVar, Optional
from re import sub, match, search
from packaging.version import parse
import boto3
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('pytorch')
ACCEPTED_FILE_EXTENSIONS = ("whl", "zip", "tar.gz")
ACCEPTED_SUBDIR_PATTERNS = [
r"cu[0-9]+", # for cuda
r"rocm[0-9]+\.[0-9]+", # for rocm
"cpu",
]
PREFIXES_WITH_HTML = {
"whl": "torch_stable.html",
"whl/lts/1.8": "torch_lts.html",
"whl/nightly": "torch_nightly.html",
"whl/test": "torch_test.html",
}
# NOTE: This refers to the name on the wheels themselves and not the name of
# package as specified by setuptools, for packages with "-" (hyphens) in their
# names you need to convert them to "_" (underscores) in order for them to be
# allowed here since the name of the wheels is compared here
PACKAGE_ALLOW_LIST = {
"Pillow",
"certifi",
"charset_normalizer",
"cmake",
"colorama",
"fbgemm_gpu",
"filelock",
"fsspec",
"idna",
"Jinja2",
"lit",
"MarkupSafe",
"mpmath",
"nestedtensor",
"networkx",
"numpy",
"nvidia_cublas_cu11",
"nvidia_cuda_cupti_cu11",
"nvidia_cuda_nvrtc_cu11",
"nvidia_cuda_runtime_cu11",
"nvidia_cudnn_cu11",
"nvidia_cufft_cu11",
"nvidia_curand_cu11",
"nvidia_cusolver_cu11",
"nvidia_cusparse_cu11",
"nvidia_nccl_cu11",
"nvidia_nvtx_cu11",
"nvidia_cublas_cu12",
"nvidia_cuda_cupti_cu12",
"nvidia_cuda_nvrtc_cu12",
"nvidia_cuda_runtime_cu12",
"nvidia_cudnn_cu12",
"nvidia_cufft_cu12",
"nvidia_curand_cu12",
"nvidia_cusolver_cu12",
"nvidia_cusparse_cu12",
"nvidia_nccl_cu12",
"nvidia_nvtx_cu12",
"nvidia_nvjitlink_cu12",
"packaging",
"portalocker",
"pytorch_triton",
"pytorch_triton_rocm",
"requests",
"sympy",
"torch",
"torch_tensorrt",
"torcharrow",
"torchaudio",
"torchcsprng",
"torchdata",
"torchdistx",
"torchmetrics",
"torchrec",
"torchtext",
"torchvision",
"triton",
"tqdm",
"typing_extensions",
"urllib3",
}
# Should match torch-2.0.0.dev20221221+cu118-cp310-cp310-linux_x86_64.whl as:
# Group 1: torch-2.0.0.dev
# Group 2: 20221221
PACKAGE_DATE_REGEX = r"([a-zA-Z]*-[0-9.]*\.dev)([0-9]*)"
# How many packages should we keep of a specific package?
KEEP_THRESHOLD = 60
S3IndexType = TypeVar('S3IndexType', bound='S3Index')
def extract_package_build_time(full_package_name: str) -> datetime:
result = search(PACKAGE_DATE_REGEX, full_package_name)
if result is not None:
try:
return datetime.strptime(result.group(2), "%Y%m%d")
        except ValueError:
            # If the date fails to parse, fall through and treat the
            # package as freshly built.
            pass
return datetime.now()
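# e.g. extract_package_build_time(
#     "torch-2.0.0.dev20221221%2Bcu118-cp310-cp310-linux_x86_64.whl")
# returns datetime(2022, 12, 21); names without a ".dev<date>" stamp fall
# back to datetime.now().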
def between_bad_dates(package_build_time: datetime):
start_bad = datetime(year=2022, month=8, day=17)
end_bad = datetime(year=2022, month=12, day=30)
return start_bad <= package_build_time <= end_bad
class S3Index:
def __init__(self: S3IndexType, objects: List[str], prefix: str) -> None:
self.objects = objects
self.prefix = prefix.rstrip("/")
self.html_name = PREFIXES_WITH_HTML[self.prefix]
# should dynamically grab subdirectories like whl/test/cu101
# so we don't need to add them manually anymore
self.subdirs = {
            path.dirname(obj) for obj in objects if path.dirname(obj) != prefix
}
def nightly_packages_to_show(self: S3IndexType) -> Set[str]:
"""Finding packages to show based on a threshold we specify
Basically takes our S3 packages, normalizes the version for easier
comparisons, then iterates over normalized versions until we reach a
threshold and then starts adding package to delete after that threshold
has been reached
After figuring out what versions we'd like to hide we iterate over
our original object list again and pick out the full paths to the
packages that are included in the list of versions to delete
"""
# also includes versions without GPU specifier (i.e. cu102) for easier
# sorting, sorts in reverse to put the most recent versions first
all_sorted_packages = sorted(
{self.normalize_package_version(obj) for obj in self.objects},
key=lambda name_ver: parse(name_ver.split('-', 1)[-1]),
reverse=True,
)
packages: Dict[str, int] = defaultdict(int)
to_hide: Set[str] = set()
for obj in all_sorted_packages:
full_package_name = path.basename(obj)
package_name = full_package_name.split('-')[0]
package_build_time = extract_package_build_time(full_package_name)
# Hard pass on packages that are included in our allow list
if package_name not in PACKAGE_ALLOW_LIST:
to_hide.add(obj)
continue
if packages[package_name] >= KEEP_THRESHOLD:
to_hide.add(obj)
elif between_bad_dates(package_build_time):
to_hide.add(obj)
else:
packages[package_name] += 1
return set(self.objects).difference({
obj for obj in self.objects
if self.normalize_package_version(obj) in to_hide
})
def is_obj_at_root(self, obj:str) -> bool:
return path.dirname(obj) == self.prefix
def _resolve_subdir(self, subdir: Optional[str] = None) -> str:
if not subdir:
subdir = self.prefix
# make sure we strip any trailing slashes
return subdir.rstrip("/")
def gen_file_list(
self,
subdir: Optional[str]=None,
package_name: Optional[str] = None
) -> Iterator[str]:
objects = (
self.nightly_packages_to_show() if self.prefix == 'whl/nightly'
else self.objects
)
subdir = self._resolve_subdir(subdir) + '/'
for obj in objects:
if package_name is not None:
if self.obj_to_package_name(obj) != package_name:
continue
if self.is_obj_at_root(obj) or obj.startswith(subdir):
yield obj
def get_package_names(self, subdir: Optional[str] = None) -> List[str]:
return sorted(set(self.obj_to_package_name(obj) for obj in self.gen_file_list(subdir)))
def normalize_package_version(self: S3IndexType, obj: str) -> str:
# removes the GPU specifier from the package name as well as
# unnecessary things like the file extension, architecture name, etc.
return sub(
r"%2B.*",
"",
"-".join(path.basename(obj).split("-")[:2])
)
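    # e.g. "whl/nightly/cu118/torch-2.0.0.dev20221221%2Bcu118-cp310-...whl"
    # normalizes to "torch-2.0.0.dev20221221": take the basename, keep the
    # first two dash-separated fields, and strip the "%2B<gpu>" suffix.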
def obj_to_package_name(self, obj: str) -> str:
return path.basename(obj).split('-', 1)[0]
def to_legacy_html(
self,
subdir: Optional[str]=None
) -> str:
"""Generates a string that can be used as the HTML index
Takes our objects and transforms them into HTML that have historically
been used by pip for installing pytorch.
NOTE: These are not PEP 503 compliant but are here for legacy purposes
"""
out: List[str] = []
subdir = self._resolve_subdir(subdir)
is_root = subdir == self.prefix
for obj in self.gen_file_list(subdir):
# Strip our prefix
sanitized_obj = obj.replace(subdir, "", 1)
if sanitized_obj.startswith('/'):
sanitized_obj = sanitized_obj.lstrip("/")
# we include objects at our root prefix so that users can still
# install packages like torchaudio / torchtext even if they want
# to install a specific GPU arch of torch / torchvision
if not is_root and self.is_obj_at_root(obj):
# strip root prefix
sanitized_obj = obj.replace(self.prefix, "", 1).lstrip("/")
sanitized_obj = f"../{sanitized_obj}"
out.append(f'<a href="{sanitized_obj}">{sanitized_obj}</a><br/>')
return "\n".join(sorted(out))
def to_simple_package_html(
self,
subdir: Optional[str],
package_name: str
) -> str:
"""Generates a string that can be used as the package simple HTML index
"""
out: List[str] = []
# Adding html header
out.append('<!DOCTYPE html>')
out.append('<html>')
out.append(' <body>')
out.append(' <h1>Links for {}</h1>'.format(package_name.lower().replace("_","-")))
for obj in sorted(self.gen_file_list(subdir, package_name)):
out.append(f' <a href="/{obj}">{path.basename(obj).replace("%2B","+")}</a><br/>')
# Adding html footer
out.append(' </body>')
out.append('</html>')
out.append('<!--TIMESTAMP {}-->'.format(int(time.time())))
return '\n'.join(out)
def to_simple_packages_html(
self,
subdir: Optional[str],
) -> str:
"""Generates a string that can be used as the simple HTML index
"""
out: List[str] = []
# Adding html header
out.append('<!DOCTYPE html>')
out.append('<html>')
out.append(' <body>')
for pkg_name in sorted(self.get_package_names(subdir)):
out.append(f' <a href="{pkg_name.replace("_","-")}/">{pkg_name.replace("_","-")}</a><br/>')
# Adding html footer
out.append(' </body>')
out.append('</html>')
out.append('<!--TIMESTAMP {}-->'.format(int(time.time())))
return '\n'.join(out)
def upload_legacy_html(self) -> None:
for subdir in self.subdirs:
print(f"INFO Uploading {subdir}/{self.html_name}")
BUCKET.Object(
key=f"{subdir}/{self.html_name}"
).put(
ACL='public-read',
CacheControl='no-cache,no-store,must-revalidate',
ContentType='text/html',
Body=self.to_legacy_html(subdir=subdir)
)
def upload_pep503_htmls(self) -> None:
for subdir in self.subdirs:
print(f"INFO Uploading {subdir}/index.html")
BUCKET.Object(
key=f"{subdir}/index.html"
).put(
ACL='public-read',
CacheControl='no-cache,no-store,must-revalidate',
ContentType='text/html',
Body=self.to_simple_packages_html(subdir=subdir)
)
for pkg_name in self.get_package_names(subdir=subdir):
compat_pkg_name = pkg_name.lower().replace("_", "-")
print(f"INFO Uploading {subdir}/{compat_pkg_name}/index.html")
BUCKET.Object(
key=f"{subdir}/{compat_pkg_name}/index.html"
).put(
ACL='public-read',
CacheControl='no-cache,no-store,must-revalidate',
ContentType='text/html',
Body=self.to_simple_package_html(subdir=subdir, package_name=pkg_name)
)
def save_legacy_html(self) -> None:
for subdir in self.subdirs:
print(f"INFO Saving {subdir}/{self.html_name}")
makedirs(subdir, exist_ok=True)
with open(path.join(subdir, self.html_name), mode="w", encoding="utf-8") as f:
f.write(self.to_legacy_html(subdir=subdir))
def save_pep503_htmls(self) -> None:
for subdir in self.subdirs:
print(f"INFO Saving {subdir}/index.html")
makedirs(subdir, exist_ok=True)
with open(path.join(subdir, "index.html"), mode="w", encoding="utf-8") as f:
f.write(self.to_simple_packages_html(subdir=subdir))
for pkg_name in self.get_package_names(subdir=subdir):
makedirs(path.join(subdir, pkg_name), exist_ok=True)
with open(path.join(subdir, pkg_name, "index.html"), mode="w", encoding="utf-8") as f:
f.write(self.to_simple_package_html(subdir=subdir, package_name=pkg_name))
@classmethod
def from_S3(cls: Type[S3IndexType], prefix: str) -> S3IndexType:
objects = []
prefix = prefix.rstrip("/")
for obj in BUCKET.objects.filter(Prefix=prefix):
is_acceptable = any([path.dirname(obj.key) == prefix] + [
match(
f"{prefix}/{pattern}",
path.dirname(obj.key)
)
for pattern in ACCEPTED_SUBDIR_PATTERNS
]) and obj.key.endswith(ACCEPTED_FILE_EXTENSIONS)
if is_acceptable:
sanitized_key = obj.key.replace("+", "%2B")
objects.append(sanitized_key)
return cls(objects, prefix)
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser("Manage S3 HTML indices for PyTorch")
parser.add_argument(
"prefix",
type=str,
choices=list(PREFIXES_WITH_HTML.keys()) + ["all"]
)
parser.add_argument("--do-not-upload", action="store_true")
parser.add_argument("--generate-pep503", action="store_true")
return parser
def main():
parser = create_parser()
args = parser.parse_args()
action = "Saving" if args.do_not_upload else "Uploading"
if args.prefix == 'all':
for prefix in PREFIXES_WITH_HTML.keys():
print(f"INFO: {action} indices for '{prefix}'")
idx = S3Index.from_S3(prefix=prefix)
if args.do_not_upload:
idx.save_legacy_html()
else:
idx.upload_legacy_html()
else:
print(f"INFO: {action} indices for '{args.prefix}'")
idx = S3Index.from_S3(prefix=args.prefix)
if args.do_not_upload:
idx.save_legacy_html()
if args.generate_pep503:
idx.save_pep503_htmls()
else:
idx.upload_legacy_html()
if args.generate_pep503:
idx.upload_pep503_htmls()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Downloads domain pytorch and library packages from channel
# And backs them up to S3
# Do not use unless you know what you are doing
# Usage: python backup_conda.py --version 1.6.0
import conda.api
import boto3
from typing import List, Optional
import urllib
import os
import hashlib
import argparse
S3 = boto3.resource('s3')
BUCKET = S3.Bucket('pytorch-backup')
_known_subdirs = ["linux-64", "osx-64", "osx-arm64", "win-64"]
def compute_md5(path: str) -> str:
with open(path, "rb") as f:
return hashlib.md5(f.read()).hexdigest()
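# Note: compute_md5 reads the whole file into memory; a chunked variant
# (hypothetical, not used below) would bound memory for large packages:
#
#     def compute_md5_chunked(path: str) -> str:
#         h = hashlib.md5()
#         with open(path, "rb") as f:
#             for chunk in iter(lambda: f.read(1 << 20), b""):
#                 h.update(chunk)
#         return h.hexdigest()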
def download_conda_package(package:str, version:Optional[str] = None, depends:Optional[str] = None, channel:Optional[str] = None) -> List[str]:
packages = conda.api.SubdirData.query_all(package, channels = [channel] if channel is not None else None, subdirs = _known_subdirs)
rc = []
for pkg in packages:
if version is not None and pkg.version != version:
continue
if depends is not None and depends not in pkg.depends:
continue
print(f"Downloading {pkg.url}...")
os.makedirs(pkg.subdir, exist_ok = True)
fname = f"{pkg.subdir}/{pkg.fn}"
if not os.path.exists(fname):
with open(fname, "wb") as f:
with urllib.request.urlopen(pkg.url) as url:
f.write(url.read())
if compute_md5(fname) != pkg.md5:
print(f"md5 of {fname} is {compute_md5(fname)} does not match {pkg.md5}")
continue
rc.append(fname)
return rc
def upload_to_s3(prefix: str, fnames: List[str]) -> None:
for fname in fnames:
BUCKET.upload_file(fname, f"{prefix}/{fname}")
print(fname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
help="PyTorch Version to backup",
type=str,
required = True
)
options = parser.parse_args()
rc = download_conda_package("pytorch", channel = "pytorch", version = options.version)
upload_to_s3(f"v{options.version}/conda", rc)
for libname in ["torchvision", "torchaudio", "torchtext"]:
print(f"processing {libname}")
rc = download_conda_package(libname, channel = "pytorch", depends = f"pytorch {options.version}")
upload_to_s3(f"v{options.version}/conda", rc)
|