import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import sin, pow
from torch.nn import Conv1d, Parameter
from torch.nn.utils import weight_norm, remove_weight_norm

from .alias import *

def init_weights(m, mean=0.0, std=0.01):
    # Initialize convolutional layers with a normal distribution.
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    # "Same" padding for a stride-1 dilated convolution (odd kernel sizes).
    return int((kernel_size * dilation - dilation) / 2)

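# Worked example for get_padding (values chosen for illustration):
# get_padding(3, dilation=2) == 2, so a stride-1 Conv1d with kernel_size=3 and
# dilation=2 keeps its output length equal to its input length.

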
class SnakeBeta(nn.Module):
    '''
    A modified Snake function which uses separate parameters for the frequency
    and magnitude of the periodic component.
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter that controls frequency
        - beta - trainable parameter that controls magnitude
    References:
        - This activation function is a modified version based on this paper by
          Liu Ziyin, Tilman Hartwig, Masahito Ueda:
          https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = SnakeBeta(256)
        >>> x = torch.randn(1, 256, 100)
        >>> x = a1(x)
    '''

    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Initialization.
        INPUT:
            - in_features: number of input features (channels)
            - alpha - trainable parameter that controls frequency
            - beta - trainable parameter that controls magnitude
            alpha is initialized to 1 by default, higher values = higher-frequency.
            beta is initialized to 1 by default, higher values = higher-magnitude.
            alpha and beta will be trained along with the rest of your model.
        '''
        super(SnakeBeta, self).__init__()
        self.in_features = in_features

        # When alpha_logscale is True, alpha and beta are stored in log scale:
        # zeros here become exp(0) = 1 in the forward pass.
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
            self.beta = Parameter(torch.zeros(in_features) * alpha)
        else:
            self.alpha = Parameter(torch.ones(in_features) * alpha)
            self.beta = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        SnakeBeta := x + 1/beta * sin^2(x * alpha)
        '''
        # Reshape the per-channel parameters to (1, C, 1) so they broadcast
        # over the (B, C, T) input.
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
        return x

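# A minimal usage sketch (shapes per the docstring above; values assumed):
#
#   act = SnakeBeta(in_features=256, alpha_logscale=True)
#   x = torch.randn(4, 256, 100)   # (B, C, T)
#   y = act(x)                     # same shape as x

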
class AMPBlock(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(AMPBlock, self).__init__()
        self.h = h

        # Three dilated convolutions (dilations taken from `dilation`).
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        # Three non-dilated convolutions that follow the dilated ones.
        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

        # Total number of conv layers; one activation per conv.
        self.num_layers = len(self.convs1) + len(self.convs2)

        # Activation1d is provided by the .alias module (wildcard import above).
        self.activations = nn.ModuleList([
            Activation1d(
                activation=SnakeBeta(channels, alpha_logscale=True))
            for _ in range(self.num_layers)
        ])

    def forward(self, x):
        # Activations alternate between the dilated (convs1) and non-dilated
        # (convs2) convolutions; each pair forms one residual branch.
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        # Calls torch.nn.utils.remove_weight_norm imported at module level,
        # stripping weight normalization from every convolution.
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)
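

# A minimal usage sketch (hyperparameters assumed; `h` is stored but not used
# by AMPBlock itself, so None suffices here):
#
#   block = AMPBlock(h=None, channels=512, kernel_size=3, dilation=(1, 3, 5))
#   x = torch.randn(2, 512, 100)    # (B, C, T)
#   y = block(x)                    # residual output, same shape as x
#   block.remove_weight_norm()      # strip weight norm, e.g. before inference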