_Noxty committed · verified
Commit 6dd95c2 · Parent(s): 8bc4872

Upload 8 files

libs/infer_packs/attentions.py ADDED
@@ -0,0 +1,459 @@
import copy
import math
from typing import Optional

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from infer.lib.infer_pack import commons, modules
from infer.lib.infer_pack.modules import LayerNorm


class Encoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        window_size=10,
        **kwargs
    ):
        super(Encoder, self).__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = int(n_layers)
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    window_size=window_size,
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        zippep = zip(
            self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2
        )
        for attn_layers, norm_layers_1, ffn_layers, norm_layers_2 in zippep:
            y = attn_layers(x, x, attn_mask)
            y = self.drop(y)
            x = norm_layers_1(x + y)

            y = ffn_layers(x, x_mask)
            y = self.drop(y)
            x = norm_layers_2(x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        proximal_bias=False,
        proximal_init=True,
        **kwargs
    ):
        super(Decoder, self).__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    proximal_bias=proximal_bias,
                    proximal_init=proximal_init,
                )
            )
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(
                MultiHeadAttention(
                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                    causal=True,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
            device=x.device, dtype=x.dtype
        )
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        p_dropout=0.0,
        window_size=None,
        heads_share=True,
        block_length=None,
        proximal_bias=False,
        proximal_init=False,
    ):
        super(MultiHeadAttention, self).__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )
            self.emb_rel_v = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(
        self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None
    ):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, _ = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s = key.size()
        t_t = query.size(2)
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert (
                t_s == t_t
            ), "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(
                query / math.sqrt(self.k_channels), key_relative_embeddings
            )
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(
                device=scores.device, dtype=scores.dtype
            )
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert (
                    t_s == t_t
                ), "Local attention is only available for self-attention."
                block_mask = (
                    torch.ones_like(scores)
                    .triu(-self.block_length)
                    .tril(self.block_length)
                )
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(
                self.emb_rel_v, t_s
            )
            output = output + self._matmul_with_relative_values(
                relative_weights, value_relative_embeddings
            )
        output = (
            output.transpose(2, 3).contiguous().view(b, d, t_t)
        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length: int):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length: int = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                # commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
                [0, 0, pad_length, pad_length, 0, 0],
            )
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[
            :, slice_start_position:slice_end_position
        ]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(
            x,
            # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
            [0, 1, 0, 0, 0, 0, 0, 0],
        )

        # Concat extra elements so as to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(
            x_flat,
            # commons.convert_pad_shape([[0, 0], [0, 0], [0, int(length) - 1]])
            [0, int(length) - 1, 0, 0, 0, 0],
        )

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
            :, :, :length, length - 1 :
        ]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(
            x,
            # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, int(length) - 1]])
            [0, int(length) - 1, 0, 0, 0, 0, 0, 0],
        )
        x_flat = x.view([batch, heads, int(length**2) + int(length * (length - 1))])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(
            x_flat,
            # commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]])
            [length, 0, 0, 0, 0, 0],
        )
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length: int):
        """Bias for self-attention to encourage attention to close positions.
        Args:
            length: an integer scalar.
        Returns:
            a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation: Optional[str] = None,
        causal=False,
    ):
        super(FFN, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal
        self.is_activation = True if activation == "gelu" else False
        # if causal:
        #     self.padding = self._causal_padding
        # else:
        #     self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:
        if self.causal:
            padding = self._causal_padding(x * x_mask)
        else:
            padding = self._same_padding(x * x_mask)
        return padding

    def forward(self, x: torch.Tensor, x_mask: torch.Tensor):
        x = self.conv_1(self.padding(x, x_mask))
        if self.is_activation:
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)

        x = self.conv_2(self.padding(x, x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l: int = self.kernel_size - 1
        pad_r: int = 0
        # padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(
            x,
            # commons.convert_pad_shape(padding)
            [pad_l, pad_r, 0, 0, 0, 0],
        )
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l: int = (self.kernel_size - 1) // 2
        pad_r: int = self.kernel_size // 2
        # padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(
            x,
            # commons.convert_pad_shape(padding)
            [pad_l, pad_r, 0, 0, 0, 0],
        )
        return x
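For orientation, a minimal usage sketch of the Encoder defined above — not part of the commit; the channel sizes are illustrative, and it assumes the infer.lib.infer_pack package that the file itself imports from is importable:

import torch

from infer.lib.infer_pack import commons
from infer.lib.infer_pack.attentions import Encoder

# Two sequences padded to t=100; the mask zeroes the padded steps.
enc = Encoder(
    hidden_channels=192,
    filter_channels=768,
    n_heads=2,
    n_layers=6,
    kernel_size=3,
    p_dropout=0.1,
)
x = torch.randn(2, 192, 100)  # [batch, channels, time]
lengths = torch.tensor([100, 80])
x_mask = commons.sequence_mask(lengths, 100).unsqueeze(1).to(x.dtype)
y = enc(x, x_mask)  # -> [2, 192, 100], padded positions zeroed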
libs/infer_packs/attentions_onnx.py ADDED
@@ -0,0 +1,459 @@
############################## Warning! ##############################
#                                                                    #
#   ONNX export does not support all non-Torch types,                #
#   including Python built-in types!                                 #
#   If you want to change this file,                                 #
#   do not use non-Torch types!                                      #
#                                                                    #
############################## Warning! ##############################
import copy
import math
from typing import Optional

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from infer.lib.infer_pack import commons, modules
from infer.lib.infer_pack.modules import LayerNorm


class Encoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        window_size=10,
        **kwargs
    ):
        super(Encoder, self).__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = int(n_layers)
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    window_size=window_size,
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        zippep = zip(
            self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2
        )
        for attn_layers, norm_layers_1, ffn_layers, norm_layers_2 in zippep:
            y = attn_layers(x, x, attn_mask)
            y = self.drop(y)
            x = norm_layers_1(x + y)

            y = ffn_layers(x, x_mask)
            y = self.drop(y)
            x = norm_layers_2(x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        proximal_bias=False,
        proximal_init=True,
        **kwargs
    ):
        super(Decoder, self).__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(
                MultiHeadAttention(
                    hidden_channels,
                    hidden_channels,
                    n_heads,
                    p_dropout=p_dropout,
                    proximal_bias=proximal_bias,
                    proximal_init=proximal_init,
                )
            )
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(
                MultiHeadAttention(
                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
                )
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(
                    hidden_channels,
                    hidden_channels,
                    filter_channels,
                    kernel_size,
                    p_dropout=p_dropout,
                    causal=True,
                )
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
            device=x.device, dtype=x.dtype
        )
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        p_dropout=0.0,
        window_size=None,
        heads_share=True,
        block_length=None,
        proximal_bias=False,
        proximal_init=False,
    ):
        super(MultiHeadAttention, self).__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )
            self.emb_rel_v = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
                * rel_stddev
            )

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(
        self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None
    ):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, _ = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s = key.size()
        t_t = query.size(2)
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(
                query / math.sqrt(self.k_channels), key_relative_embeddings
            )
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(
                device=scores.device, dtype=scores.dtype
            )
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert (
                    t_s == t_t
                ), "Local attention is only available for self-attention."
                block_mask = (
                    torch.ones_like(scores)
                    .triu(-self.block_length)
                    .tril(self.block_length)
                )
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(
                self.emb_rel_v, t_s
            )
            output = output + self._matmul_with_relative_values(
                relative_weights, value_relative_embeddings
            )
        output = (
            output.transpose(2, 3).contiguous().view(b, d, t_t)
        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = torch.clamp(length - (self.window_size + 1), min=0)
        slice_start_position = torch.clamp((self.window_size + 1) - length, min=0)
        slice_end_position = slice_start_position + 2 * length - 1
        padded_relative_embeddings = F.pad(
            relative_embeddings,
            # commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
            [0, 0, pad_length, pad_length, 0, 0],
        )
        used_relative_embeddings = padded_relative_embeddings[
            :, slice_start_position:slice_end_position
        ]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(
            x,
            # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
            [0, 1, 0, 0, 0, 0, 0, 0],
        )

        # Concat extra elements so as to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(
            x_flat,
            [0, length - 1, 0, 0, 0, 0],
        )

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
            :, :, :length, length - 1 :
        ]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(
            x,
            [0, length - 1, 0, 0, 0, 0, 0, 0],
        )
        x_flat = x.view([batch, heads, length * length + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(
            x_flat,
            [length, 0, 0, 0, 0, 0],
        )
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
            length: an integer scalar.
        Returns:
            a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation: Optional[str] = None,
        causal=False,
    ):
        super(FFN, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal
        self.is_activation = True if activation == "gelu" else False
        # if causal:
        #     self.padding = self._causal_padding
        # else:
        #     self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:
        if self.causal:
            padding = self._causal_padding(x * x_mask)
        else:
            padding = self._same_padding(x * x_mask)
        return padding

    def forward(self, x: torch.Tensor, x_mask: torch.Tensor):
        x = self.conv_1(self.padding(x, x_mask))
        if self.is_activation:
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)

        x = self.conv_2(self.padding(x, x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        # padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(
            x,
            # commons.convert_pad_shape(padding)
            [pad_l, pad_r, 0, 0, 0, 0],
        )
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        # padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(
            x,
            # commons.convert_pad_shape(padding)
            [pad_l, pad_r, 0, 0, 0, 0],
        )
        return x
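Relative to attentions.py, the substantive change in this file is that shape arithmetic stays in tensor ops (e.g. torch.clamp in _get_relative_embeddings where the non-ONNX file uses Python max), so tracing does not bake Python values into the graph. A hypothetical export sketch under the same import-path assumption, with illustrative sizes and opset:

import torch

from infer.lib.infer_pack.attentions_onnx import Encoder

enc = Encoder(192, 768, 2, 6, kernel_size=3, p_dropout=0.0).eval()
x = torch.randn(1, 192, 100)
x_mask = torch.ones(1, 1, 100)
torch.onnx.export(
    enc,
    (x, x_mask),
    "encoder.onnx",
    input_names=["x", "x_mask"],
    output_names=["out"],
    dynamic_axes={"x": {2: "time"}, "x_mask": {2: "time"}},
    opset_version=16,
)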
libs/infer_packs/commons.py ADDED
@@ -0,0 +1,172 @@
from typing import List, Optional
import math

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


# def convert_pad_shape(pad_shape):
#     l = pad_shape[::-1]
#     pad_shape = [item for sublist in l for item in sublist]
#     return pad_shape


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += (
        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    )
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def slice_segments2(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
        num_timescales - 1
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


# def convert_pad_shape(pad_shape):
#     l = pad_shape[::-1]
#     pad_shape = [item for sublist in l for item in sublist]
#     return pad_shape


def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:
    return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist()


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
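A quick sanity sketch (not part of the commit) for the two helpers the attention files lean on most: convert_pad_shape flattens a per-dimension [[before, after], ...] spec into the reversed flat list that F.pad expects, and sequence_mask builds the padding masks used throughout.

import torch

from infer.lib.infer_pack.commons import convert_pad_shape, sequence_mask

print(convert_pad_shape([[0, 0], [0, 0], [1, 0]]))  # -> [1, 0, 0, 0, 0, 0]
print(sequence_mask(torch.tensor([2, 4]), 5).int())
# tensor([[1, 1, 0, 0, 0],
#         [1, 1, 1, 1, 0]], dtype=torch.int32)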
libs/infer_packs/models.py ADDED
@@ -0,0 +1,1223 @@
1
+ import math
2
+ import logging
3
+ from typing import Optional
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torch import nn
10
+ from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
11
+ from torch.nn import functional as F
12
+ from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
13
+ from infer.lib.infer_pack import attentions, commons, modules
14
+ from infer.lib.infer_pack.commons import get_padding, init_weights
15
+
16
+ has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available())
17
+
18
+
19
+ class TextEncoder(nn.Module):
20
+ def __init__(
21
+ self,
22
+ in_channels,
23
+ out_channels,
24
+ hidden_channels,
25
+ filter_channels,
26
+ n_heads,
27
+ n_layers,
28
+ kernel_size,
29
+ p_dropout,
30
+ f0=True,
31
+ ):
32
+ super(TextEncoder, self).__init__()
33
+ self.out_channels = out_channels
34
+ self.hidden_channels = hidden_channels
35
+ self.filter_channels = filter_channels
36
+ self.n_heads = n_heads
37
+ self.n_layers = n_layers
38
+ self.kernel_size = kernel_size
39
+ self.p_dropout = float(p_dropout)
40
+ self.emb_phone = nn.Linear(in_channels, hidden_channels)
41
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
42
+ if f0 == True:
43
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
44
+ self.encoder = attentions.Encoder(
45
+ hidden_channels,
46
+ filter_channels,
47
+ n_heads,
48
+ n_layers,
49
+ kernel_size,
50
+ float(p_dropout),
51
+ )
52
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
53
+
54
+ def forward(
55
+ self,
56
+ phone: torch.Tensor,
57
+ pitch: torch.Tensor,
58
+ lengths: torch.Tensor,
59
+ skip_head: Optional[torch.Tensor] = None,
60
+ ):
61
+ if pitch is None:
62
+ x = self.emb_phone(phone)
63
+ else:
64
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
65
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
66
+ x = self.lrelu(x)
67
+ x = torch.transpose(x, 1, -1) # [b, h, t]
68
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
69
+ x.dtype
70
+ )
71
+ x = self.encoder(x * x_mask, x_mask)
72
+ if skip_head is not None:
73
+ assert isinstance(skip_head, torch.Tensor)
74
+ head = int(skip_head.item())
75
+ x = x[:, :, head:]
76
+ x_mask = x_mask[:, :, head:]
77
+ stats = self.proj(x) * x_mask
78
+ m, logs = torch.split(stats, self.out_channels, dim=1)
79
+ return m, logs, x_mask
80
+
81
+
82
+ class ResidualCouplingBlock(nn.Module):
83
+ def __init__(
84
+ self,
85
+ channels,
86
+ hidden_channels,
87
+ kernel_size,
88
+ dilation_rate,
89
+ n_layers,
90
+ n_flows=4,
91
+ gin_channels=0,
92
+ ):
93
+ super(ResidualCouplingBlock, self).__init__()
94
+ self.channels = channels
95
+ self.hidden_channels = hidden_channels
96
+ self.kernel_size = kernel_size
97
+ self.dilation_rate = dilation_rate
98
+ self.n_layers = n_layers
99
+ self.n_flows = n_flows
100
+ self.gin_channels = gin_channels
101
+
102
+ self.flows = nn.ModuleList()
103
+ for i in range(n_flows):
104
+ self.flows.append(
105
+ modules.ResidualCouplingLayer(
106
+ channels,
107
+ hidden_channels,
108
+ kernel_size,
109
+ dilation_rate,
110
+ n_layers,
111
+ gin_channels=gin_channels,
112
+ mean_only=True,
113
+ )
114
+ )
115
+ self.flows.append(modules.Flip())
116
+
117
+ def forward(
118
+ self,
119
+ x: torch.Tensor,
120
+ x_mask: torch.Tensor,
121
+ g: Optional[torch.Tensor] = None,
122
+ reverse: bool = False,
123
+ ):
124
+ if not reverse:
125
+ for flow in self.flows:
126
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
127
+ else:
128
+ for flow in self.flows[::-1]:
129
+ x, _ = flow.forward(x, x_mask, g=g, reverse=reverse)
130
+ return x
131
+
132
+ def remove_weight_norm(self):
133
+ for i in range(self.n_flows):
134
+ self.flows[i * 2].remove_weight_norm()
135
+
136
+ def __prepare_scriptable__(self):
137
+ for i in range(self.n_flows):
138
+ for hook in self.flows[i * 2]._forward_pre_hooks.values():
139
+ if (
140
+ hook.__module__ == "torch.nn.utils.weight_norm"
141
+ and hook.__class__.__name__ == "WeightNorm"
142
+ ):
143
+ torch.nn.utils.remove_weight_norm(self.flows[i * 2])
144
+
145
+ return self
146
+
147
+
148
+ class PosteriorEncoder(nn.Module):
149
+ def __init__(
150
+ self,
151
+ in_channels,
152
+ out_channels,
153
+ hidden_channels,
154
+ kernel_size,
155
+ dilation_rate,
156
+ n_layers,
157
+ gin_channels=0,
158
+ ):
159
+ super(PosteriorEncoder, self).__init__()
160
+ self.in_channels = in_channels
161
+ self.out_channels = out_channels
162
+ self.hidden_channels = hidden_channels
163
+ self.kernel_size = kernel_size
164
+ self.dilation_rate = dilation_rate
165
+ self.n_layers = n_layers
166
+ self.gin_channels = gin_channels
167
+
168
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
169
+ self.enc = modules.WN(
170
+ hidden_channels,
171
+ kernel_size,
172
+ dilation_rate,
173
+ n_layers,
174
+ gin_channels=gin_channels,
175
+ )
176
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
177
+
178
+ def forward(
179
+ self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None
180
+ ):
181
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
182
+ x.dtype
183
+ )
184
+ x = self.pre(x) * x_mask
185
+ x = self.enc(x, x_mask, g=g)
186
+ stats = self.proj(x) * x_mask
187
+ m, logs = torch.split(stats, self.out_channels, dim=1)
188
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
189
+ return z, m, logs, x_mask
190
+
191
+ def remove_weight_norm(self):
192
+ self.enc.remove_weight_norm()
193
+
194
+ def __prepare_scriptable__(self):
195
+ for hook in self.enc._forward_pre_hooks.values():
196
+ if (
197
+ hook.__module__ == "torch.nn.utils.weight_norm"
198
+ and hook.__class__.__name__ == "WeightNorm"
199
+ ):
200
+ torch.nn.utils.remove_weight_norm(self.enc)
201
+ return self
202
+
203
+
204
+ class Generator(torch.nn.Module):
205
+ def __init__(
206
+ self,
207
+ initial_channel,
208
+ resblock,
209
+ resblock_kernel_sizes,
210
+ resblock_dilation_sizes,
211
+ upsample_rates,
212
+ upsample_initial_channel,
213
+ upsample_kernel_sizes,
214
+ gin_channels=0,
215
+ ):
216
+ super(Generator, self).__init__()
217
+ self.num_kernels = len(resblock_kernel_sizes)
218
+ self.num_upsamples = len(upsample_rates)
219
+ self.conv_pre = Conv1d(
220
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
221
+ )
222
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
223
+
224
+ self.ups = nn.ModuleList()
225
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
226
+ self.ups.append(
227
+ weight_norm(
228
+ ConvTranspose1d(
229
+ upsample_initial_channel // (2**i),
230
+ upsample_initial_channel // (2 ** (i + 1)),
231
+ k,
232
+ u,
233
+ padding=(k - u) // 2,
234
+ )
235
+ )
236
+ )
237
+
238
+ self.resblocks = nn.ModuleList()
239
+ for i in range(len(self.ups)):
240
+ ch = upsample_initial_channel // (2 ** (i + 1))
241
+ for j, (k, d) in enumerate(
242
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
243
+ ):
244
+ self.resblocks.append(resblock(ch, k, d))
245
+
246
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
247
+ self.ups.apply(init_weights)
248
+
249
+ if gin_channels != 0:
250
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
251
+
252
+ def forward(
253
+ self,
254
+ x: torch.Tensor,
255
+ g: Optional[torch.Tensor] = None,
256
+ n_res: Optional[torch.Tensor] = None,
257
+ ):
258
+ if n_res is not None:
259
+ assert isinstance(n_res, torch.Tensor)
260
+ n = int(n_res.item())
261
+ if n != x.shape[-1]:
262
+ x = F.interpolate(x, size=n, mode="linear")
263
+ x = self.conv_pre(x)
264
+ if g is not None:
265
+ x = x + self.cond(g)
266
+
267
+ for i in range(self.num_upsamples):
268
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
269
+ x = self.ups[i](x)
270
+ xs = None
271
+ for j in range(self.num_kernels):
272
+ if xs is None:
273
+ xs = self.resblocks[i * self.num_kernels + j](x)
274
+ else:
275
+ xs += self.resblocks[i * self.num_kernels + j](x)
276
+ x = xs / self.num_kernels
277
+ x = F.leaky_relu(x)
278
+ x = self.conv_post(x)
279
+ x = torch.tanh(x)
280
+
281
+ return x
282
+
283
+ def __prepare_scriptable__(self):
284
+ for l in self.ups:
285
+ for hook in l._forward_pre_hooks.values():
286
+ # The hook we want to remove is an instance of WeightNorm class, so
287
+ # normally we would do `if isinstance(...)` but this class is not accessible
288
+ # because of shadowing, so we check the module name directly.
289
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
290
+ if (
291
+ hook.__module__ == "torch.nn.utils.weight_norm"
292
+ and hook.__class__.__name__ == "WeightNorm"
293
+ ):
294
+ torch.nn.utils.remove_weight_norm(l)
295
+
296
+ for l in self.resblocks:
297
+ for hook in l._forward_pre_hooks.values():
298
+ if (
299
+ hook.__module__ == "torch.nn.utils.weight_norm"
300
+ and hook.__class__.__name__ == "WeightNorm"
301
+ ):
302
+ torch.nn.utils.remove_weight_norm(l)
303
+ return self
304
+
305
+ def remove_weight_norm(self):
306
+ for l in self.ups:
307
+ remove_weight_norm(l)
308
+ for l in self.resblocks:
309
+ l.remove_weight_norm()
310
+
311
+
312
+ class SineGen(torch.nn.Module):
313
+ """Definition of sine generator
314
+ SineGen(samp_rate, harmonic_num = 0,
315
+ sine_amp = 0.1, noise_std = 0.003,
316
+ voiced_threshold = 0,
317
+ flag_for_pulse=False)
318
+ samp_rate: sampling rate in Hz
319
+ harmonic_num: number of harmonic overtones (default 0)
320
+ sine_amp: amplitude of sine-wavefrom (default 0.1)
321
+ noise_std: std of Gaussian noise (default 0.003)
322
+ voiced_thoreshold: F0 threshold for U/V classification (default 0)
323
+ flag_for_pulse: this SinGen is used inside PulseGen (default False)
324
+ Note: when flag_for_pulse is True, the first time step of a voiced
325
+ segment is always sin(torch.pi) or cos(0)
326
+ """
327
+
328
+ def __init__(
329
+ self,
330
+ samp_rate,
331
+ harmonic_num=0,
332
+ sine_amp=0.1,
333
+ noise_std=0.003,
334
+ voiced_threshold=0,
335
+ flag_for_pulse=False,
336
+ ):
337
+ super(SineGen, self).__init__()
338
+ self.sine_amp = sine_amp
339
+ self.noise_std = noise_std
340
+ self.harmonic_num = harmonic_num
341
+ self.dim = self.harmonic_num + 1
342
+ self.sampling_rate = samp_rate
343
+ self.voiced_threshold = voiced_threshold
344
+
345
+ def _f02uv(self, f0):
346
+ # generate uv signal
347
+ uv = torch.ones_like(f0)
348
+ uv = uv * (f0 > self.voiced_threshold)
349
+ if uv.device.type == "privateuseone": # for DirectML
350
+ uv = uv.float()
351
+ return uv
352
+
353
+ def _f02sine(self, f0, upp):
354
+ """ f0: (batchsize, length, dim)
355
+ where dim indicates fundamental tone and overtones
356
+ """
357
+ a = torch.arange(1, upp + 1, dtype=f0.dtype, device=f0.device)
358
+ rad = f0 / self.sampling_rate * a
359
+ rad2 = torch.fmod(rad[:, :-1, -1:].float() + 0.5, 1.0) - 0.5
360
+ rad_acc = rad2.cumsum(dim=1).fmod(1.0).to(f0)
361
+ rad += F.pad(rad_acc, (0, 0, 1, 0), mode='constant')
362
+ rad = rad.reshape(f0.shape[0], -1, 1)
363
+ b = torch.arange(1, self.dim + 1, dtype=f0.dtype, device=f0.device).reshape(1, 1, -1)
364
+ rad *= b
365
+ rand_ini = torch.rand(1, 1, self.dim, device=f0.device)
366
+ rand_ini[..., 0] = 0
367
+ rad += rand_ini
368
+ sines = torch.sin(2 * np.pi * rad)
369
+ return sines
370
+
371
+ def forward(self, f0: torch.Tensor, upp: int):
372
+ """sine_tensor, uv = forward(f0)
373
+ input F0: tensor(batchsize=1, length, dim=1)
374
+ f0 for unvoiced steps should be 0
375
+ output sine_tensor: tensor(batchsize=1, length, dim)
376
+ output uv: tensor(batchsize=1, length, 1)
377
+ """
378
+ with torch.no_grad():
379
+ f0 = f0.unsqueeze(-1)
380
+ sine_waves = self._f02sine(f0, upp) * self.sine_amp
381
+ uv = self._f02uv(f0)
382
+ uv = F.interpolate(
383
+ uv.transpose(2, 1), scale_factor=float(upp), mode="nearest"
384
+ ).transpose(2, 1)
385
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
386
+ noise = noise_amp * torch.randn_like(sine_waves)
387
+ sine_waves = sine_waves * uv + noise
388
+ return sine_waves, uv, noise
389
+
390
+
391
+ class SourceModuleHnNSF(torch.nn.Module):
392
+ """SourceModule for hn-nsf
393
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
394
+ add_noise_std=0.003, voiced_threshod=0)
395
+ sampling_rate: sampling_rate in Hz
396
+ harmonic_num: number of harmonic above F0 (default: 0)
397
+ sine_amp: amplitude of sine source signal (default: 0.1)
398
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
399
+ note that amplitude of noise in unvoiced is decided
400
+ by sine_amp
401
+ voiced_threshold: threhold to set U/V given F0 (default: 0)
402
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
403
+ F0_sampled (batchsize, length, 1)
404
+ Sine_source (batchsize, length, 1)
405
+ noise_source (batchsize, length 1)
406
+ uv (batchsize, length, 1)
407
+ """
408
+
409
+ def __init__(
410
+ self,
411
+ sampling_rate,
412
+ harmonic_num=0,
413
+ sine_amp=0.1,
414
+ add_noise_std=0.003,
415
+ voiced_threshod=0,
416
+ is_half=True,
417
+ ):
418
+ super(SourceModuleHnNSF, self).__init__()
419
+
420
+ self.sine_amp = sine_amp
421
+ self.noise_std = add_noise_std
422
+ self.is_half = is_half
423
+ # to produce sine waveforms
424
+ self.l_sin_gen = SineGen(
425
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
426
+ )
427
+
428
+ # to merge source harmonics into a single excitation
429
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
430
+ self.l_tanh = torch.nn.Tanh()
431
+ # self.ddtype:int = -1
432
+
433
+ def forward(self, x: torch.Tensor, upp: int = 1):
434
+ # if self.ddtype ==-1:
435
+ # self.ddtype = self.l_linear.weight.dtype
436
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
437
+ # print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype)
438
+ # if self.is_half:
439
+ # sine_wavs = sine_wavs.half()
440
+ # sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x)))
441
+ # print(sine_wavs.dtype,self.ddtype)
442
+ # if sine_wavs.dtype != self.l_linear.weight.dtype:
443
+ sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
444
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
445
+ return sine_merge, None, None # noise, uv
446
+
447
+
448
+ class GeneratorNSF(torch.nn.Module):
449
+ def __init__(
450
+ self,
451
+ initial_channel,
452
+ resblock,
453
+ resblock_kernel_sizes,
454
+ resblock_dilation_sizes,
455
+ upsample_rates,
456
+ upsample_initial_channel,
457
+ upsample_kernel_sizes,
458
+ gin_channels,
459
+ sr,
460
+ is_half=False,
461
+ ):
462
+ super(GeneratorNSF, self).__init__()
463
+ self.num_kernels = len(resblock_kernel_sizes)
464
+ self.num_upsamples = len(upsample_rates)
465
+
466
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates))
467
+ self.m_source = SourceModuleHnNSF(
468
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
469
+ )
470
+ self.noise_convs = nn.ModuleList()
471
+ self.conv_pre = Conv1d(
472
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
473
+ )
474
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
475
+
476
+ self.ups = nn.ModuleList()
477
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
478
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
479
+ self.ups.append(
480
+ weight_norm(
481
+ ConvTranspose1d(
482
+ upsample_initial_channel // (2**i),
483
+ upsample_initial_channel // (2 ** (i + 1)),
484
+ k,
485
+ u,
486
+ padding=(k - u) // 2,
487
+ )
488
+ )
489
+ )
490
+ if i + 1 < len(upsample_rates):
491
+ stride_f0 = math.prod(upsample_rates[i + 1 :])
492
+ self.noise_convs.append(
493
+ Conv1d(
494
+ 1,
495
+ c_cur,
496
+ kernel_size=stride_f0 * 2,
497
+ stride=stride_f0,
498
+ padding=stride_f0 // 2,
499
+ )
500
+ )
501
+ else:
502
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
503
+
504
+ self.resblocks = nn.ModuleList()
505
+ for i in range(len(self.ups)):
506
+ ch = upsample_initial_channel // (2 ** (i + 1))
507
+ for j, (k, d) in enumerate(
508
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
509
+ ):
510
+ self.resblocks.append(resblock(ch, k, d))
511
+
512
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
513
+ self.ups.apply(init_weights)
514
+
515
+ if gin_channels != 0:
516
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
517
+
518
+ self.upp = math.prod(upsample_rates)
519
+
520
+ self.lrelu_slope = modules.LRELU_SLOPE
521
+
522
+ def forward(
523
+ self,
524
+ x,
525
+ f0,
526
+ g: Optional[torch.Tensor] = None,
527
+ n_res: Optional[torch.Tensor] = None,
528
+ ):
529
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
530
+ har_source = har_source.transpose(1, 2)
531
+ if n_res is not None:
532
+ assert isinstance(n_res, torch.Tensor)
533
+ n = int(n_res.item())
534
+ if n * self.upp != har_source.shape[-1]:
535
+ har_source = F.interpolate(har_source, size=n * self.upp, mode="linear")
536
+ if n != x.shape[-1]:
537
+ x = F.interpolate(x, size=n, mode="linear")
538
+ x = self.conv_pre(x)
539
+ if g is not None:
540
+ x = x + self.cond(g)
541
+ # torch.jit.script() does not support direct indexing of torch modules
542
+ # That's why I wrote this
543
+         for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)):
+             if i < self.num_upsamples:
+                 x = F.leaky_relu(x, self.lrelu_slope)
+                 x = ups(x)
+                 x_source = noise_convs(har_source)
+                 x = x + x_source
+                 xs: Optional[torch.Tensor] = None
+                 l = [i * self.num_kernels + j for j in range(self.num_kernels)]
+                 for j, resblock in enumerate(self.resblocks):
+                     if j in l:
+                         if xs is None:
+                             xs = resblock(x)
+                         else:
+                             xs += resblock(x)
+                 # This assertion cannot be removed: without it,
+                 # torch.jit.script() compilation fails.
+                 assert isinstance(xs, torch.Tensor)
+                 x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.ups:
+             remove_weight_norm(l)
+         for l in self.resblocks:
+             l.remove_weight_norm()
+
+     def __prepare_scriptable__(self):
+         for l in self.ups:
+             for hook in l._forward_pre_hooks.values():
+                 # The hook we want to remove is an instance of the WeightNorm class,
+                 # but that class is not accessible because of shadowing, so we
+                 # check the module name directly instead of using `isinstance(...)`.
+                 # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(l)
+         for l in self.resblocks:
+             for hook in l._forward_pre_hooks.values():
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(l)
+         return self
+
+
+ sr2sr = {
+     "32k": 32000,
+     "40k": 40000,
+     "48k": 48000,
+ }
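+ # sr2sr maps the sample-rate tags used in model configs (e.g. "40k") to Hz,
+ # so the synthesizers below accept either a string tag or an integer rate.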
+
+
+ class SynthesizerTrnMs256NSFsid(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr,
+         **kwargs
+     ):
+         super(SynthesizerTrnMs256NSFsid, self).__init__()
+         if isinstance(sr, str):
+             sr = sr2sr[sr]
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = float(p_dropout)
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.gin_channels = gin_channels
+         # self.hop_length = hop_length#
+         self.spk_embed_dim = spk_embed_dim
+         self.enc_p = TextEncoder(
+             256,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             float(p_dropout),
+         )
+         self.dec = GeneratorNSF(
+             inter_channels,
+             resblock,
+             resblock_kernel_sizes,
+             resblock_dilation_sizes,
+             upsample_rates,
+             upsample_initial_channel,
+             upsample_kernel_sizes,
+             gin_channels=gin_channels,
+             sr=sr,
+             is_half=kwargs["is_half"],
+         )
+         self.enc_q = PosteriorEncoder(
+             spec_channels,
+             inter_channels,
+             hidden_channels,
+             5,
+             1,
+             16,
+             gin_channels=gin_channels,
+         )
+         self.flow = ResidualCouplingBlock(
+             inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+         )
+         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+         logger.debug(
+             "gin_channels: "
+             + str(gin_channels)
+             + ", self.spk_embed_dim: "
+             + str(self.spk_embed_dim)
+         )
+
+     def remove_weight_norm(self):
+         self.dec.remove_weight_norm()
+         self.flow.remove_weight_norm()
+         if hasattr(self, "enc_q"):
+             self.enc_q.remove_weight_norm()
+
+     def __prepare_scriptable__(self):
+         for hook in self.dec._forward_pre_hooks.values():
+             # The hook we want to remove is an instance of the WeightNorm class,
+             # but that class is not accessible because of shadowing, so we
+             # check the module name directly instead of using `isinstance(...)`.
+             # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
+             if (
+                 hook.__module__ == "torch.nn.utils.weight_norm"
+                 and hook.__class__.__name__ == "WeightNorm"
+             ):
+                 torch.nn.utils.remove_weight_norm(self.dec)
+         for hook in self.flow._forward_pre_hooks.values():
+             if (
+                 hook.__module__ == "torch.nn.utils.weight_norm"
+                 and hook.__class__.__name__ == "WeightNorm"
+             ):
+                 torch.nn.utils.remove_weight_norm(self.flow)
+         if hasattr(self, "enc_q"):
+             for hook in self.enc_q._forward_pre_hooks.values():
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(self.enc_q)
+         return self
+
+     @torch.jit.ignore
+     def forward(
+         self,
+         phone: torch.Tensor,
+         phone_lengths: torch.Tensor,
+         pitch: torch.Tensor,
+         pitchf: torch.Tensor,
+         y: torch.Tensor,
+         y_lengths: torch.Tensor,
+         ds: Optional[torch.Tensor] = None,
+     ):  # ds is the speaker id, shape [bs, 1]
+         # print(1, pitch.shape)  # [bs, t]
+         g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+         z_p = self.flow(z, y_mask, g=g)
+         z_slice, ids_slice = commons.rand_slice_segments(
+             z, y_lengths, self.segment_size
+         )
+         # print(-1, pitchf.shape, ids_slice, self.segment_size, self.hop_length, self.segment_size // self.hop_length)
+         pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+         # print(-2, pitchf.shape, z_slice.shape)
+         o = self.dec(z_slice, pitchf, g=g)
+         return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+     @torch.jit.export
+     def infer(
+         self,
+         phone: torch.Tensor,
+         phone_lengths: torch.Tensor,
+         pitch: torch.Tensor,
+         nsff0: torch.Tensor,
+         sid: torch.Tensor,
+         skip_head: Optional[torch.Tensor] = None,
+         return_length: Optional[torch.Tensor] = None,
+         return_length2: Optional[torch.Tensor] = None,
+     ):
+         g = self.emb_g(sid).unsqueeze(-1)
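+         # skip_head/return_length support chunked realtime inference: the flow
+         # is entered 24 frames early (flow_head) as warm-up context, then z,
+         # the mask and the f0 track are cropped to [head, head + length)
+         # before decoding.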
+         if skip_head is not None and return_length is not None:
+             assert isinstance(skip_head, torch.Tensor)
+             assert isinstance(return_length, torch.Tensor)
+             head = int(skip_head.item())
+             length = int(return_length.item())
+             flow_head = torch.clamp(skip_head - 24, min=0)
+             dec_head = head - int(flow_head.item())
+             m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths, flow_head)
+             z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+             z = self.flow(z_p, x_mask, g=g, reverse=True)
+             z = z[:, :, dec_head : dec_head + length]
+             x_mask = x_mask[:, :, dec_head : dec_head + length]
+             nsff0 = nsff0[:, head : head + length]
+         else:
+             m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+             z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+             z = self.flow(z_p, x_mask, g=g, reverse=True)
+         o = self.dec(z * x_mask, nsff0, g=g, n_res=return_length2)
+         return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+ class SynthesizerTrnMs768NSFsid(SynthesizerTrnMs256NSFsid):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr,
+         **kwargs
+     ):
+         super(SynthesizerTrnMs768NSFsid, self).__init__(
+             spec_channels,
+             segment_size,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout,
+             resblock,
+             resblock_kernel_sizes,
+             resblock_dilation_sizes,
+             upsample_rates,
+             upsample_initial_channel,
+             upsample_kernel_sizes,
+             spk_embed_dim,
+             gin_channels,
+             sr,
+             **kwargs
+         )
+         del self.enc_p
+         self.enc_p = TextEncoder(
+             768,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             float(p_dropout),
+         )
+
+
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr=None,
+         **kwargs
+     ):
+         super(SynthesizerTrnMs256NSFsid_nono, self).__init__()
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = float(p_dropout)
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.gin_channels = gin_channels
+         # self.hop_length = hop_length#
+         self.spk_embed_dim = spk_embed_dim
+         self.enc_p = TextEncoder(
+             256,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             float(p_dropout),
+             f0=False,
+         )
+         self.dec = Generator(
+             inter_channels,
+             resblock,
+             resblock_kernel_sizes,
+             resblock_dilation_sizes,
+             upsample_rates,
+             upsample_initial_channel,
+             upsample_kernel_sizes,
+             gin_channels=gin_channels,
+         )
+         self.enc_q = PosteriorEncoder(
+             spec_channels,
+             inter_channels,
+             hidden_channels,
+             5,
+             1,
+             16,
+             gin_channels=gin_channels,
+         )
+         self.flow = ResidualCouplingBlock(
+             inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+         )
+         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+         logger.debug(
+             "gin_channels: "
+             + str(gin_channels)
+             + ", self.spk_embed_dim: "
+             + str(self.spk_embed_dim)
+         )
+
+     def remove_weight_norm(self):
+         self.dec.remove_weight_norm()
+         self.flow.remove_weight_norm()
+         if hasattr(self, "enc_q"):
+             self.enc_q.remove_weight_norm()
+
+     def __prepare_scriptable__(self):
+         for hook in self.dec._forward_pre_hooks.values():
+             # The hook we want to remove is an instance of the WeightNorm class,
+             # but that class is not accessible because of shadowing, so we
+             # check the module name directly instead of using `isinstance(...)`.
+             # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
+             if (
+                 hook.__module__ == "torch.nn.utils.weight_norm"
+                 and hook.__class__.__name__ == "WeightNorm"
+             ):
+                 torch.nn.utils.remove_weight_norm(self.dec)
+         for hook in self.flow._forward_pre_hooks.values():
+             if (
+                 hook.__module__ == "torch.nn.utils.weight_norm"
+                 and hook.__class__.__name__ == "WeightNorm"
+             ):
+                 torch.nn.utils.remove_weight_norm(self.flow)
+         if hasattr(self, "enc_q"):
+             for hook in self.enc_q._forward_pre_hooks.values():
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(self.enc_q)
+         return self
+
+     @torch.jit.ignore
+     def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
+         g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+         m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+         z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+         z_p = self.flow(z, y_mask, g=g)
+         z_slice, ids_slice = commons.rand_slice_segments(
+             z, y_lengths, self.segment_size
+         )
+         o = self.dec(z_slice, g=g)
+         return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+     @torch.jit.export
+     def infer(
+         self,
+         phone: torch.Tensor,
+         phone_lengths: torch.Tensor,
+         sid: torch.Tensor,
+         skip_head: Optional[torch.Tensor] = None,
+         return_length: Optional[torch.Tensor] = None,
+         return_length2: Optional[torch.Tensor] = None,
+     ):
+         g = self.emb_g(sid).unsqueeze(-1)
+         if skip_head is not None and return_length is not None:
+             assert isinstance(skip_head, torch.Tensor)
+             assert isinstance(return_length, torch.Tensor)
+             head = int(skip_head.item())
+             length = int(return_length.item())
+             flow_head = torch.clamp(skip_head - 24, min=0)
+             dec_head = head - int(flow_head.item())
+             m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths, flow_head)
+             z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+             z = self.flow(z_p, x_mask, g=g, reverse=True)
+             z = z[:, :, dec_head : dec_head + length]
+             x_mask = x_mask[:, :, dec_head : dec_head + length]
+         else:
+             m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+             z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+             z = self.flow(z_p, x_mask, g=g, reverse=True)
+         o = self.dec(z * x_mask, g=g, n_res=return_length2)
+         return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+ class SynthesizerTrnMs768NSFsid_nono(SynthesizerTrnMs256NSFsid_nono):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr=None,
+         **kwargs
+     ):
+         super(SynthesizerTrnMs768NSFsid_nono, self).__init__(
+             spec_channels,
+             segment_size,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout,
+             resblock,
+             resblock_kernel_sizes,
+             resblock_dilation_sizes,
+             upsample_rates,
+             upsample_initial_channel,
+             upsample_kernel_sizes,
+             spk_embed_dim,
+             gin_channels,
+             sr,
+             **kwargs
+         )
+         del self.enc_p
+         self.enc_p = TextEncoder(
+             768,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             float(p_dropout),
+             f0=False,
+         )
+
+
+ class MultiPeriodDiscriminator(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(MultiPeriodDiscriminator, self).__init__()
+         periods = [2, 3, 5, 7, 11, 17]
+         # periods = [3, 5, 7, 11, 17, 23, 37]
+
+         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+         discs = discs + [
+             DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+         ]
+         self.discriminators = nn.ModuleList(discs)
+
+     def forward(self, y, y_hat):
+         y_d_rs = []
+         y_d_gs = []
+         fmap_rs = []
+         fmap_gs = []
+         for i, d in enumerate(self.discriminators):
+             y_d_r, fmap_r = d(y)
+             y_d_g, fmap_g = d(y_hat)
+             # for j in range(len(fmap_r)):
+             #     print(i, j, y.shape, y_hat.shape, fmap_r[j].shape, fmap_g[j].shape)
+             y_d_rs.append(y_d_r)
+             y_d_gs.append(y_d_g)
+             fmap_rs.append(fmap_r)
+             fmap_gs.append(fmap_g)
+
+         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(MultiPeriodDiscriminatorV2, self).__init__()
+         # periods = [2, 3, 5, 7, 11, 17]
+         periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+         discs = discs + [
+             DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+         ]
+         self.discriminators = nn.ModuleList(discs)
+
+     def forward(self, y, y_hat):
+         y_d_rs = []
+         y_d_gs = []
+         fmap_rs = []
+         fmap_gs = []
+         for i, d in enumerate(self.discriminators):
+             y_d_r, fmap_r = d(y)
+             y_d_g, fmap_g = d(y_hat)
+             # for j in range(len(fmap_r)):
+             #     print(i, j, y.shape, y_hat.shape, fmap_r[j].shape, fmap_g[j].shape)
+             y_d_rs.append(y_d_r)
+             y_d_gs.append(y_d_g)
+             fmap_rs.append(fmap_r)
+             fmap_gs.append(fmap_g)
+
+         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+ class DiscriminatorS(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(DiscriminatorS, self).__init__()
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList(
+             [
+                 norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+                 norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+                 norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+                 norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+                 norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+                 norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+             ]
+         )
+         self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+     def forward(self, x):
+         fmap = []
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
+
+
+ class DiscriminatorP(torch.nn.Module):
+     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+         super(DiscriminatorP, self).__init__()
+         self.period = period
+         self.use_spectral_norm = use_spectral_norm
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList(
+             [
+                 norm_f(
+                     Conv2d(
+                         1,
+                         32,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         32,
+                         128,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         128,
+                         512,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         512,
+                         1024,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         1024,
+                         1024,
+                         (kernel_size, 1),
+                         1,
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+             ]
+         )
+         self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+     def forward(self, x):
+         fmap = []
+
+         # 1d to 2d
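+         # Fold the waveform into [b, c, t // period, period] so the 2-D convs
+         # compare samples that are exactly `period` steps apart.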
+         b, c, t = x.shape
+         if t % self.period != 0:  # pad first
+             n_pad = self.period - (t % self.period)
+             if has_xpu and x.dtype == torch.bfloat16:
+                 x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(
+                     dtype=torch.bfloat16
+                 )
+             else:
+                 x = F.pad(x, (0, n_pad), "reflect")
+             t = t + n_pad
+         x = x.view(b, c, t // self.period, self.period)
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
libs/infer_packs/models_onnx.py ADDED
@@ -0,0 +1,818 @@
+ ############################## Warning! ##############################
+ #                                                                     #
+ #   ONNX export does not support all non-torch types,                 #
+ #   including Python built-in types!                                  #
+ #   If you want to change this file,                                  #
+ #   do not use non-torch types!                                       #
+ #                                                                     #
+ ############################## Warning! ##############################
+
+ import math
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
+ from torch.nn import functional as F
+ from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
+
+ from infer.lib.infer_pack import commons, modules
+ import infer.lib.infer_pack.attentions_onnx as attentions
+ from infer.lib.infer_pack.commons import get_padding, init_weights
+
+
+ class TextEncoder256(nn.Module):
+     def __init__(
+         self,
+         out_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         f0=True,
+     ):
+         super().__init__()
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.emb_phone = nn.Linear(256, hidden_channels)
+         self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+         if f0:
+             self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
+         self.encoder = attentions.Encoder(
+             hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+         )
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, phone, pitch, lengths):
+         if pitch is None:
+             x = self.emb_phone(phone)
+         else:
+             x = self.emb_phone(phone) + self.emb_pitch(pitch)
+         x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
+         x = self.lrelu(x)
+         x = torch.transpose(x, 1, -1)  # [b, h, t]
+         x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+             x.dtype
+         )
+         x = self.encoder(x * x_mask, x_mask)
+         stats = self.proj(x) * x_mask
+
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return m, logs, x_mask
+
+
+ class TextEncoder768(nn.Module):
+     def __init__(
+         self,
+         out_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         f0=True,
+     ):
+         super().__init__()
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.emb_phone = nn.Linear(768, hidden_channels)
+         self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+         if f0:
+             self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
+         self.encoder = attentions.Encoder(
+             hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+         )
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, phone, pitch, lengths):
+         if pitch is None:
+             x = self.emb_phone(phone)
+         else:
+             x = self.emb_phone(phone) + self.emb_pitch(pitch)
+         x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
+         x = self.lrelu(x)
+         x = torch.transpose(x, 1, -1)  # [b, h, t]
+         x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+             x.dtype
+         )
+         x = self.encoder(x * x_mask, x_mask)
+         stats = self.proj(x) * x_mask
+
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return m, logs, x_mask
+
+
+ class ResidualCouplingBlock(nn.Module):
+     def __init__(
+         self,
+         channels,
+         hidden_channels,
+         kernel_size,
+         dilation_rate,
+         n_layers,
+         n_flows=4,
+         gin_channels=0,
+     ):
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.flows = nn.ModuleList()
+         for i in range(n_flows):
+             self.flows.append(
+                 modules.ResidualCouplingLayer(
+                     channels,
+                     hidden_channels,
+                     kernel_size,
+                     dilation_rate,
+                     n_layers,
+                     gin_channels=gin_channels,
+                     mean_only=True,
+                 )
+             )
+             self.flows.append(modules.Flip())
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         if not reverse:
+             for flow in self.flows:
+                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         else:
+             for flow in reversed(self.flows):
+                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         return x
+
+     def remove_weight_norm(self):
+         for i in range(self.n_flows):
+             self.flows[i * 2].remove_weight_norm()
+
+
+ class PosteriorEncoder(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         out_channels,
+         hidden_channels,
+         kernel_size,
+         dilation_rate,
+         n_layers,
+         gin_channels=0,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+
+         self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+         self.enc = modules.WN(
+             hidden_channels,
+             kernel_size,
+             dilation_rate,
+             n_layers,
+             gin_channels=gin_channels,
+         )
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, g=None):
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+             x.dtype
+         )
+         x = self.pre(x) * x_mask
+         x = self.enc(x, x_mask, g=g)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+         return z, m, logs, x_mask
+
+     def remove_weight_norm(self):
+         self.enc.remove_weight_norm()
+
+
+ class Generator(torch.nn.Module):
+     def __init__(
+         self,
+         initial_channel,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         gin_channels=0,
+     ):
+         super(Generator, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+         self.conv_pre = Conv1d(
+             initial_channel, upsample_initial_channel, 7, 1, padding=3
+         )
+         resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+             self.ups.append(
+                 weight_norm(
+                     ConvTranspose1d(
+                         upsample_initial_channel // (2**i),
+                         upsample_initial_channel // (2 ** (i + 1)),
+                         k,
+                         u,
+                         padding=(k - u) // 2,
+                     )
+                 )
+             )
+
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = upsample_initial_channel // (2 ** (i + 1))
+             for j, (k, d) in enumerate(
+                 zip(resblock_kernel_sizes, resblock_dilation_sizes)
+             ):
+                 self.resblocks.append(resblock(ch, k, d))
+
+         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+         self.ups.apply(init_weights)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+     def forward(self, x, g=None):
+         x = self.conv_pre(x)
+         if g is not None:
+             x = x + self.cond(g)
+
+         for i in range(self.num_upsamples):
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             x = self.ups[i](x)
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None:
+                     xs = self.resblocks[i * self.num_kernels + j](x)
+                 else:
+                     xs += self.resblocks[i * self.num_kernels + j](x)
+             x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.ups:
+             remove_weight_norm(l)
+         for l in self.resblocks:
+             l.remove_weight_norm()
+
+
+ class SineGen(torch.nn.Module):
+     """Definition of sine generator
+     SineGen(samp_rate, harmonic_num = 0,
+             sine_amp = 0.1, noise_std = 0.003,
+             voiced_threshold = 0,
+             flag_for_pulse=False)
+     samp_rate: sampling rate in Hz
+     harmonic_num: number of harmonic overtones (default 0)
+     sine_amp: amplitude of the sine waveform (default 0.1)
+     noise_std: std of Gaussian noise (default 0.003)
+     voiced_threshold: F0 threshold for U/V classification (default 0)
+     flag_for_pulse: this SineGen is used inside PulseGen (default False)
+     Note: when flag_for_pulse is True, the first time step of a voiced
+     segment is always sin(np.pi) or cos(0)
+     """
+
+     def __init__(
+         self,
+         samp_rate,
+         harmonic_num=0,
+         sine_amp=0.1,
+         noise_std=0.003,
+         voiced_threshold=0,
+         flag_for_pulse=False,
+     ):
+         super(SineGen, self).__init__()
+         self.sine_amp = sine_amp
+         self.noise_std = noise_std
+         self.harmonic_num = harmonic_num
+         self.dim = self.harmonic_num + 1
+         self.sampling_rate = samp_rate
+         self.voiced_threshold = voiced_threshold
+
+     def _f02uv(self, f0):
+         # generate uv signal
+         uv = torch.ones_like(f0)
+         uv = uv * (f0 > self.voiced_threshold)
+         if uv.device.type == "privateuseone":  # for DirectML
+             uv = uv.float()
+         return uv
+
+     def _f02sine(self, f0, upp):
+         """f0: (batchsize, length, dim)
+         where dim indicates fundamental tone and overtones
+         """
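+         # Per-sample phase increments are accumulated frame by frame (fmod
+         # keeps them bounded) and carried across frame boundaries, so the
+         # sine stays phase-continuous; every harmonic except the fundamental
+         # also gets a random initial phase.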
+         a = torch.arange(1, upp + 1, dtype=f0.dtype, device=f0.device)
+         rad = f0 / self.sampling_rate * a
+         rad2 = torch.fmod(rad[:, :-1, -1:].float() + 0.5, 1.0) - 0.5
+         rad_acc = rad2.cumsum(dim=1).fmod(1.0).to(f0)
+         rad += F.pad(rad_acc, (0, 0, 1, 0), mode="constant")
+         rad = rad.reshape(f0.shape[0], -1, 1)
+         b = torch.arange(1, self.dim + 1, dtype=f0.dtype, device=f0.device).reshape(1, 1, -1)
+         rad *= b
+         rand_ini = torch.rand(1, 1, self.dim, device=f0.device)
+         rand_ini[..., 0] = 0
+         rad += rand_ini
+         sines = torch.sin(2 * np.pi * rad)
+         return sines
+
+     def forward(self, f0: torch.Tensor, upp: int):
+         """sine_tensor, uv = forward(f0)
+         input F0: tensor(batchsize=1, length, dim=1)
+         f0 for unvoiced steps should be 0
+         output sine_tensor: tensor(batchsize=1, length, dim)
+         output uv: tensor(batchsize=1, length, 1)
+         """
+         with torch.no_grad():
+             f0 = f0.unsqueeze(-1)
+             sine_waves = self._f02sine(f0, upp) * self.sine_amp
+             uv = self._f02uv(f0)
+             uv = F.interpolate(
+                 uv.transpose(2, 1), scale_factor=float(upp), mode="nearest"
+             ).transpose(2, 1)
+             noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+             noise = noise_amp * torch.randn_like(sine_waves)
+             sine_waves = sine_waves * uv + noise
+         return sine_waves, uv, noise
+
+
+ class SourceModuleHnNSF(torch.nn.Module):
+     """SourceModule for hn-nsf
+     SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+                  add_noise_std=0.003, voiced_threshod=0)
+     sampling_rate: sampling rate in Hz
+     harmonic_num: number of harmonics above F0 (default: 0)
+     sine_amp: amplitude of the sine source signal (default: 0.1)
+     add_noise_std: std of additive Gaussian noise (default: 0.003);
+         note that the amplitude of noise in unvoiced regions is decided
+         by sine_amp
+     voiced_threshod: threshold to set U/V given F0 (default: 0)
+     Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+     F0_sampled (batchsize, length, 1)
+     Sine_source (batchsize, length, 1)
+     noise_source (batchsize, length, 1)
+     uv (batchsize, length, 1)
+     """
+
+     def __init__(
+         self,
+         sampling_rate,
+         harmonic_num=0,
+         sine_amp=0.1,
+         add_noise_std=0.003,
+         voiced_threshod=0,
+         is_half=True,
+     ):
+         super(SourceModuleHnNSF, self).__init__()
+
+         self.sine_amp = sine_amp
+         self.noise_std = add_noise_std
+         self.is_half = is_half
+         # to produce sine waveforms
+         self.l_sin_gen = SineGen(
+             sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+         )
+
+         # to merge source harmonics into a single excitation
+         self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+         self.l_tanh = torch.nn.Tanh()
+
+     def forward(self, x, upp=None):
+         sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+         if self.is_half:
+             sine_wavs = sine_wavs.half()
+         sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+         return sine_merge, None, None  # noise, uv
+
+
+ class GeneratorNSF(torch.nn.Module):
+     def __init__(
+         self,
+         initial_channel,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         gin_channels,
+         sr,
+         is_half=False,
+     ):
+         super(GeneratorNSF, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+
+         self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+         self.m_source = SourceModuleHnNSF(
+             sampling_rate=sr, harmonic_num=0, is_half=is_half
+         )
+         self.noise_convs = nn.ModuleList()
+         self.conv_pre = Conv1d(
+             initial_channel, upsample_initial_channel, 7, 1, padding=3
+         )
+         resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+             c_cur = upsample_initial_channel // (2 ** (i + 1))
+             self.ups.append(
+                 weight_norm(
+                     ConvTranspose1d(
+                         upsample_initial_channel // (2**i),
+                         upsample_initial_channel // (2 ** (i + 1)),
+                         k,
+                         u,
+                         padding=(k - u) // 2,
+                     )
+                 )
+             )
+             if i + 1 < len(upsample_rates):
+                 stride_f0 = np.prod(upsample_rates[i + 1 :])
+                 self.noise_convs.append(
+                     Conv1d(
+                         1,
+                         c_cur,
+                         kernel_size=stride_f0 * 2,
+                         stride=stride_f0,
+                         padding=stride_f0 // 2,
+                     )
+                 )
+             else:
+                 self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+         self.resblocks = nn.ModuleList()
+         for i in range(len(self.ups)):
+             ch = upsample_initial_channel // (2 ** (i + 1))
+             for j, (k, d) in enumerate(
+                 zip(resblock_kernel_sizes, resblock_dilation_sizes)
+             ):
+                 self.resblocks.append(resblock(ch, k, d))
+
+         self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+         self.ups.apply(init_weights)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+         self.upp = np.prod(upsample_rates)
+
+     def forward(self, x, f0, g=None):
+         har_source, noi_source, uv = self.m_source(f0, self.upp)
+         har_source = har_source.transpose(1, 2)
+         x = self.conv_pre(x)
+         if g is not None:
+             x = x + self.cond(g)
+
+         for i in range(self.num_upsamples):
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             x = self.ups[i](x)
+             x_source = self.noise_convs[i](har_source)
+             x = x + x_source
+             xs = None
+             for j in range(self.num_kernels):
+                 if xs is None:
+                     xs = self.resblocks[i * self.num_kernels + j](x)
+                 else:
+                     xs += self.resblocks[i * self.num_kernels + j](x)
+             x = xs / self.num_kernels
+         x = F.leaky_relu(x)
+         x = self.conv_post(x)
+         x = torch.tanh(x)
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.ups:
+             remove_weight_norm(l)
+         for l in self.resblocks:
+             l.remove_weight_norm()
+
+
+ sr2sr = {
+     "32k": 32000,
+     "40k": 40000,
+     "48k": 48000,
+ }
+
+
+ class SynthesizerTrnMsNSFsidM(nn.Module):
+     def __init__(
+         self,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         spk_embed_dim,
+         gin_channels,
+         sr,
+         version,
+         **kwargs,
+     ):
+         super().__init__()
+         if isinstance(sr, str):
+             sr = sr2sr[sr]
+         self.spec_channels = spec_channels
+         self.inter_channels = inter_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.resblock = resblock
+         self.resblock_kernel_sizes = resblock_kernel_sizes
+         self.resblock_dilation_sizes = resblock_dilation_sizes
+         self.upsample_rates = upsample_rates
+         self.upsample_initial_channel = upsample_initial_channel
+         self.upsample_kernel_sizes = upsample_kernel_sizes
+         self.segment_size = segment_size
+         self.gin_channels = gin_channels
+         # self.hop_length = hop_length#
+         self.spk_embed_dim = spk_embed_dim
+         if version == "v1":
+             self.enc_p = TextEncoder256(
+                 inter_channels,
+                 hidden_channels,
+                 filter_channels,
+                 n_heads,
+                 n_layers,
+                 kernel_size,
+                 p_dropout,
+             )
+         else:
+             self.enc_p = TextEncoder768(
+                 inter_channels,
+                 hidden_channels,
+                 filter_channels,
+                 n_heads,
+                 n_layers,
+                 kernel_size,
+                 p_dropout,
+             )
+         self.dec = GeneratorNSF(
+             inter_channels,
+             resblock,
+             resblock_kernel_sizes,
+             resblock_dilation_sizes,
+             upsample_rates,
+             upsample_initial_channel,
+             upsample_kernel_sizes,
+             gin_channels=gin_channels,
+             sr=sr,
+             is_half=kwargs["is_half"],
+         )
+         self.enc_q = PosteriorEncoder(
+             spec_channels,
+             inter_channels,
+             hidden_channels,
+             5,
+             1,
+             16,
+             gin_channels=gin_channels,
+         )
+         self.flow = ResidualCouplingBlock(
+             inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+         )
+         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+         self.speaker_map = None
+         logger.debug(
+             f"gin_channels: {gin_channels}, self.spk_embed_dim: {self.spk_embed_dim}"
+         )
+
+     def remove_weight_norm(self):
+         self.dec.remove_weight_norm()
+         self.flow.remove_weight_norm()
+         self.enc_q.remove_weight_norm()
+
+     def construct_spkmixmap(self, n_speaker):
+         self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
+         for i in range(n_speaker):
+             self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
+         self.speaker_map = self.speaker_map.unsqueeze(0)
+
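+     # With construct_spkmixmap called, `g` is a per-speaker weight vector and
+     # forward blends the precomputed speaker embeddings inside the exported
+     # graph; otherwise `g` is a plain speaker id looked up in emb_g.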
+     def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
+         if self.speaker_map is not None:  # [N, S] * [S, B, 1, H]
+             g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
+             g = g * self.speaker_map  # [N, S, B, 1, H]
+             g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
+             g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
+         else:
+             g = g.unsqueeze(0)
+             g = self.emb_g(g).transpose(1, 2)
+
+         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+         z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
+         z = self.flow(z_p, x_mask, g=g, reverse=True)
+         o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+         return o
+
+
+ class MultiPeriodDiscriminator(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(MultiPeriodDiscriminator, self).__init__()
+         periods = [2, 3, 5, 7, 11, 17]
+         # periods = [3, 5, 7, 11, 17, 23, 37]
+
+         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+         discs = discs + [
+             DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+         ]
+         self.discriminators = nn.ModuleList(discs)
+
+     def forward(self, y, y_hat):
+         y_d_rs = []
+         y_d_gs = []
+         fmap_rs = []
+         fmap_gs = []
+         for i, d in enumerate(self.discriminators):
+             y_d_r, fmap_r = d(y)
+             y_d_g, fmap_g = d(y_hat)
+             # for j in range(len(fmap_r)):
+             #     print(i, j, y.shape, y_hat.shape, fmap_r[j].shape, fmap_g[j].shape)
+             y_d_rs.append(y_d_r)
+             y_d_gs.append(y_d_g)
+             fmap_rs.append(fmap_r)
+             fmap_gs.append(fmap_g)
+
+         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(MultiPeriodDiscriminatorV2, self).__init__()
+         # periods = [2, 3, 5, 7, 11, 17]
+         periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+         discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+         discs = discs + [
+             DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+         ]
+         self.discriminators = nn.ModuleList(discs)
+
+     def forward(self, y, y_hat):
+         y_d_rs = []
+         y_d_gs = []
+         fmap_rs = []
+         fmap_gs = []
+         for i, d in enumerate(self.discriminators):
+             y_d_r, fmap_r = d(y)
+             y_d_g, fmap_g = d(y_hat)
+             # for j in range(len(fmap_r)):
+             #     print(i, j, y.shape, y_hat.shape, fmap_r[j].shape, fmap_g[j].shape)
+             y_d_rs.append(y_d_r)
+             y_d_gs.append(y_d_g)
+             fmap_rs.append(fmap_r)
+             fmap_gs.append(fmap_g)
+
+         return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+ class DiscriminatorS(torch.nn.Module):
+     def __init__(self, use_spectral_norm=False):
+         super(DiscriminatorS, self).__init__()
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList(
+             [
+                 norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+                 norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+                 norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+                 norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+                 norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+                 norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+             ]
+         )
+         self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+     def forward(self, x):
+         fmap = []
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
+
+
+ class DiscriminatorP(torch.nn.Module):
+     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+         super(DiscriminatorP, self).__init__()
+         self.period = period
+         self.use_spectral_norm = use_spectral_norm
+         norm_f = weight_norm if not use_spectral_norm else spectral_norm
+         self.convs = nn.ModuleList(
+             [
+                 norm_f(
+                     Conv2d(
+                         1,
+                         32,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         32,
+                         128,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         128,
+                         512,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         512,
+                         1024,
+                         (kernel_size, 1),
+                         (stride, 1),
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+                 norm_f(
+                     Conv2d(
+                         1024,
+                         1024,
+                         (kernel_size, 1),
+                         1,
+                         padding=(get_padding(kernel_size, 1), 0),
+                     )
+                 ),
+             ]
+         )
+         self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+     def forward(self, x):
+         fmap = []
+
+         # 1d to 2d
+         b, c, t = x.shape
+         if t % self.period != 0:  # pad first
+             n_pad = self.period - (t % self.period)
+             x = F.pad(x, (0, n_pad), "reflect")
+             t = t + n_pad
+         x = x.view(b, c, t // self.period, self.period)
+
+         for l in self.convs:
+             x = l(x)
+             x = F.leaky_relu(x, modules.LRELU_SLOPE)
+             fmap.append(x)
+         x = self.conv_post(x)
+         fmap.append(x)
+         x = torch.flatten(x, 1, -1)
+
+         return x, fmap
libs/infer_packs/modules.py ADDED
@@ -0,0 +1,615 @@
+ import copy
+ import math
+ from typing import Optional, Tuple
+
+ import numpy as np
+ import scipy
+ import torch
+ from torch import nn
+ from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
+ from torch.nn import functional as F
+ from torch.nn.utils import remove_weight_norm, weight_norm
+
+ from infer.lib.infer_pack import commons
+ from infer.lib.infer_pack.commons import get_padding, init_weights
+ from infer.lib.infer_pack.transforms import piecewise_rational_quadratic_transform
+
+ LRELU_SLOPE = 0.1
+
+
+ class LayerNorm(nn.Module):
+     def __init__(self, channels, eps=1e-5):
+         super(LayerNorm, self).__init__()
+         self.channels = channels
+         self.eps = eps
+
+         self.gamma = nn.Parameter(torch.ones(channels))
+         self.beta = nn.Parameter(torch.zeros(channels))
+
+     def forward(self, x):
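+         # Channels live on dim 1; move them last so F.layer_norm normalizes
+         # over the channel axis, then restore the original layout.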
+         x = x.transpose(1, -1)
+         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+         return x.transpose(1, -1)
+
+
+ class ConvReluNorm(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         hidden_channels,
+         out_channels,
+         kernel_size,
+         n_layers,
+         p_dropout,
+     ):
+         super(ConvReluNorm, self).__init__()
+         self.in_channels = in_channels
+         self.hidden_channels = hidden_channels
+         self.out_channels = out_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.p_dropout = float(p_dropout)
+         assert n_layers > 1, "Number of layers should be larger than 1."
+
+         self.conv_layers = nn.ModuleList()
+         self.norm_layers = nn.ModuleList()
+         self.conv_layers.append(
+             nn.Conv1d(
+                 in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
+             )
+         )
+         self.norm_layers.append(LayerNorm(hidden_channels))
+         self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(float(p_dropout)))
+         for _ in range(n_layers - 1):
+             self.conv_layers.append(
+                 nn.Conv1d(
+                     hidden_channels,
+                     hidden_channels,
+                     kernel_size,
+                     padding=kernel_size // 2,
+                 )
+             )
+             self.norm_layers.append(LayerNorm(hidden_channels))
+         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask):
+         x_org = x
+         for i in range(self.n_layers):
+             x = self.conv_layers[i](x * x_mask)
+             x = self.norm_layers[i](x)
+             x = self.relu_drop(x)
+         x = x_org + self.proj(x)
+         return x * x_mask
+
+
+ class DDSConv(nn.Module):
+     """
+     Dilated and Depth-Separable Convolution
+     """
+
+     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
+         super(DDSConv, self).__init__()
+         self.channels = channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.p_dropout = float(p_dropout)
+
+         self.drop = nn.Dropout(float(p_dropout))
+         self.convs_sep = nn.ModuleList()
+         self.convs_1x1 = nn.ModuleList()
+         self.norms_1 = nn.ModuleList()
+         self.norms_2 = nn.ModuleList()
+         for i in range(n_layers):
+             dilation = kernel_size**i
+             padding = (kernel_size * dilation - dilation) // 2
+             self.convs_sep.append(
+                 nn.Conv1d(
+                     channels,
+                     channels,
+                     kernel_size,
+                     groups=channels,
+                     dilation=dilation,
+                     padding=padding,
+                 )
+             )
+             self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+             self.norms_1.append(LayerNorm(channels))
+             self.norms_2.append(LayerNorm(channels))
+
+     def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):
+         if g is not None:
+             x = x + g
+         for i in range(self.n_layers):
+             y = self.convs_sep[i](x * x_mask)
+             y = self.norms_1[i](y)
+             y = F.gelu(y)
+             y = self.convs_1x1[i](y)
+             y = self.norms_2[i](y)
+             y = F.gelu(y)
+             y = self.drop(y)
+             x = x + y
+         return x * x_mask
+
+
+ class WN(torch.nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         kernel_size,
+         dilation_rate,
+         n_layers,
+         gin_channels=0,
+         p_dropout=0,
+     ):
+         super(WN, self).__init__()
+         assert kernel_size % 2 == 1
+         self.hidden_channels = hidden_channels
+         self.kernel_size = (kernel_size,)
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+         self.p_dropout = float(p_dropout)
+
+         self.in_layers = torch.nn.ModuleList()
+         self.res_skip_layers = torch.nn.ModuleList()
+         self.drop = nn.Dropout(float(p_dropout))
+
+         if gin_channels != 0:
+             cond_layer = torch.nn.Conv1d(
+                 gin_channels, 2 * hidden_channels * n_layers, 1
+             )
+             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
+
+         for i in range(n_layers):
+             dilation = dilation_rate**i
+             padding = int((kernel_size * dilation - dilation) / 2)
+             in_layer = torch.nn.Conv1d(
+                 hidden_channels,
+                 2 * hidden_channels,
+                 kernel_size,
+                 dilation=dilation,
+                 padding=padding,
+             )
+             in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
+             self.in_layers.append(in_layer)
+
+             # last one is not necessary
+             if i < n_layers - 1:
+                 res_skip_channels = 2 * hidden_channels
+             else:
+                 res_skip_channels = hidden_channels
+
+             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
+             self.res_skip_layers.append(res_skip_layer)
+
+     def forward(
+         self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None
+     ):
+         output = torch.zeros_like(x)
+         n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+         if g is not None:
+             g = self.cond_layer(g)
+
+         for i, (in_layer, res_skip_layer) in enumerate(
+             zip(self.in_layers, self.res_skip_layers)
+         ):
+             x_in = in_layer(x)
+             if g is not None:
+                 cond_offset = i * 2 * self.hidden_channels
+                 g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
+             else:
+                 g_l = torch.zeros_like(x_in)
+
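+             # WaveNet-style gate: the fused helper adds the conditioning,
+             # splits the 2*hidden channels in half, and returns
+             # tanh(first half) * sigmoid(second half).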
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
208
+ acts = self.drop(acts)
209
+
210
+ res_skip_acts = res_skip_layer(acts)
211
+ if i < self.n_layers - 1:
212
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
213
+ x = (x + res_acts) * x_mask
214
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
215
+ else:
216
+ output = output + res_skip_acts
217
+ return output * x_mask
218
+
219
+ def remove_weight_norm(self):
220
+ if self.gin_channels != 0:
221
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
222
+ for l in self.in_layers:
223
+ torch.nn.utils.remove_weight_norm(l)
224
+ for l in self.res_skip_layers:
225
+ torch.nn.utils.remove_weight_norm(l)
226
+
227
+ def __prepare_scriptable__(self):
228
+ if self.gin_channels != 0:
229
+ for hook in self.cond_layer._forward_pre_hooks.values():
230
+ if (
231
+ hook.__module__ == "torch.nn.utils.weight_norm"
232
+ and hook.__class__.__name__ == "WeightNorm"
233
+ ):
234
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
235
+ for l in self.in_layers:
236
+ for hook in l._forward_pre_hooks.values():
237
+ if (
238
+ hook.__module__ == "torch.nn.utils.weight_norm"
239
+ and hook.__class__.__name__ == "WeightNorm"
240
+ ):
241
+ torch.nn.utils.remove_weight_norm(l)
242
+ for l in self.res_skip_layers:
243
+ for hook in l._forward_pre_hooks.values():
244
+ if (
245
+ hook.__module__ == "torch.nn.utils.weight_norm"
246
+ and hook.__class__.__name__ == "WeightNorm"
247
+ ):
248
+ torch.nn.utils.remove_weight_norm(l)
249
+ return self
250
+
251
+
+ class ResBlock1(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+         super(ResBlock1, self).__init__()
+         self.convs1 = nn.ModuleList(
+             [
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[0],
+                         padding=get_padding(kernel_size, dilation[0]),
+                     )
+                 ),
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[1],
+                         padding=get_padding(kernel_size, dilation[1]),
+                     )
+                 ),
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[2],
+                         padding=get_padding(kernel_size, dilation[2]),
+                     )
+                 ),
+             ]
+         )
+         self.convs1.apply(init_weights)
+
+         self.convs2 = nn.ModuleList(
+             [
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=1,
+                         padding=get_padding(kernel_size, 1),
+                     )
+                 ),
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=1,
+                         padding=get_padding(kernel_size, 1),
+                     )
+                 ),
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=1,
+                         padding=get_padding(kernel_size, 1),
+                     )
+                 ),
+             ]
+         )
+         self.convs2.apply(init_weights)
+         self.lrelu_slope = LRELU_SLOPE
+
+     def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None):
+         for c1, c2 in zip(self.convs1, self.convs2):
+             xt = F.leaky_relu(x, self.lrelu_slope)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c1(xt)
+             xt = F.leaky_relu(xt, self.lrelu_slope)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c2(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs1:
+             remove_weight_norm(l)
+         for l in self.convs2:
+             remove_weight_norm(l)
+
+     def __prepare_scriptable__(self):
+         for l in self.convs1:
+             for hook in l._forward_pre_hooks.values():
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(l)
+         for l in self.convs2:
+             for hook in l._forward_pre_hooks.values():
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(l)
+         return self
+
+
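+ # NOTE (editor's sketch): ResBlock1 is the HiFi-GAN-style residual block with
+ # three dilated convolutions (dilations 1/3/5 by default), each paired with a
+ # dilation-1 convolution. Because padding = (kernel_size*d - d)/2, the time
+ # dimension is preserved. A hedged shape check, assuming LRELU_SLOPE is
+ # defined earlier in this module:
+ #
+ #     block = ResBlock1(channels=128, kernel_size=3, dilation=(1, 3, 5))
+ #     y = block(torch.randn(1, 128, 100))   # -> (1, 128, 100)
+
+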
+ class ResBlock2(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+         super(ResBlock2, self).__init__()
+         self.convs = nn.ModuleList(
+             [
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[0],
+                         padding=get_padding(kernel_size, dilation[0]),
+                     )
+                 ),
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[1],
+                         padding=get_padding(kernel_size, dilation[1]),
+                     )
+                 ),
+             ]
+         )
+         self.convs.apply(init_weights)
+         self.lrelu_slope = LRELU_SLOPE
+
+     def forward(self, x, x_mask: Optional[torch.Tensor] = None):
+         for c in self.convs:
+             xt = F.leaky_relu(x, self.lrelu_slope)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs:
+             remove_weight_norm(l)
+
+     def __prepare_scriptable__(self):
+         for l in self.convs:
+             for hook in l._forward_pre_hooks.values():
+                 if (
+                     hook.__module__ == "torch.nn.utils.weight_norm"
+                     and hook.__class__.__name__ == "WeightNorm"
+                 ):
+                     torch.nn.utils.remove_weight_norm(l)
+         return self
+
+
+ class Log(nn.Module):
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         g: Optional[torch.Tensor] = None,
+         reverse: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+         if not reverse:
+             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+             logdet = torch.sum(-y, [1, 2])
+             return y, logdet
+         else:
+             # the reverse path returns only the tensor; no logdet is produced
+             x = torch.exp(x) * x_mask
+             return x
+
+
+ class Flip(nn.Module):
+     # torch.jit.script() compiled functions can't take a variable number of
+     # arguments or use keyword-only arguments with defaults
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         g: Optional[torch.Tensor] = None,
+         reverse: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+         x = torch.flip(x, [1])
+         if not reverse:
+             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+             return x, logdet
+         else:
+             return x, torch.zeros([1], device=x.device)
+
+
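+ # NOTE (editor's note): the flow layers in this file share one convention:
+ # forward returns (y, logdet) so log-likelihood terms can be accumulated,
+ # and reverse=True applies the inverse map. A hedged sanity check for Log:
+ #
+ #     log_flow = Log()
+ #     x = torch.rand(1, 2, 8) + 0.1
+ #     mask = torch.ones(1, 1, 8)
+ #     y, logdet = log_flow(x, mask)              # y = log(x) * mask
+ #     x_rec = log_flow(y, mask, reverse=True)    # exp() undoes it
+ #     assert torch.allclose(x, x_rec, atol=1e-5)
+
+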
+ class ElementwiseAffine(nn.Module):
+     def __init__(self, channels):
+         super(ElementwiseAffine, self).__init__()
+         self.channels = channels
+         self.m = nn.Parameter(torch.zeros(channels, 1))
+         self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = self.m + torch.exp(self.logs) * x
+             y = y * x_mask
+             logdet = torch.sum(self.logs * x_mask, [1, 2])
+             return y, logdet
+         else:
+             x = (x - self.m) * torch.exp(-self.logs) * x_mask
+             return x
+
+
+ class ResidualCouplingLayer(nn.Module):
+     def __init__(
+         self,
+         channels,
+         hidden_channels,
+         kernel_size,
+         dilation_rate,
+         n_layers,
+         p_dropout=0,
+         gin_channels=0,
+         mean_only=False,
+     ):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super(ResidualCouplingLayer, self).__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = WN(
+             hidden_channels,
+             kernel_size,
+             dilation_rate,
+             n_layers,
+             p_dropout=float(p_dropout),
+             gin_channels=gin_channels,
+         )
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         g: Optional[torch.Tensor] = None,
+         reverse: bool = False,
+     ):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x, torch.zeros([1])
+
+     def remove_weight_norm(self):
+         self.enc.remove_weight_norm()
+
+     def __prepare_scriptable__(self):
+         for hook in self.enc._forward_pre_hooks.values():
+             if (
+                 hook.__module__ == "torch.nn.utils.weight_norm"
+                 and hook.__class__.__name__ == "WeightNorm"
+             ):
+                 torch.nn.utils.remove_weight_norm(self.enc)
+         return self
+
+
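+ # NOTE (editor's sketch): ResidualCouplingLayer is an affine coupling layer.
+ # x0 passes through unchanged and parameterizes (m, logs) for x1, so the map
+ # is invertible in closed form. A hedged round-trip check with illustrative
+ # sizes:
+ #
+ #     layer = ResidualCouplingLayer(4, 192, 5, 1, 3, mean_only=True)
+ #     x = torch.randn(1, 4, 20)
+ #     mask = torch.ones(1, 1, 20)
+ #     y, _ = layer(x, mask)
+ #     x_rec, _ = layer(y, mask, reverse=True)
+ #     assert torch.allclose(x, x_rec, atol=1e-5)
+
+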
+ class ConvFlow(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         filter_channels,
+         kernel_size,
+         n_layers,
+         num_bins=10,
+         tail_bound=5.0,
+     ):
+         super(ConvFlow, self).__init__()
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.num_bins = num_bins
+         self.tail_bound = tail_bound
+         self.half_channels = in_channels // 2
+
+         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
+         self.proj = nn.Conv1d(
+             filter_channels, self.half_channels * (num_bins * 3 - 1), 1
+         )
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         g: Optional[torch.Tensor] = None,
+         reverse=False,
+     ):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0)
+         h = self.convs(h, x_mask, g=g)
+         h = self.proj(h) * x_mask
+
+         b, c, t = x0.shape
+         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
+
+         unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
+             self.filter_channels
+         )
+         unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+         x1, logabsdet = piecewise_rational_quadratic_transform(
+             x1,
+             unnormalized_widths,
+             unnormalized_heights,
+             unnormalized_derivatives,
+             inverse=reverse,
+             tails="linear",
+             tail_bound=self.tail_bound,
+         )
+
+         x = torch.cat([x0, x1], 1) * x_mask
+         logdet = torch.sum(logabsdet * x_mask, [1, 2])
+         if not reverse:
+             return x, logdet
+         else:
+             return x
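+
+ # NOTE (editor's note): ConvFlow projects half of the channels onto the
+ # parameters of a monotonic rational-quadratic spline (num_bins widths,
+ # num_bins heights, and num_bins - 1 interior derivatives, i.e.
+ # 3*num_bins - 1 values per channel) and warps the other half through
+ # piecewise_rational_quadratic_transform, defined in transforms.py below.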
libs/infer_packs/onnx_inference.py ADDED
@@ -0,0 +1,149 @@
+ import logging
+
+ import librosa
+ import numpy as np
+ import onnxruntime
+ import soundfile
+
+ logger = logging.getLogger(__name__)
+
+
+ class ContentVec:
+     def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
+         logger.info("Load model(s) from {}".format(vec_path))
+         if device == "cpu" or device is None:
+             providers = ["CPUExecutionProvider"]
+         elif device == "cuda":
+             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+         elif device == "dml":
+             providers = ["DmlExecutionProvider"]
+         else:
+             raise RuntimeError("Unsupported device")
+         self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+     def __call__(self, wav):
+         return self.forward(wav)
+
+     def forward(self, wav):
+         feats = wav
+         if feats.ndim == 2:  # stereo input: average the two channels to mono
+             feats = feats.mean(-1)
+         assert feats.ndim == 1, feats.ndim
+         feats = np.expand_dims(np.expand_dims(feats, 0), 0)
+         onnx_input = {self.model.get_inputs()[0].name: feats}
+         logits = self.model.run(None, onnx_input)[0]
+         return logits.transpose(0, 2, 1)
+
+
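+ # NOTE (editor's sketch): ContentVec expects mono float32 audio at 16 kHz and
+ # feeds it to the ONNX session as a (1, 1, T) tensor; judging from the
+ # transpose above, features come back as (batch, feature_dim, frames), i.e.
+ # (1, 768, frames) for the default vec-768-layer-12 model. A hedged usage
+ # example (the .onnx path is the default placeholder):
+ #
+ #     vec = ContentVec("pretrained/vec-768-layer-12.onnx", device="cpu")
+ #     feats = vec(np.zeros(16000, dtype=np.float32))   # -> (1, 768, frames)
+
+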
+ def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
+     # extra keyword arguments (e.g. threshold) are accepted for interface
+     # compatibility but are not forwarded to the predictors below
+     if f0_predictor == "pm":
+         from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
+
+         f0_predictor_object = PMF0Predictor(
+             hop_length=hop_length, sampling_rate=sampling_rate
+         )
+     elif f0_predictor == "harvest":
+         from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
+             HarvestF0Predictor,
+         )
+
+         f0_predictor_object = HarvestF0Predictor(
+             hop_length=hop_length, sampling_rate=sampling_rate
+         )
+     elif f0_predictor == "dio":
+         from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
+
+         f0_predictor_object = DioF0Predictor(
+             hop_length=hop_length, sampling_rate=sampling_rate
+         )
+     else:
+         raise Exception("Unknown f0 predictor")
+     return f0_predictor_object
+
+
+ class OnnxRVC:
+     def __init__(
+         self,
+         model_path,
+         sr=40000,
+         hop_size=512,
+         vec_path="vec-768-layer-12",
+         device="cpu",
+     ):
+         vec_path = f"pretrained/{vec_path}.onnx"
+         self.vec_model = ContentVec(vec_path, device)
+         if device == "cpu" or device is None:
+             providers = ["CPUExecutionProvider"]
+         elif device == "cuda":
+             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+         elif device == "dml":
+             providers = ["DmlExecutionProvider"]
+         else:
+             raise RuntimeError("Unsupported device")
+         self.model = onnxruntime.InferenceSession(model_path, providers=providers)
+         self.sampling_rate = sr
+         self.hop_size = hop_size
+
+     def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
+         onnx_input = {
+             self.model.get_inputs()[0].name: hubert,
+             self.model.get_inputs()[1].name: hubert_length,
+             self.model.get_inputs()[2].name: pitch,
+             self.model.get_inputs()[3].name: pitchf,
+             self.model.get_inputs()[4].name: ds,
+             self.model.get_inputs()[5].name: rnd,
+         }
+         return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
+
+     def inference(
+         self,
+         raw_path,
+         sid,
+         f0_method="dio",
+         f0_up_key=0,
+         pad_time=0.5,
+         cr_threshold=0.02,
+     ):
+         f0_min = 50
+         f0_max = 1100
+         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+         f0_predictor = get_f0_predictor(
+             f0_method,
+             hop_length=self.hop_size,
+             sampling_rate=self.sampling_rate,
+             threshold=cr_threshold,
+         )
+         wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
+         org_length = len(wav)
+         if org_length / sr > 50.0:
+             raise RuntimeError("Reached max length (50 s)")
+
+         wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
+
+         hubert = self.vec_model(wav16k)
+         hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
+         hubert_length = hubert.shape[1]
+
+         pitchf = f0_predictor.compute_f0(wav, hubert_length)
+         pitchf = pitchf * 2 ** (f0_up_key / 12)
+         pitch = pitchf.copy()
+         f0_mel = 1127 * np.log(1 + pitch / 700)
+         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+             f0_mel_max - f0_mel_min
+         ) + 1
+         f0_mel[f0_mel <= 1] = 1
+         f0_mel[f0_mel > 255] = 255
+         pitch = np.rint(f0_mel).astype(np.int64)
+
+         pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
+         pitch = pitch.reshape(1, len(pitch))
+         ds = np.array([sid]).astype(np.int64)
+
+         rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
+         hubert_length = np.array([hubert_length]).astype(np.int64)
+
+         out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
+         out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
+         return out_wav[0:org_length]
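+
+ # NOTE (editor's sketch, hedged): end-to-end usage, assuming a 40 kHz RVC
+ # model exported to ONNX and the matching ContentVec model under pretrained/;
+ # "model.onnx" and "input.wav" are placeholder paths:
+ #
+ #     model = OnnxRVC("model.onnx", sr=40000, hop_size=512,
+ #                     vec_path="vec-768-layer-12", device="cpu")
+ #     audio = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
+ #     soundfile.write("output.wav", audio, 40000)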
libs/infer_packs/transforms.py ADDED
@@ -0,0 +1,207 @@
+ import numpy as np
+ import torch
+ from torch.nn import functional as F
+
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
+ DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+ def piecewise_rational_quadratic_transform(
+     inputs,
+     unnormalized_widths,
+     unnormalized_heights,
+     unnormalized_derivatives,
+     inverse=False,
+     tails=None,
+     tail_bound=1.0,
+     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+     min_derivative=DEFAULT_MIN_DERIVATIVE,
+ ):
+     if tails is None:
+         spline_fn = rational_quadratic_spline
+         spline_kwargs = {}
+     else:
+         spline_fn = unconstrained_rational_quadratic_spline
+         spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
+
+     outputs, logabsdet = spline_fn(
+         inputs=inputs,
+         unnormalized_widths=unnormalized_widths,
+         unnormalized_heights=unnormalized_heights,
+         unnormalized_derivatives=unnormalized_derivatives,
+         inverse=inverse,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative,
+         **spline_kwargs
+     )
+     return outputs, logabsdet
+
+
+ def searchsorted(bin_locations, inputs, eps=1e-6):
+     bin_locations[..., -1] += eps
+     return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
+
+
+ def unconstrained_rational_quadratic_spline(
+     inputs,
+     unnormalized_widths,
+     unnormalized_heights,
+     unnormalized_derivatives,
+     inverse=False,
+     tails="linear",
+     tail_bound=1.0,
+     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+     min_derivative=DEFAULT_MIN_DERIVATIVE,
+ ):
+     inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+     outside_interval_mask = ~inside_interval_mask
+
+     outputs = torch.zeros_like(inputs)
+     logabsdet = torch.zeros_like(inputs)
+
+     if tails == "linear":
+         unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+         constant = np.log(np.exp(1 - min_derivative) - 1)
+         unnormalized_derivatives[..., 0] = constant
+         unnormalized_derivatives[..., -1] = constant
+
+         outputs[outside_interval_mask] = inputs[outside_interval_mask]
+         logabsdet[outside_interval_mask] = 0
+     else:
+         raise RuntimeError("{} tails are not implemented.".format(tails))
+
+     (
+         outputs[inside_interval_mask],
+         logabsdet[inside_interval_mask],
+     ) = rational_quadratic_spline(
+         inputs=inputs[inside_interval_mask],
+         unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+         unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+         unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+         inverse=inverse,
+         left=-tail_bound,
+         right=tail_bound,
+         bottom=-tail_bound,
+         top=tail_bound,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative,
+     )
+
+     return outputs, logabsdet
+
+
+ def rational_quadratic_spline(
+     inputs,
+     unnormalized_widths,
+     unnormalized_heights,
+     unnormalized_derivatives,
+     inverse=False,
+     left=0.0,
+     right=1.0,
+     bottom=0.0,
+     top=1.0,
+     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+     min_derivative=DEFAULT_MIN_DERIVATIVE,
+ ):
+     if torch.min(inputs) < left or torch.max(inputs) > right:
+         raise ValueError("Input to a transform is not within its domain")
+
+     num_bins = unnormalized_widths.shape[-1]
+
+     if min_bin_width * num_bins > 1.0:
+         raise ValueError("Minimal bin width too large for the number of bins")
+     if min_bin_height * num_bins > 1.0:
+         raise ValueError("Minimal bin height too large for the number of bins")
+
+     widths = F.softmax(unnormalized_widths, dim=-1)
+     widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+     cumwidths = torch.cumsum(widths, dim=-1)
+     cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
+     cumwidths = (right - left) * cumwidths + left
+     cumwidths[..., 0] = left
+     cumwidths[..., -1] = right
+     widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+     derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+     heights = F.softmax(unnormalized_heights, dim=-1)
+     heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+     cumheights = torch.cumsum(heights, dim=-1)
+     cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
+     cumheights = (top - bottom) * cumheights + bottom
+     cumheights[..., 0] = bottom
+     cumheights[..., -1] = top
+     heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+     if inverse:
+         bin_idx = searchsorted(cumheights, inputs)[..., None]
+     else:
+         bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+     input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+     input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+     input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+     delta = heights / widths
+     input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+     input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+     input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+     input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+     if inverse:
+         a = (inputs - input_cumheights) * (
+             input_derivatives + input_derivatives_plus_one - 2 * input_delta
+         ) + input_heights * (input_delta - input_derivatives)
+         b = input_heights * input_derivatives - (inputs - input_cumheights) * (
+             input_derivatives + input_derivatives_plus_one - 2 * input_delta
+         )
+         c = -input_delta * (inputs - input_cumheights)
+
+         discriminant = b.pow(2) - 4 * a * c
+         assert (discriminant >= 0).all()
+
+         root = (2 * c) / (-b - torch.sqrt(discriminant))
+         outputs = root * input_bin_widths + input_cumwidths
+
+         theta_one_minus_theta = root * (1 - root)
+         denominator = input_delta + (
+             (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+             * theta_one_minus_theta
+         )
+         derivative_numerator = input_delta.pow(2) * (
+             input_derivatives_plus_one * root.pow(2)
+             + 2 * input_delta * theta_one_minus_theta
+             + input_derivatives * (1 - root).pow(2)
+         )
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, -logabsdet
+     else:
+         theta = (inputs - input_cumwidths) / input_bin_widths
+         theta_one_minus_theta = theta * (1 - theta)
+
+         numerator = input_heights * (
+             input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
+         )
+         denominator = input_delta + (
+             (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+             * theta_one_minus_theta
+         )
+         outputs = input_cumheights + numerator / denominator
+
+         derivative_numerator = input_delta.pow(2) * (
+             input_derivatives_plus_one * theta.pow(2)
+             + 2 * input_delta * theta_one_minus_theta
+             + input_derivatives * (1 - theta).pow(2)
+         )
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, logabsdet
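+
+ # NOTE (editor's sketch): for inputs inside the tail bound, the spline and
+ # its inverse should round-trip, with the inverse log-determinant negated.
+ # A hedged consistency check with illustrative sizes (num_bins = 10, so
+ # tails="linear" expects num_bins - 1 interior derivatives, padded above):
+ #
+ #     w, h, d = torch.randn(4, 10), torch.randn(4, 10), torch.randn(4, 9)
+ #     x = torch.rand(4) * 2 - 1
+ #     y, ld = piecewise_rational_quadratic_transform(
+ #         x, w, h, d, inverse=False, tails="linear", tail_bound=5.0
+ #     )
+ #     x_rec, ld_inv = piecewise_rational_quadratic_transform(
+ #         y, w, h, d, inverse=True, tails="linear", tail_bound=5.0
+ #     )
+ #     assert torch.allclose(x, x_rec, atol=1e-5)
+ #     assert torch.allclose(ld, -ld_inv, atol=1e-5)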