YUNSUN7 commited on
Commit
a2c484b
·
1 Parent(s): 05e728b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1202 -1
app.py CHANGED
@@ -1 +1,1202 @@
1
- https://github.com/W1nOfGood/Rproject.git\
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import datetime
import json
import logging
import os
import pathlib
import shutil
import subprocess
import sys
import threading
import traceback
import warnings
from random import shuffle
from subprocess import Popen
from time import sleep

import fairseq
import faiss
import gradio as gr
import numpy as np
import torch
from dotenv import load_dotenv
from mega import Mega
from sklearn.cluster import MiniBatchKMeans

now_dir = os.getcwd()
sys.path.append(now_dir)

from configs.config import Config
from i18n.i18n import I18nAuto
from infer.lib.train.process_ckpt import (
    change_info,
    extract_small_model,
    merge,
    show_info,
)
from infer.modules.uvr5.modules import uvr
from infer.modules.vc.modules import VC
20
# Quiet numba's chatty JIT logger; keep a module logger for our own output.
logging.getLogger("numba").setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

# Recreate a scratch TEMP dir and remove stale bundled packages from old installs.
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)  # fixed seed for reproducibility


load_dotenv()
config = Config()
vc = VC(config)

if config.dml == True:

    def forward_dml(ctx, x, scale):
        # DirectML lacks a GradMultiply kernel; replace it with a no-op copy.
        ctx.scale = scale
        res = x.clone().detach()
        return res

    fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
i18n = I18nAuto()
logger.info(i18n)
50
# Detect NVIDIA GPUs usable for training and accelerated inference.
ngpu = torch.cuda.device_count()
gpu_infos = []
mem = []
if_gpu_ok = False

if torch.cuda.is_available() or ngpu != 0:
    for i in range(ngpu):
        gpu_name = torch.cuda.get_device_name(i)
        # Substring allow-list of card series known to work.
        if any(
            value in gpu_name.upper()
            for value in [
                "10",
                "16",
                "20",
                "30",
                "40",
                "A2",
                "A3",
                "A4",
                "P4",
                "A50",
                "500",
                "A60",
                "70",
                "80",
                "90",
                "M4",
                "T4",
                "TITAN",
            ]
        ):
            # A10#A100#V100#A40#P40#M40#K80#A4500
            if_gpu_ok = True  # at least one usable NVIDIA card found
            gpu_infos.append("%s\t%s" % (i, gpu_name))
            # Round total VRAM to whole GiB (the +0.4 biases toward rounding up).
            mem.append(
                int(
                    torch.cuda.get_device_properties(i).total_memory
                    / 1024
                    / 1024
                    / 1024
                    + 0.4
                )
            )
if if_gpu_ok and len(gpu_infos) > 0:
    gpu_info = "\n".join(gpu_infos)
    default_batch_size = min(mem) // 2
else:
    gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
    default_batch_size = 1
gpus = "-".join([i[0] for i in gpu_infos])
101
+
102
+
103
class ToolButton(gr.Button, gr.components.FormComponent):
    """Small button with a single emoji as text; fits inside gradio forms."""

    def __init__(self, **kwargs):
        # Force the compact "tool" variant; forward everything else unchanged.
        super().__init__(variant="tool", **kwargs)

    def get_block_name(self):
        return "button"
111
+
112
+
113
# Search roots come from .env: voice models, UVR5 separation models, faiss indexes.
weight_root = os.getenv("weight_root")
weight_uvr5_root = os.getenv("weight_uvr5_root")
index_root = os.getenv("index_root")

names = [name for name in os.listdir(weight_root) if name.endswith(".pth")]

index_paths = []
for root, dirs, files in os.walk(index_root, topdown=False):
    for name in files:
        # "trained" indexes are intermediates; only "added" ones are usable.
        if name.endswith(".index") and "trained" not in name:
            index_paths.append("%s/%s" % (root, name))

uvr5_names = [
    name.replace(".pth", "")
    for name in os.listdir(weight_uvr5_root)
    if name.endswith(".pth") or "onnx" in name
]
130
+
131
+
132
def change_choices():
    """Rescan models, faiss indexes and audio files; return gradio update dicts.

    Returns a 3-tuple of update dicts for the model dropdown, the index
    dropdown, and the audio-file dropdown.
    """
    names = [name for name in os.listdir(weight_root) if name.endswith(".pth")]
    index_paths = []
    for root, dirs, files in os.walk(index_root, topdown=False):
        for name in files:
            if name.endswith(".index") and "trained" not in name:
                index_paths.append("%s/%s" % (root, name))
    # BUG FIX: sibling find_audios() creates ./audios on demand but this
    # function listed it unconditionally and crashed with FileNotFoundError
    # when the dir did not exist yet. Create it first.
    os.makedirs("./audios", exist_ok=True)
    audio_files = []
    for filename in os.listdir("./audios"):
        if filename.endswith(('.wav', '.mp3', '.ogg')):
            audio_files.append('./audios/' + filename)
    return (
        {"choices": sorted(names), "__type__": "update"},
        {"choices": sorted(index_paths), "__type__": "update"},
        {"choices": sorted(audio_files), "__type__": "update"},
    )
150
+
151
def clean():
    """Return a gradio update dict that resets the model dropdown to empty."""
    return {"value": "", "__type__": "update"}
153
+
154
+
155
def export_onnx():
    """Lazy-import and run the ONNX export entry point."""
    from infer.modules.onnx.export import export_onnx as eo

    eo()
159
+
160
+
161
# UI sample-rate label -> sample rate in Hz.
sr_dict = dict(zip(("32k", "40k", "48k"), (32000, 40000, 48000)))
166
+
167
+
168
def if_done(done, p):
    """Poll subprocess *p* until it exits, then flag completion.

    *done* is a one-element mutable list used as an out-parameter so the
    spawning code can observe completion from another thread.
    """
    while p.poll() is None:  # poll() is None while the process runs
        sleep(0.5)
    done[0] = True
175
+
176
+
177
def if_done_multi(done, ps):
    """Poll every subprocess in *ps*; flag done[0] once all have exited.

    p.poll() is None while a process is still running, so keep waiting as
    long as any process reports None.
    """
    while any(p.poll() is None for p in ps):
        sleep(0.5)
    done[0] = True
190
+
191
+
192
def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
    """Run the dataset-preprocessing script and stream its log to the UI.

    Generator: repeatedly yields the accumulated contents of
    logs/<exp_dir>/preprocess.log while the subprocess runs, then yields the
    final log once it has finished.
    """
    sr = sr_dict[sr]
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    log_path = "%s/logs/%s/preprocess.log" % (now_dir, exp_dir)
    # Truncate the log so only this run's output is streamed.
    open(log_path, "w").close()
    per = 3.0 if config.is_half else 3.7
    cmd = '"%s" infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' % (
        config.python_cmd,
        trainset_dir,
        sr,
        n_p,
        now_dir,
        exp_dir,
        config.noparallel,
        per,
    )
    logger.info(cmd)
    p = Popen(cmd, shell=True)
    # gradio only surfaces Popen output after the process has fully finished,
    # so a watcher thread flags completion while we poll the log file instead.
    done = [False]
    threading.Thread(target=if_done, args=(done, p)).start()
    while 1:
        with open(log_path, "r") as f:
            yield f.read()
        sleep(1)
        if done[0]:
            break
    with open(log_path, "r") as f:
        log = f.read()
    logger.info(log)
    yield log
229
+
230
+
231
+ # but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
232
+ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvpe):
233
+ gpus = gpus.split("-")
234
+ os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
235
+ f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
236
+ f.close()
237
+ if if_f0:
238
+ if f0method != "rmvpe_gpu":
239
+ cmd = (
240
+ '"%s" infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s'
241
+ % (
242
+ config.python_cmd,
243
+ now_dir,
244
+ exp_dir,
245
+ n_p,
246
+ f0method,
247
+ )
248
+ )
249
+ logger.info(cmd)
250
+ p = Popen(
251
+ cmd, shell=True, cwd=now_dir
252
+ ) # , stdin=PIPE, stdout=PIPE,stderr=PIPE
253
+ ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
254
+ done = [False]
255
+ threading.Thread(
256
+ target=if_done,
257
+ args=(
258
+ done,
259
+ p,
260
+ ),
261
+ ).start()
262
+ else:
263
+ if gpus_rmvpe != "-":
264
+ gpus_rmvpe = gpus_rmvpe.split("-")
265
+ leng = len(gpus_rmvpe)
266
+ ps = []
267
+ for idx, n_g in enumerate(gpus_rmvpe):
268
+ cmd = (
269
+ '"%s" infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s '
270
+ % (
271
+ config.python_cmd,
272
+ leng,
273
+ idx,
274
+ n_g,
275
+ now_dir,
276
+ exp_dir,
277
+ config.is_half,
278
+ )
279
+ )
280
+ logger.info(cmd)
281
+ p = Popen(
282
+ cmd, shell=True, cwd=now_dir
283
+ ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
284
+ ps.append(p)
285
+ ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
286
+ done = [False]
287
+ threading.Thread(
288
+ target=if_done_multi, #
289
+ args=(
290
+ done,
291
+ ps,
292
+ ),
293
+ ).start()
294
+ else:
295
+ cmd = (
296
+ config.python_cmd
297
+ + ' infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" '
298
+ % (
299
+ now_dir,
300
+ exp_dir,
301
+ )
302
+ )
303
+ logger.info(cmd)
304
+ p = Popen(
305
+ cmd, shell=True, cwd=now_dir
306
+ ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
307
+ p.wait()
308
+ done = [True]
309
+ while 1:
310
+ with open(
311
+ "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
312
+ ) as f:
313
+ yield (f.read())
314
+ sleep(1)
315
+ if done[0]:
316
+ break
317
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
318
+ log = f.read()
319
+ logger.info(log)
320
+ yield log
321
+ ####对不同part分别开多��程
322
+ """
323
+ n_part=int(sys.argv[1])
324
+ i_part=int(sys.argv[2])
325
+ i_gpu=sys.argv[3]
326
+ exp_dir=sys.argv[4]
327
+ os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
328
+ """
329
+ leng = len(gpus)
330
+ ps = []
331
+ for idx, n_g in enumerate(gpus):
332
+ cmd = (
333
+ '"%s" infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s'
334
+ % (
335
+ config.python_cmd,
336
+ config.device,
337
+ leng,
338
+ idx,
339
+ n_g,
340
+ now_dir,
341
+ exp_dir,
342
+ version19,
343
+ )
344
+ )
345
+ logger.info(cmd)
346
+ p = Popen(
347
+ cmd, shell=True, cwd=now_dir
348
+ ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
349
+ ps.append(p)
350
+ ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
351
+ done = [False]
352
+ threading.Thread(
353
+ target=if_done_multi,
354
+ args=(
355
+ done,
356
+ ps,
357
+ ),
358
+ ).start()
359
+ while 1:
360
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
361
+ yield (f.read())
362
+ sleep(1)
363
+ if done[0]:
364
+ break
365
+ with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
366
+ log = f.read()
367
+ logger.info(log)
368
+ yield log
369
+
370
+
371
def get_pretrained_models(path_str, f0_str, sr2):
    """Return paths of the pretrained (G, D) checkpoints for a train config.

    Either element is "" when the corresponding file is missing, which tells
    the trainer to start that network from scratch.
    """
    g_path = "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
    d_path = "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
    if_pretrained_generator_exist = os.access(g_path, os.F_OK)
    if_pretrained_discriminator_exist = os.access(d_path, os.F_OK)
    if not if_pretrained_generator_exist:
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            "assets/pretrained%s/%sG%s.pth not exist, will not use pretrained model",
            path_str,
            f0_str,
            sr2,
        )
    if not if_pretrained_discriminator_exist:
        logger.warning(
            "assets/pretrained%s/%sD%s.pth not exist, will not use pretrained model",
            path_str,
            f0_str,
            sr2,
        )
    return (
        g_path if if_pretrained_generator_exist else "",
        d_path if if_pretrained_discriminator_exist else "",
    )
400
+
401
+
402
def change_sr2(sr2, if_f0_3, version19):
    """Refresh pretrained model paths when the sample rate changes."""
    suffix = "" if version19 == "v1" else "_v2"
    prefix = "f0" if if_f0_3 else ""
    return get_pretrained_models(suffix, prefix, sr2)
406
+
407
+
408
def change_version19(sr2, if_f0_3, version19):
    """Refresh pretrained paths and sample-rate choices on version change.

    v1 has no 32k pretrained models, so 32k falls back to 40k and the 32k
    choice is removed from the dropdown.
    """
    path_str = "" if version19 == "v1" else "_v2"
    if sr2 == "32k" and version19 == "v1":
        sr2 = "40k"
    if version19 == "v1":
        to_return_sr2 = {"choices": ["40k", "48k"], "__type__": "update", "value": sr2}
    else:
        to_return_sr2 = {
            "choices": ["40k", "48k", "32k"],
            "__type__": "update",
            "value": sr2,
        }
    f0_str = "f0" if if_f0_3 else ""
    return (*get_pretrained_models(path_str, f0_str, sr2), to_return_sr2)
422
+
423
+
424
def change_f0(if_f0_3, sr2, version19):
    """Toggle f0-dependent controls and refresh the f0 pretrained paths."""
    path_str = "" if version19 == "v1" else "_v2"
    visibility = {"visible": if_f0_3, "__type__": "update"}
    return (visibility, *get_pretrained_models(path_str, "f0", sr2))
430
+
431
+
432
+ # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
433
+ def click_train(
434
+ exp_dir1,
435
+ sr2,
436
+ if_f0_3,
437
+ spk_id5,
438
+ save_epoch10,
439
+ total_epoch11,
440
+ batch_size12,
441
+ if_save_latest13,
442
+ pretrained_G14,
443
+ pretrained_D15,
444
+ gpus16,
445
+ if_cache_gpu17,
446
+ if_save_every_weights18,
447
+ version19,
448
+ ):
449
+ # 生成filelist
450
+ exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
451
+ os.makedirs(exp_dir, exist_ok=True)
452
+ gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
453
+ feature_dir = (
454
+ "%s/3_feature256" % (exp_dir)
455
+ if version19 == "v1"
456
+ else "%s/3_feature768" % (exp_dir)
457
+ )
458
+ if if_f0_3:
459
+ f0_dir = "%s/2a_f0" % (exp_dir)
460
+ f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
461
+ names = (
462
+ set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
463
+ & set([name.split(".")[0] for name in os.listdir(feature_dir)])
464
+ & set([name.split(".")[0] for name in os.listdir(f0_dir)])
465
+ & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
466
+ )
467
+ else:
468
+ names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
469
+ [name.split(".")[0] for name in os.listdir(feature_dir)]
470
+ )
471
+ opt = []
472
+ for name in names:
473
+ if if_f0_3:
474
+ opt.append(
475
+ "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
476
+ % (
477
+ gt_wavs_dir.replace("\\", "\\\\"),
478
+ name,
479
+ feature_dir.replace("\\", "\\\\"),
480
+ name,
481
+ f0_dir.replace("\\", "\\\\"),
482
+ name,
483
+ f0nsf_dir.replace("\\", "\\\\"),
484
+ name,
485
+ spk_id5,
486
+ )
487
+ )
488
+ else:
489
+ opt.append(
490
+ "%s/%s.wav|%s/%s.npy|%s"
491
+ % (
492
+ gt_wavs_dir.replace("\\", "\\\\"),
493
+ name,
494
+ feature_dir.replace("\\", "\\\\"),
495
+ name,
496
+ spk_id5,
497
+ )
498
+ )
499
+ fea_dim = 256 if version19 == "v1" else 768
500
+ if if_f0_3:
501
+ for _ in range(2):
502
+ opt.append(
503
+ "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
504
+ % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
505
+ )
506
+ else:
507
+ for _ in range(2):
508
+ opt.append(
509
+ "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
510
+ % (now_dir, sr2, now_dir, fea_dim, spk_id5)
511
+ )
512
+ shuffle(opt)
513
+ with open("%s/filelist.txt" % exp_dir, "w") as f:
514
+ f.write("\n".join(opt))
515
+ logger.debug("Write filelist done")
516
+ # 生成config#无需生成config
517
+ # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
518
+ logger.info("Use gpus: %s", str(gpus16))
519
+ if pretrained_G14 == "":
520
+ logger.info("No pretrained Generator")
521
+ if pretrained_D15 == "":
522
+ logger.info("No pretrained Discriminator")
523
+ if version19 == "v1" or sr2 == "40k":
524
+ config_path = "v1/%s.json" % sr2
525
+ else:
526
+ config_path = "v2/%s.json" % sr2
527
+ config_save_path = os.path.join(exp_dir, "config.json")
528
+ if not pathlib.Path(config_save_path).exists():
529
+ with open(config_save_path, "w", encoding="utf-8") as f:
530
+ json.dump(
531
+ config.json_config[config_path],
532
+ f,
533
+ ensure_ascii=False,
534
+ indent=4,
535
+ sort_keys=True,
536
+ )
537
+ f.write("\n")
538
+ if gpus16:
539
+ cmd = (
540
+ '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'
541
+ % (
542
+ config.python_cmd,
543
+ exp_dir1,
544
+ sr2,
545
+ 1 if if_f0_3 else 0,
546
+ batch_size12,
547
+ gpus16,
548
+ total_epoch11,
549
+ save_epoch10,
550
+ "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "",
551
+ "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "",
552
+ 1 if if_save_latest13 == i18n("是") else 0,
553
+ 1 if if_cache_gpu17 == i18n("是") else 0,
554
+ 1 if if_save_every_weights18 == i18n("是") else 0,
555
+ version19,
556
+ )
557
+ )
558
+ else:
559
+ cmd = (
560
+ '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'
561
+ % (
562
+ config.python_cmd,
563
+ exp_dir1,
564
+ sr2,
565
+ 1 if if_f0_3 else 0,
566
+ batch_size12,
567
+ total_epoch11,
568
+ save_epoch10,
569
+ "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "",
570
+ "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "",
571
+ 1 if if_save_latest13 == i18n("是") else 0,
572
+ 1 if if_cache_gpu17 == i18n("是") else 0,
573
+ 1 if if_save_every_weights18 == i18n("是") else 0,
574
+ version19,
575
+ )
576
+ )
577
+ logger.info(cmd)
578
+ p = Popen(cmd, shell=True, cwd=now_dir)
579
+ p.wait()
580
+ return "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"
581
+
582
+
583
+ # but4.click(train_index, [exp_dir1], info3)
584
def train_index(exp_dir1, version19):
    """Build the faiss IVF retrieval index for experiment *exp_dir1*.

    Generator: yields progress messages as it loads features from
    logs/<exp>/3_feature{256,768}, optionally reduces very large feature sets
    to 10k kmeans centers, then trains and populates the index.
    """
    exp_dir = "logs/%s" % (exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )
    if not os.path.exists(feature_dir):
        return "请先进行特征提取!"
    listdir_res = list(os.listdir(feature_dir))
    if len(listdir_res) == 0:
        return "请先进行特征提取!"
    infos = []
    npys = [np.load("%s/%s" % (feature_dir, name)) for name in sorted(listdir_res)]
    big_npy = np.concatenate(npys, 0)
    # Shuffle rows so kmeans / IVF training sees an unbiased sample.
    big_npy_idx = np.arange(big_npy.shape[0])
    np.random.shuffle(big_npy_idx)
    big_npy = big_npy[big_npy_idx]
    if big_npy.shape[0] > 2e5:
        infos.append("Trying doing kmeans %s shape to 10k centers." % big_npy.shape[0])
        yield "\n".join(infos)
        try:
            big_npy = (
                MiniBatchKMeans(
                    n_clusters=10000,
                    verbose=True,
                    batch_size=256 * config.n_cpu,
                    compute_labels=False,
                    init="random",
                )
                .fit(big_npy)
                .cluster_centers_
            )
        except:
            # Best-effort: report the failure and continue with the raw
            # (unclustered) features.
            info = traceback.format_exc()
            logger.info(info)
            infos.append(info)
            yield "\n".join(infos)

    np.save("%s/total_fea.npy" % exp_dir, big_npy)
    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    infos.append("%s,%s" % (big_npy.shape, n_ivf))
    yield "\n".join(infos)
    index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
    infos.append("training")
    yield "\n".join(infos)
    index_ivf = faiss.extract_index_ivf(index)
    index_ivf.nprobe = 1
    index.train(big_npy)
    faiss.write_index(
        index,
        "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )

    infos.append("adding")
    yield "\n".join(infos)
    # Add vectors in chunks to bound peak memory.
    batch_size_add = 8192
    for i in range(0, big_npy.shape[0], batch_size_add):
        index.add(big_npy[i : i + batch_size_add])
    faiss.write_index(
        index,
        "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    infos.append(
        "成功构建索引,added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
    )
    yield "\n".join(infos)
662
+
663
+
664
+ # but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
665
+ def train1key(
666
+ exp_dir1,
667
+ sr2,
668
+ if_f0_3,
669
+ trainset_dir4,
670
+ spk_id5,
671
+ np7,
672
+ f0method8,
673
+ save_epoch10,
674
+ total_epoch11,
675
+ batch_size12,
676
+ if_save_latest13,
677
+ pretrained_G14,
678
+ pretrained_D15,
679
+ gpus16,
680
+ if_cache_gpu17,
681
+ if_save_every_weights18,
682
+ version19,
683
+ gpus_rmvpe,
684
+ ):
685
+ infos = []
686
+
687
+ def get_info_str(strr):
688
+ infos.append(strr)
689
+ return "\n".join(infos)
690
+
691
+ ####### step1:处理数据
692
+ yield get_info_str(i18n("step1:正在处理数据"))
693
+ [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)]
694
+
695
+ ####### step2a:提取音高
696
+ yield get_info_str(i18n("step2:正在提取音高&正在提取特征"))
697
+ [
698
+ get_info_str(_)
699
+ for _ in extract_f0_feature(
700
+ gpus16, np7, f0method8, if_f0_3, exp_dir1, version19, gpus_rmvpe
701
+ )
702
+ ]
703
+
704
+ ####### step3a:训练模型
705
+ yield get_info_str(i18n("step3a:正在训练模型"))
706
+ click_train(
707
+ exp_dir1,
708
+ sr2,
709
+ if_f0_3,
710
+ spk_id5,
711
+ save_epoch10,
712
+ total_epoch11,
713
+ batch_size12,
714
+ if_save_latest13,
715
+ pretrained_G14,
716
+ pretrained_D15,
717
+ gpus16,
718
+ if_cache_gpu17,
719
+ if_save_every_weights18,
720
+ version19,
721
+ )
722
+ yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"))
723
+
724
+ ####### step3b:训练索引
725
+ [get_info_str(_) for _ in train_index(exp_dir1, version19)]
726
+ yield get_info_str(i18n("全流程结束!"))
727
+
728
+
729
+ # ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
730
def change_info_(ckpt_path):
    """Read sample rate, f0 flag and version from the train.log beside a ckpt.

    Returns (sr, f0_as_str, version) on success, otherwise three no-op gradio
    update dicts.
    """
    log_path = ckpt_path.replace(os.path.basename(ckpt_path), "train.log")
    if not os.path.exists(log_path):
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
    try:
        with open(log_path, "r") as f:
            # NOTE(review): eval on file contents — train.log is written by our
            # own trainer, but this is unsafe for untrusted checkpoint folders.
            info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
            sr, f0 = info["sample_rate"], info["if_f0"]
            version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
            return sr, str(f0), version
    except:
        traceback.print_exc()
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
744
+
745
+
746
# Hide the rmvpe GPU picker on DirectML builds; idiomatic `not x` instead of
# the non-idiomatic `x == False` comparison.
F0GPUVisible = not config.dml
747
+
748
+
749
def change_f0_method(f0method8):
    """Show the rmvpe GPU picker only when the rmvpe_gpu method is selected."""
    visible = F0GPUVisible if f0method8 == "rmvpe_gpu" else False
    return {"visible": visible, "__type__": "update"}
755
+
756
def find_model():
    """Pre-load the alphabetically-first voice model; return its name or ''."""
    if not names:
        try:
            gr.Info("Do not forget to choose a model.")
        except:
            # gr.Info is unavailable on older gradio versions; best-effort.
            pass
        return ''
    first = sorted(names)[0]
    vc.get_vc(first, None, None)
    return first
766
+
767
def find_audios(index=False):
    """List audio files under ./audios, creating the directory if needed.

    With index=True return just the first path (or "" when none exist);
    otherwise return the sorted list (possibly empty).
    """
    if not os.path.exists('./audios'):
        os.mkdir("./audios")
    audio_files = sorted(
        "./audios/" + filename
        for filename in os.listdir("./audios")
        if filename.endswith(('.wav', '.mp3', '.ogg'))
    )
    if index:
        return audio_files[0] if audio_files else ""
    return audio_files if audio_files else []
778
+
779
def get_index():
    """Return the first .index file in the default model's log dir, or ''."""
    if find_model() == '':
        return ''
    chosen_model = sorted(names)[0].split(".")[0]
    logs_path = "./logs/" + chosen_model
    if os.path.exists(logs_path):
        for file in os.listdir(logs_path):
            if file.endswith(".index"):
                return os.path.join(logs_path, file)
    return ''
790
+
791
def get_indexes():
    """Collect every .index file under ./logs; return '' when none exist.

    NOTE(review): the empty result is '' rather than [] — callers (gradio
    Dropdown choices) appear to rely on that; preserved as-is.
    """
    indexes_list = [
        os.path.join(dirpath, filename)
        for dirpath, dirnames, filenames in os.walk("./logs/")
        for filename in filenames
        if filename.endswith(".index")
    ]
    return indexes_list if indexes_list else ''
801
+
802
def save_wav(file):
    """Move an uploaded or recorded audio file into ./audios.

    *file* is either a gradio file object (has .name) or a plain path string
    from the microphone widget. Returns the new path, or None when *file* is
    neither.
    """
    try:
        file_path = file.name
        shutil.move(file_path, './audios')
        return './audios/' + os.path.basename(file_path)
    except AttributeError:
        # Plain string path (microphone recording): give it a timestamped name.
        try:
            new_name = 'kpop' + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '.wav'
            new_path = './audios/' + new_name
            shutil.move(file, new_path)
            return new_path
        except TypeError:
            return None
815
+
816
def download_from_url(url, model):
    """Download a zipped RVC model from *url* and install it as *model*.

    Supports Google Drive (gdown), mega.nz (Mega) and generic URLs (wget).
    The zip's .index file is copied to ./logs/<model>/ and the first .pth
    that is not a G_/D_ training checkpoint to ./assets/weights/<model>.pth.
    Returns a human-readable status string.
    """
    if url == '':
        return "URL cannot be left empty."
    if model == '':
        return "You need to name your model. With the model maker For example: Model-.nww's"
    url = url.strip()
    # Start from clean staging directories.
    for directory in ["zips", "unzips"]:
        if os.path.exists(directory):
            shutil.rmtree(directory)
    os.makedirs("zips", exist_ok=True)
    os.makedirs("unzips", exist_ok=True)
    zipfile = model + '.zip'
    zipfile_path = './zips/' + zipfile
    try:
        if "drive.google.com" in url:
            subprocess.run(["gdown", url, "--fuzzy", "-O", zipfile_path])
        elif "mega.nz" in url:
            m = Mega()
            m.download_url(url, './zips')
        else:
            subprocess.run(["wget", url, "-O", zipfile_path])
        for filename in os.listdir("./zips"):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join("./zips/", filename)
                shutil.unpack_archive(zipfile_path, "./unzips", 'zip')
            else:
                return "No zipfile found."
        for root, dirs, files in os.walk('./unzips'):
            for file in files:
                file_path = os.path.join(root, file)
                if file.endswith(".index"):
                    # BUG FIX: os.mkdir raised FileExistsError when the dir
                    # already existed or the zip held a second .index file.
                    os.makedirs(f'./logs/{model}', exist_ok=True)
                    shutil.copy2(file_path, f'./logs/{model}')
                elif "G_" not in file and "D_" not in file and file.endswith(".pth"):
                    shutil.copy(file_path, f'./assets/weights/{model}.pth')
        shutil.rmtree("zips")
        shutil.rmtree("unzips")
        return "Model Successfully Imported. (If you are using a google drive link it may not work even this said Success)"
    except:
        return "There's been an error. (Check your link again!) or or (it worked and this is a false error haha... help)"
857
+
858
def upload_to_dataset(files, dir):
    """Copy uploaded files into a dataset dir (timestamped default name)."""
    if dir == '':
        dir = './dataset/' + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    os.makedirs(dir, exist_ok=True)
    for file in files:
        shutil.copy2(file.name, dir)
    try:
        gr.Info(i18n("处理数据"))
    except:
        # gr.Info is unavailable on older gradio versions; best-effort.
        pass
    return i18n("处理数据"), {"value": dir, "__type__": "update"}
871
+
872
def download_model_files(model):
    """Locate *model*'s .pth weight and its 'added' .index for download.

    Returns (paths, message): both files, one file, or (None, error message).
    """
    model_found = os.path.exists(f'./assets/weights/{model}.pth')
    index_found = False
    if os.path.exists(f'./logs/{model}'):
        for file in os.listdir(f'./logs/{model}'):
            # Only "added" indexes are final; "trained" ones are intermediates.
            if file.endswith('.index') and 'added' in file:
                log_file = file
                index_found = True
    if model_found and index_found:
        return [f'./assets/weights/{model}.pth', f'./logs/{model}/{log_file}'], "Done"
    if model_found:
        return f'./assets/weights/{model}.pth', "Could not find Index file."
    if index_found:
        return f'./logs/{model}/{log_file}', f'Make sure the Voice Name is correct. I could not find {model}.pth'
    return None, f'Could not find {model}.pth or corresponding Index file.'
889
+
890
+ with gr.Blocks(title="KPOPEASYGUI 🔊",theme=gr.themes.Base(primary_hue="rose", secondary_hue="pink", neutral_hue="slate")) as app:
891
+ with gr.Row():
892
+ gr.HTML("<img src='file/lp.gif' alt='image/gif'>")
893
+ with gr.Tabs():
894
+ with gr.TabItem(i18n("模型推理")):
895
+ with gr.Row():
896
+ sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names), value=find_model())
897
+ refresh_button = gr.Button(i18n("刷新音色列表和索引路径"), variant="primary")
898
+ #clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary")
899
+ spk_item = gr.Slider(
900
+ minimum=0,
901
+ maximum=2333,
902
+ step=1,
903
+ label=i18n("请选择说话人id"),
904
+ value=0,
905
+ visible=False,
906
+ interactive=True,
907
+ )
908
+ #clean_button.click(
909
+ # fn=clean, inputs=[], outputs=[sid0], api_name="infer_clean"
910
+ #)
911
+ vc_transform0 = gr.Number(
912
+ label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
913
+ )
914
+ but0 = gr.Button(i18n("转换"), variant="primary")
915
+ with gr.Row():
916
+ with gr.Column():
917
+ with gr.Row():
918
+ dropbox = gr.File(label="Drop your audio here & hit the Reload button.")
919
+ with gr.Row():
920
+ record_button=gr.Audio(source="microphone", label="OR Record audio.", type="filepath")
921
+ with gr.Row():
922
+ input_audio0 = gr.Dropdown(
923
+ label=i18n("输入待处理音频文件路径(默认是正确格式示例)"),
924
+ value=find_audios(True),
925
+ choices=find_audios()
926
+ )
927
+ record_button.change(fn=save_wav, inputs=[record_button], outputs=[input_audio0])
928
+ dropbox.upload(fn=save_wav, inputs=[dropbox], outputs=[input_audio0])
929
with gr.Column():
    # NOTE(review): nesting reconstructed from a mangled diff — verify that
    # index_rate1 belongs inside this accordion in the upstream layout.
    with gr.Accordion(label=i18n("自动检测index路径,下拉式选择(dropdown)"), open=False):
        # Auto-detected .index files for feature retrieval.
        file_index2 = gr.Dropdown(
            label=i18n("自动检测index路径,下拉式选择(dropdown)"),
            choices=get_indexes(),
            interactive=True,
            value=get_index()
        )
        # Blend ratio of retrieved index features vs. raw features.
        index_rate1 = gr.Slider(
            minimum=0,
            maximum=1,
            label=i18n("检索特征占比"),
            value=0.80,
            interactive=True,
        )
    vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)"))
    with gr.Accordion(label=i18n("常规设置"), open=False):
        # Pitch-extraction algorithm; "crepe" is dropped on DirectML builds.
        # FIX: `config.dml == False` -> `not config.dml` (idiomatic,
        # behavior-identical for a boolean flag).
        f0method0 = gr.Radio(
            label=i18n(
                "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU"
            ),
            choices=["pm", "harvest", "crepe", "rmvpe"]
            if not config.dml
            else ["pm", "harvest", "rmvpe"],
            value="rmvpe",
            interactive=True,
        )
        # Median-filter radius applied to harvest pitch output (>= 3 enables).
        filter_radius0 = gr.Slider(
            minimum=0,
            maximum=7,
            label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
            value=3,
            step=1,
            interactive=True,
        )
        # Optional post-resample target rate (0 = keep native rate); hidden.
        resample_sr0 = gr.Slider(
            minimum=0,
            maximum=48000,
            label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
            value=0,
            step=1,
            interactive=True,
            visible=False
        )
        # Volume-envelope mix: 1 keeps the converted output's own envelope.
        rms_mix_rate0 = gr.Slider(
            minimum=0,
            maximum=1,
            label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
            value=0.21,
            interactive=True,
        )
        # Consonant/breath protection; 0.5 disables it.
        # FIX: the original label contained a mojibake sequence ("保护力��")
        # which can never match the i18n translation table; restored to the
        # intact string used by the batch-tab `protect1` slider.
        protect0 = gr.Slider(
            minimum=0,
            maximum=0.5,
            label=i18n(
                "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
            ),
            value=0.28,
            step=0.01,
            interactive=True,
        )
        # Manual index path; overrides the dropdown when non-empty. Hidden.
        file_index1 = gr.Textbox(
            label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
            value="",
            interactive=True,
            visible=False
        )
    # Refresh repopulates the voice list, index dropdown and audio list.
    refresh_button.click(
        fn=change_choices,
        inputs=[],
        outputs=[sid0, file_index2, input_audio0],
        api_name="infer_refresh",
    )
    # file_big_npy1 = gr.Textbox(
    #     label=i18n("特征文件路径"),
    #     value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
    #     interactive=True,
    # )
1007
with gr.Row():
    # Optional F0 curve file (one pitch per line) replacing the extracted
    # pitch and transpose; hidden in this build.
    f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False)
with gr.Row():
    vc_output1 = gr.Textbox(label=i18n("输出信息"))
# Wire the single-file convert button to the voice-conversion backend.
# The positional order below must match vc.vc_single's signature exactly.
but0.click(
    vc.vc_single,
    [
        spk_item,
        input_audio0,
        vc_transform0,
        f0_file,
        f0method0,
        file_index1,
        file_index2,
        # file_big_npy1,
        index_rate1,
        filter_radius0,
        resample_sr0,
        rms_mix_rate0,
        protect0,
    ],
    [vc_output1, vc_output2],
    api_name="infer_convert",
)
1031
with gr.Row():
    # Batch conversion: convert every file in a folder (or an uploaded
    # multi-file set) and write results to the output folder (default "opt").
    with gr.Accordion(open=False, label=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ")):
        with gr.Row():
            opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
            vc_transform1 = gr.Number(
                label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
            )
            # FIX: `config.dml == False` -> `not config.dml` (idiomatic,
            # behavior-identical): "crepe" is unavailable on DirectML.
            f0method1 = gr.Radio(
                label=i18n(
                    "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU"
                ),
                choices=["pm", "harvest", "crepe", "rmvpe"]
                if not config.dml
                else ["pm", "harvest", "rmvpe"],
                value="pm",
                interactive=True,
            )
        with gr.Row():
            # Hidden: median-filter radius for harvest pitch (>= 3 enables).
            filter_radius1 = gr.Slider(
                minimum=0,
                maximum=7,
                label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
                value=3,
                step=1,
                interactive=True,
                visible=False
            )
        with gr.Row():
            # NOTE(review): file_index3/file_index4/index_rate2 are created
            # (hidden) but `but1.click` below feeds the single-tab widgets
            # file_index1/file_index2/index_rate1 instead — looks like a
            # copy-paste leftover; confirm intent before removing them.
            file_index3 = gr.Textbox(
                label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
                value="",
                interactive=True,
                visible=False
            )
            file_index4 = gr.Dropdown(
                label=i18n("自动检测index路径,下拉式选择(dropdown)"),
                choices=sorted(index_paths),
                interactive=True,
                visible=False
            )
            # Batch refresh only needs the refreshed index choices
            # (second element of change_choices()'s return).
            refresh_button.click(
                fn=lambda: change_choices()[1],
                inputs=[],
                outputs=file_index4,
                api_name="infer_refresh_batch",
            )
            # file_big_npy2 = gr.Textbox(
            #     label=i18n("特征文件路径"),
            #     value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
            #     interactive=True,
            # )
            index_rate2 = gr.Slider(
                minimum=0,
                maximum=1,
                label=i18n("检索特征占比"),
                value=1,
                interactive=True,
                visible=False
            )
        with gr.Row():
            # Hidden post-resample control (0 = no resampling).
            resample_sr1 = gr.Slider(
                minimum=0,
                maximum=48000,
                label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
                value=0,
                step=1,
                interactive=True,
                visible=False
            )
            # Volume-envelope mix: 1 keeps the converted output's envelope.
            rms_mix_rate1 = gr.Slider(
                minimum=0,
                maximum=1,
                label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
                value=0.21,
                interactive=True,
            )
            # Consonant/breath protection; 0.5 disables it.
            protect1 = gr.Slider(
                minimum=0,
                maximum=0.5,
                label=i18n(
                    "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
                ),
                value=0.28,
                step=0.01,
                interactive=True,
            )
        with gr.Row():
            # Folder input takes priority over the multi-file upload below.
            dir_input = gr.Textbox(
                label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
                value="./audios",
            )
            inputs = gr.File(
                file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
            )
        with gr.Row():
            format1 = gr.Radio(
                label=i18n("导出文件格式"),
                choices=["wav", "flac", "mp3", "m4a"],
                value="wav",
                interactive=True,
            )
            but1 = gr.Button(i18n("转换"), variant="primary")
            vc_output3 = gr.Textbox(label=i18n("输出信息"))
        # Positional order must match vc.vc_multi's signature exactly.
        but1.click(
            vc.vc_multi,
            [
                spk_item,
                dir_input,
                opt_input,
                inputs,
                vc_transform1,
                f0method1,
                file_index1,
                file_index2,
                # file_big_npy2,
                index_rate1,
                filter_radius1,
                resample_sr1,
                rms_mix_rate1,
                protect1,
                format1,
            ],
            [vc_output3],
            api_name="infer_convert_batch",
        )
# Selecting a different voice refreshes speaker id, the protection sliders
# and both index dropdowns via vc.get_vc.
sid0.change(
    fn=vc.get_vc,
    inputs=[sid0, protect0, protect1],
    outputs=[spk_item, protect0, protect1, file_index2, file_index4],
)
1161
# Tab for pulling a model archive from a direct URL into the app.
with gr.TabItem("Download Model"):
    with gr.Row():
        gr.Markdown(
            """
            ⚠️ Google Drive Links, and some leelo models will not work with this gradio ⚠️
            """
        )
    with gr.Row():
        url=gr.Textbox(label="Enter the URL to the Model:")
    with gr.Row():
        model = gr.Textbox(label="Name your model (with model maker name!!!):")
        download_button=gr.Button("Download")
    with gr.Row():
        status_bar=gr.Textbox(label="")
    # download_from_url fetches the archive named by `url` and installs it
    # under `model`; its return text is shown in the status box.
    download_button.click(fn=download_from_url, inputs=[url, model], outputs=[status_bar])
    with gr.Row():
        gr.Markdown(
            """
            ❤️ Support Original Creator from this easyGUI ❤️
            paypal.me/lesantillan
            """
        )

# Placeholder tab: training is intentionally not implemented in this build.
with gr.TabItem("Training"):
    with gr.Row():
        gr.Markdown(
            """
            ⚠️ HAHAH YOU THOUGHT I ADDED TRAINING??? NO OFC DUH ⚠️
            """
        )
1192
# Launch the UI. FIX: the Colab branch was a syntax error — the original
# closed `launch(share=True)` with a trailing comma and left
# `favicon_path="./TW.png",` dangling as a bare statement before `else:`.
# The keyword now goes inside the launch() call, matching the other branch.
if config.iscolab:
    # Colab has no routable address, so a public share link is required.
    app.queue(concurrency_count=511, max_size=1022).launch(
        share=True,
        favicon_path="./TW.png",
    )
else:
    # Local/server run: bind all interfaces on the configured port.
    app.queue(concurrency_count=511, max_size=1022).launch(
        server_name="0.0.0.0",
        favicon_path="./TW.png",
        inbrowser=not config.noautoopen,
        server_port=config.listen_port,
        quiet=True,
    )