kevinwang676 committed · verified · commit a360a9d · 1 Parent(s): 5f2b417

Create test.py

Files changed (1):
  1. test.py +1112 -0
test.py ADDED
@@ -0,0 +1,1112 @@
import os,shutil,sys,pdb,re
now_dir = os.getcwd()
sys.path.append(now_dir)
import json,yaml,warnings,torch
import platform
import psutil
import signal
from pathlib import Path

warnings.filterwarnings("ignore")
torch.manual_seed(233333)
tmp = os.path.join(now_dir, "TEMP")
os.makedirs(tmp, exist_ok=True)
os.environ["TEMP"] = tmp
if(os.path.exists(tmp)):
    for name in os.listdir(tmp):
        if(name=="jieba.cache"):continue
        path="%s/%s"%(tmp,name)
        delete=os.remove if os.path.isfile(path) else shutil.rmtree
        try:
            delete(path)
        except Exception as e:
            print(str(e))
            pass
import site
site_packages_roots = []
for path in site.getsitepackages():
    if "packages" in path:
        site_packages_roots.append(path)
if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
#os.environ["OPENBLAS_NUM_THREADS"] = "4"
os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
os.environ["all_proxy"] = ""
for site_packages_root in site_packages_roots:
    if os.path.exists(site_packages_root):
        try:
            with open("%s/users.pth" % (site_packages_root), "w") as f:
                f.write(
                    "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
                    % (now_dir, now_dir, now_dir, now_dir, now_dir)
                )
            break
        except PermissionError:
            pass
from tools import my_utils
import traceback
import shutil
import pdb
import gradio as gr
from subprocess import Popen
import signal
from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
from scipy.io import wavfile
from tools.my_utils import load_audio
from multiprocessing import cpu_count

import argparse
import os
import sys
import tempfile

import gradio as gr
import librosa.display
import numpy as np

import torch
import torchaudio
import traceback
from TTS.demos.xtts_ft_demo.utils.formatter import format_audio_list
from TTS.demos.xtts_ft_demo.utils.gpt_train import train_gpt

from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts

# from .list to .csv
import pandas as pd
from sklearn.model_selection import train_test_split

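# Note: the imports above mix two code bases -- the GPT-SoVITS WebUI helpers
# (tools.*, config, tools.i18n) used for dataset preparation, and the Coqui TTS
# XTTS fine-tuning demo utilities (TTS.demos.xtts_ft_demo.*) used for training
# and inference. A few modules (os, sys, torch, gradio, traceback) are therefore
# imported twice, which is redundant but harmless.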
def split_csv(input_csv, train_csv, eval_csv, eval_size=0.15):
    # Load the data from the CSV file
    data = pd.read_csv(input_csv, delimiter='|', header=0)

    # Split the data into training and evaluation sets
    train_data, eval_data = train_test_split(data, test_size=eval_size, random_state=42)

    # Save the training data to a CSV file
    train_data.to_csv(train_csv, index=False, sep='|')

    # Save the evaluation data to a CSV file
    eval_data.to_csv(eval_csv, index=False, sep='|')

    print("CSV files have been successfully split.")


def convert_list_to_csv(input_file, output_file):
    try:
        # Open the input .list file to read
        with open(input_file, 'r', encoding='utf-8') as infile:
            # Open the output .csv file to write
            with open(output_file, 'w', encoding='utf-8') as outfile:
                # Write the header to the CSV
                outfile.write("audio_file|text|speaker_name\n")
                # Process each line in the input file
                for line in infile:
                    parts = line.strip().split('|')
                    if len(parts) == 4:
                        # Extract relevant parts: WAV file path and transcription
                        wav_path = parts[0]
                        transcription = parts[3]
                        # Write the formatted line to the CSV file
                        outfile.write(f"{wav_path}|{transcription}|coqui\n")
        print("Conversion to CSV completed successfully.")
        split_csv(output_file, "train.csv", "eval.csv")
        print("Split completed successfully")
        return "train.csv", "eval.csv"
    except Exception as e:
        print(f"An error occurred: {e}")

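# Illustration (hypothetical paths): an annotation line from the labeling step
# has four '|'-separated fields (wav path, speaker, language, text), e.g.
#   output/slicer_opt/vocal_0001.wav|slicer_opt|ZH|some transcribed text
# convert_list_to_csv() keeps only the wav path and the transcription and emits
#   output/slicer_opt/vocal_0001.wav|some transcribed text|coqui
# after which split_csv() writes train.csv (85%) and eval.csv (15%).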

def clear_gpu_cache():
    # clear the GPU cache
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

XTTS_MODEL = None
def load_model(xtts_checkpoint, xtts_config, xtts_vocab):
    global XTTS_MODEL
    clear_gpu_cache()
    if not xtts_checkpoint or not xtts_config or not xtts_vocab:
        return "You need to run the previous steps or manually set the `XTTS checkpoint path`, `XTTS config path`, and `XTTS vocab path` fields !!"
    config = XttsConfig()
    config.load_json(xtts_config)
    XTTS_MODEL = Xtts.init_from_config(config)
    print("Loading XTTS model! ")
    XTTS_MODEL.load_checkpoint(config, checkpoint_path=xtts_checkpoint, vocab_path=xtts_vocab, use_deepspeed=False)
    if torch.cuda.is_available():
        XTTS_MODEL.cuda()

    print("Model Loaded!")
    return "Model Loaded!"

def run_tts(lang, tts_text, speaker_audio_file):
    if XTTS_MODEL is None or not speaker_audio_file:
        return "You need to run the previous step to load the model !!", None, None

    gpt_cond_latent, speaker_embedding = XTTS_MODEL.get_conditioning_latents(audio_path=speaker_audio_file, gpt_cond_len=XTTS_MODEL.config.gpt_cond_len, max_ref_length=XTTS_MODEL.config.max_ref_len, sound_norm_refs=XTTS_MODEL.config.sound_norm_refs)
    out = XTTS_MODEL.inference(
        text=tts_text,
        language=lang,
        gpt_cond_latent=gpt_cond_latent,
        speaker_embedding=speaker_embedding,
        temperature=XTTS_MODEL.config.temperature, # Add custom parameters here
        length_penalty=XTTS_MODEL.config.length_penalty,
        repetition_penalty=XTTS_MODEL.config.repetition_penalty,
        top_k=XTTS_MODEL.config.top_k,
        top_p=XTTS_MODEL.config.top_p,
    )

    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        out["wav"] = torch.tensor(out["wav"]).unsqueeze(0)
        out_path = fp.name
        torchaudio.save(out_path, out["wav"], 24000)

    return "Speech generated !", out_path, speaker_audio_file




# define a logger to redirect
class Logger:
    def __init__(self, filename="log.out"):
        self.log_file = filename
        self.terminal = sys.stdout
        self.log = open(self.log_file, "w")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        return False

# redirect stdout and stderr to a file
sys.stdout = Logger()
sys.stderr = sys.stdout


# logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import logging
logging.basicConfig(
    level=logging.WARNING,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)

def read_logs():
    sys.stdout.flush()
    with open(sys.stdout.log_file, "r") as f:
        return f.read()

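# The Logger above tees stdout/stderr into "log.out" so that read_logs() can
# periodically refresh the "training details" textbox in the UI below
# (see app.load(read_logs, None, logs_tts_train, every=1)) while the messages
# still reach the terminal.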

os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # fall back to CPU for steps that MPS does not support

n_cpu=cpu_count()

ngpu = torch.cuda.device_count()
gpu_infos = []
mem = []
if_gpu_ok = False

# check whether there is an NVIDIA GPU usable for training and accelerated inference
if torch.cuda.is_available() or ngpu != 0:
    for i in range(ngpu):
        gpu_name = torch.cuda.get_device_name(i)
        if any(value in gpu_name.upper() for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L4","4060"]):
            # A10#A100#V100#A40#P40#M40#K80#A4500
            if_gpu_ok = True  # at least one usable NVIDIA GPU
            gpu_infos.append("%s\t%s" % (i, gpu_name))
            mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
# check whether MPS acceleration is available
if torch.backends.mps.is_available():
    if_gpu_ok = True
    gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
    mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024)  # in practice, using system RAM as "VRAM" here does not run out of memory

if if_gpu_ok and len(gpu_infos) > 0:
    gpu_info = "\n".join(gpu_infos)
    default_batch_size = min(mem) // 2
else:
    gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
    default_batch_size = 1
gpus = "-".join([i[0] for i in gpu_infos])

pretrained_sovits_name="GPT_SoVITS/pretrained_models/s2G488k.pth"
pretrained_gpt_name="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
def get_weights_names():
    SoVITS_names = [pretrained_sovits_name]
    for name in os.listdir(SoVITS_weight_root):
        if name.endswith(".pth"):SoVITS_names.append(name)
    GPT_names = [pretrained_gpt_name]
    for name in os.listdir(GPT_weight_root):
        if name.endswith(".ckpt"): GPT_names.append(name)
    return SoVITS_names,GPT_names
SoVITS_weight_root="SoVITS_weights"
GPT_weight_root="GPT_weights"
os.makedirs(SoVITS_weight_root,exist_ok=True)
os.makedirs(GPT_weight_root,exist_ok=True)
SoVITS_names,GPT_names = get_weights_names()
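
# default_batch_size is a rough heuristic: half of the smallest detected GPU's
# memory in GiB (or half of system RAM when only Apple's MPS backend is
# available), falling back to 1 when no usable accelerator is found.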

def custom_sort_key(s):
    # use a regular expression to split the string into digit and non-digit parts
    parts = re.split(r'(\d+)', s)
    # convert the digit parts to integers and leave the non-digit parts unchanged
    parts = [int(part) if part.isdigit() else part for part in parts]
    return parts

def change_choices():
    SoVITS_names, GPT_names = get_weights_names()
    return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"}

p_label=None
p_uvr5=None
p_asr=None
p_denoise=None
p_tts_inference=None

def kill_proc_tree(pid, including_parent=True):
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        # Process already terminated
        return

    children = parent.children(recursive=True)
    for child in children:
        try:
            os.kill(child.pid, signal.SIGTERM)  # or signal.SIGKILL
        except OSError:
            pass
    if including_parent:
        try:
            os.kill(parent.pid, signal.SIGTERM)  # or signal.SIGKILL
        except OSError:
            pass

system=platform.system()
def kill_process(pid):
    if(system=="Windows"):
        cmd = "taskkill /t /f /pid %s" % pid
        os.system(cmd)
    else:
        kill_proc_tree(pid)


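# kill_process() is used below to stop the helper WebUIs and training runs that
# are launched with Popen(..., shell=True): on Windows "taskkill /t /f" removes
# the whole process tree, while on other systems kill_proc_tree() walks the tree
# with psutil and sends SIGTERM to every child before the parent.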
303
+ def change_label(if_label,path_list):
304
+ global p_label
305
+ if(if_label==True and p_label==None):
306
+ path_list=my_utils.clean_path(path_list)
307
+ cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share)
308
+ yield i18n("打标工具WebUI已开启")
309
+ print(cmd)
310
+ p_label = Popen(cmd, shell=True)
311
+ elif(if_label==False and p_label!=None):
312
+ kill_process(p_label.pid)
313
+ p_label=None
314
+ yield i18n("打标工具WebUI已关闭")
315
+
316
+ def change_uvr5(if_uvr5):
317
+ global p_uvr5
318
+ if(if_uvr5==True and p_uvr5==None):
319
+ cmd = '"%s" tools/uvr5/webui.py "%s" %s %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5,is_share)
320
+ yield i18n("UVR5已开启")
321
+ print(cmd)
322
+ p_uvr5 = Popen(cmd, shell=True)
323
+ elif(if_uvr5==False and p_uvr5!=None):
324
+ kill_process(p_uvr5.pid)
325
+ p_uvr5=None
326
+ yield i18n("UVR5已关闭")
327
+
328
+ def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path,sovits_path):
329
+ global p_tts_inference
330
+ if(if_tts==True and p_tts_inference==None):
331
+ os.environ["gpt_path"]=gpt_path if "/" in gpt_path else "%s/%s"%(GPT_weight_root,gpt_path)
332
+ os.environ["sovits_path"]=sovits_path if "/"in sovits_path else "%s/%s"%(SoVITS_weight_root,sovits_path)
333
+ os.environ["cnhubert_base_path"]=cnhubert_base_path
334
+ os.environ["bert_path"]=bert_path
335
+ os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number
336
+ os.environ["is_half"]=str(is_half)
337
+ os.environ["infer_ttswebui"]=str(webui_port_infer_tts)
338
+ os.environ["is_share"]=str(is_share)
339
+ cmd = '"%s" GPT_SoVITS/inference_webui.py'%(python_exec)
340
+ yield i18n("TTS推理进程已开启")
341
+ print(cmd)
342
+ p_tts_inference = Popen(cmd, shell=True)
343
+ elif(if_tts==False and p_tts_inference!=None):
344
+ kill_process(p_tts_inference.pid)
345
+ p_tts_inference=None
346
+ yield i18n("TTS推理进程已关闭")
347
+
348
+ from tools.asr.config import asr_dict
349
+ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
350
+ global p_asr
351
+ if(p_asr==None):
352
+ asr_inp_dir=my_utils.clean_path(asr_inp_dir)
353
+ cmd = f'"{python_exec}" tools/asr/{asr_dict[asr_model]["path"]}'
354
+ cmd += f' -i "{asr_inp_dir}"'
355
+ cmd += f' -o "{asr_opt_dir}"'
356
+ cmd += f' -s {asr_model_size}'
357
+ cmd += f' -l {asr_lang}'
358
+ cmd += " -p %s"%("float16"if is_half==True else "float32")
359
+
360
+ yield "ASR任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
361
+ print(cmd)
362
+ p_asr = Popen(cmd, shell=True)
363
+ p_asr.wait()
364
+ p_asr=None
365
+ yield f"ASR任务完成, 查看终端进行下一步",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
366
+ else:
367
+ yield "已有正在进行的ASR任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
368
+ # return None
369
+
370
+ def close_asr():
371
+ global p_asr
372
+ if(p_asr!=None):
373
+ kill_process(p_asr.pid)
374
+ p_asr=None
375
+ return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
376
+ def open_denoise(denoise_inp_dir, denoise_opt_dir):
377
+ global p_denoise
378
+ if(p_denoise==None):
379
+ denoise_inp_dir=my_utils.clean_path(denoise_inp_dir)
380
+ denoise_opt_dir=my_utils.clean_path(denoise_opt_dir)
381
+ cmd = '"%s" tools/cmd-denoise.py -i "%s" -o "%s" -p %s'%(python_exec,denoise_inp_dir,denoise_opt_dir,"float16"if is_half==True else "float32")
382
+
383
+ yield "语音降噪任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
384
+ print(cmd)
385
+ p_denoise = Popen(cmd, shell=True)
386
+ p_denoise.wait()
387
+ p_denoise=None
388
+ yield f"语音降噪任务完成, 查看终端进行下一步",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
389
+ else:
390
+ yield "已有正在进行的语音降噪任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
391
+ # return None
392
+
393
+ def close_denoise():
394
+ global p_denoise
395
+ if(p_denoise!=None):
396
+ kill_process(p_denoise.pid)
397
+ p_denoise=None
398
+ return "已终止语音降噪进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
399
+
400
+ p_train_SoVITS=None
401
+ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D):
402
+ global p_train_SoVITS
403
+ if(p_train_SoVITS==None):
404
+ with open("GPT_SoVITS/configs/s2.json")as f:
405
+ data=f.read()
406
+ data=json.loads(data)
407
+ s2_dir="%s/%s"%(exp_root,exp_name)
408
+ os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True)
409
+ if(is_half==False):
410
+ data["train"]["fp16_run"]=False
411
+ batch_size=max(1,batch_size//2)
412
+ data["train"]["batch_size"]=batch_size
413
+ data["train"]["epochs"]=total_epoch
414
+ data["train"]["text_low_lr_rate"]=text_low_lr_rate
415
+ data["train"]["pretrained_s2G"]=pretrained_s2G
416
+ data["train"]["pretrained_s2D"]=pretrained_s2D
417
+ data["train"]["if_save_latest"]=if_save_latest
418
+ data["train"]["if_save_every_weights"]=if_save_every_weights
419
+ data["train"]["save_every_epoch"]=save_every_epoch
420
+ data["train"]["gpu_numbers"]=gpu_numbers1Ba
421
+ data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir
422
+ data["save_weight_dir"]=SoVITS_weight_root
423
+ data["name"]=exp_name
424
+ tmp_config_path="%s/tmp_s2.json"%tmp
425
+ with open(tmp_config_path,"w")as f:f.write(json.dumps(data))
426
+
427
+ cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path)
428
+ yield "SoVITS训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
429
+ print(cmd)
430
+ p_train_SoVITS = Popen(cmd, shell=True)
431
+ p_train_SoVITS.wait()
432
+ p_train_SoVITS=None
433
+ yield "SoVITS训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
434
+ else:
435
+ yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
436
+
437
+ def close1Ba():
438
+ global p_train_SoVITS
439
+ if(p_train_SoVITS!=None):
440
+ kill_process(p_train_SoVITS.pid)
441
+ p_train_SoVITS=None
442
+ return "已终止SoVITS训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
443
+
444
+ p_train_GPT=None
445
+ def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1):
446
+ global p_train_GPT
447
+ if(p_train_GPT==None):
448
+ with open("GPT_SoVITS/configs/s1longer.yaml")as f:
449
+ data=f.read()
450
+ data=yaml.load(data, Loader=yaml.FullLoader)
451
+ s1_dir="%s/%s"%(exp_root,exp_name)
452
+ os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
453
+ if(is_half==False):
454
+ data["train"]["precision"]="32"
455
+ batch_size = max(1, batch_size // 2)
456
+ data["train"]["batch_size"]=batch_size
457
+ data["train"]["epochs"]=total_epoch
458
+ data["pretrained_s1"]=pretrained_s1
459
+ data["train"]["save_every_n_epoch"]=save_every_epoch
460
+ data["train"]["if_save_every_weights"]=if_save_every_weights
461
+ data["train"]["if_save_latest"]=if_save_latest
462
+ data["train"]["if_dpo"]=if_dpo
463
+ data["train"]["half_weights_save_dir"]=GPT_weight_root
464
+ data["train"]["exp_name"]=exp_name
465
+ data["train_semantic_path"]="%s/6-name2semantic.tsv"%s1_dir
466
+ data["train_phoneme_path"]="%s/2-name2text.txt"%s1_dir
467
+ data["output_dir"]="%s/logs_s1"%s1_dir
468
+
469
+ os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_numbers.replace("-",",")
470
+ os.environ["hz"]="25hz"
471
+ tmp_config_path="%s/tmp_s1.yaml"%tmp
472
+ with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False))
473
+ # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir)
474
+ cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path)
475
+ yield "GPT训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True}
476
+ print(cmd)
477
+ p_train_GPT = Popen(cmd, shell=True)
478
+ p_train_GPT.wait()
479
+ p_train_GPT=None
480
+ yield "GPT训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
481
+ else:
482
+ yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True}
483
+
484
+ def close1Bb():
485
+ global p_train_GPT
486
+ if(p_train_GPT!=None):
487
+ kill_process(p_train_GPT.pid)
488
+ p_train_GPT=None
489
+ return "已终止GPT训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
490
+
491
+ ps_slice=[]
492
+ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts):
493
+ global ps_slice
494
+ inp = my_utils.clean_path(inp)
495
+ opt_root = my_utils.clean_path(opt_root)
496
+ if(os.path.exists(inp)==False):
497
+ yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
498
+ return
499
+ if os.path.isfile(inp):n_parts=1
500
+ elif os.path.isdir(inp):pass
501
+ else:
502
+ yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
503
+ return
504
+ if (ps_slice == []):
505
+ for i_part in range(n_parts):
506
+ cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts)
507
+ print(cmd)
508
+ p = Popen(cmd, shell=True)
509
+ ps_slice.append(p)
510
+ yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
511
+ for p in ps_slice:
512
+ p.wait()
513
+ ps_slice=[]
514
+ yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
515
+ else:
516
+ yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
517
+
518
+ def close_slice():
519
+ global ps_slice
520
+ if (ps_slice != []):
521
+ for p_slice in ps_slice:
522
+ try:
523
+ kill_process(p_slice.pid)
524
+ except:
525
+ traceback.print_exc()
526
+ ps_slice=[]
527
+ return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
528
+
529
+ ps1a=[]
530
+ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
531
+ global ps1a
532
+ inp_text = my_utils.clean_path(inp_text)
533
+ inp_wav_dir = my_utils.clean_path(inp_wav_dir)
534
+ if (ps1a == []):
535
+ opt_dir="%s/%s"%(exp_root,exp_name)
536
+ config={
537
+ "inp_text":inp_text,
538
+ "inp_wav_dir":inp_wav_dir,
539
+ "exp_name":exp_name,
540
+ "opt_dir":opt_dir,
541
+ "bert_pretrained_dir":bert_pretrained_dir,
542
+ }
543
+ gpu_names=gpu_numbers.split("-")
544
+ all_parts=len(gpu_names)
545
+ for i_part in range(all_parts):
546
+ config.update(
547
+ {
548
+ "i_part": str(i_part),
549
+ "all_parts": str(all_parts),
550
+ "_CUDA_VISIBLE_DEVICES": gpu_names[i_part],
551
+ "is_half": str(is_half)
552
+ }
553
+ )
554
+ os.environ.update(config)
555
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
556
+ print(cmd)
557
+ p = Popen(cmd, shell=True)
558
+ ps1a.append(p)
559
+ yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
560
+ for p in ps1a:
561
+ p.wait()
562
+ opt = []
563
+ for i_part in range(all_parts):
564
+ txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
565
+ with open(txt_path, "r", encoding="utf8") as f:
566
+ opt += f.read().strip("\n").split("\n")
567
+ os.remove(txt_path)
568
+ path_text = "%s/2-name2text.txt" % opt_dir
569
+ with open(path_text, "w", encoding="utf8") as f:
570
+ f.write("\n".join(opt) + "\n")
571
+ ps1a=[]
572
+ yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
573
+ else:
574
+ yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
575
+
576
+ def close1a():
577
+ global ps1a
578
+ if (ps1a != []):
579
+ for p1a in ps1a:
580
+ try:
581
+ kill_process(p1a.pid)
582
+ except:
583
+ traceback.print_exc()
584
+ ps1a=[]
585
+ return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
586
+
587
+ ps1b=[]
588
+ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
589
+ global ps1b
590
+ inp_text = my_utils.clean_path(inp_text)
591
+ inp_wav_dir = my_utils.clean_path(inp_wav_dir)
592
+ if (ps1b == []):
593
+ config={
594
+ "inp_text":inp_text,
595
+ "inp_wav_dir":inp_wav_dir,
596
+ "exp_name":exp_name,
597
+ "opt_dir":"%s/%s"%(exp_root,exp_name),
598
+ "cnhubert_base_dir":ssl_pretrained_dir,
599
+ "is_half": str(is_half)
600
+ }
601
+ gpu_names=gpu_numbers.split("-")
602
+ all_parts=len(gpu_names)
603
+ for i_part in range(all_parts):
604
+ config.update(
605
+ {
606
+ "i_part": str(i_part),
607
+ "all_parts": str(all_parts),
608
+ "_CUDA_VISIBLE_DEVICES": gpu_names[i_part],
609
+ }
610
+ )
611
+ os.environ.update(config)
612
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
613
+ print(cmd)
614
+ p = Popen(cmd, shell=True)
615
+ ps1b.append(p)
616
+ yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
617
+ for p in ps1b:
618
+ p.wait()
619
+ ps1b=[]
620
+ yield "SSL提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
621
+ else:
622
+ yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
623
+
624
+ def close1b():
625
+ global ps1b
626
+ if (ps1b != []):
627
+ for p1b in ps1b:
628
+ try:
629
+ kill_process(p1b.pid)
630
+ except:
631
+ traceback.print_exc()
632
+ ps1b=[]
633
+ return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
634
+
635
+ ps1c=[]
636
+ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
637
+ global ps1c
638
+ inp_text = my_utils.clean_path(inp_text)
639
+ if (ps1c == []):
640
+ opt_dir="%s/%s"%(exp_root,exp_name)
641
+ config={
642
+ "inp_text":inp_text,
643
+ "exp_name":exp_name,
644
+ "opt_dir":opt_dir,
645
+ "pretrained_s2G":pretrained_s2G_path,
646
+ "s2config_path":"GPT_SoVITS/configs/s2.json",
647
+ "is_half": str(is_half)
648
+ }
649
+ gpu_names=gpu_numbers.split("-")
650
+ all_parts=len(gpu_names)
651
+ for i_part in range(all_parts):
652
+ config.update(
653
+ {
654
+ "i_part": str(i_part),
655
+ "all_parts": str(all_parts),
656
+ "_CUDA_VISIBLE_DEVICES": gpu_names[i_part],
657
+ }
658
+ )
659
+ os.environ.update(config)
660
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
661
+ print(cmd)
662
+ p = Popen(cmd, shell=True)
663
+ ps1c.append(p)
664
+ yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
665
+ for p in ps1c:
666
+ p.wait()
667
+ opt = ["item_name\tsemantic_audio"]
668
+ path_semantic = "%s/6-name2semantic.tsv" % opt_dir
669
+ for i_part in range(all_parts):
670
+ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
671
+ with open(semantic_path, "r", encoding="utf8") as f:
672
+ opt += f.read().strip("\n").split("\n")
673
+ os.remove(semantic_path)
674
+ with open(path_semantic, "w", encoding="utf8") as f:
675
+ f.write("\n".join(opt) + "\n")
676
+ ps1c=[]
677
+ yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
678
+ else:
679
+ yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
680
+
681
+ def close1c():
682
+ global ps1c
683
+ if (ps1c != []):
684
+ for p1c in ps1c:
685
+ try:
686
+ kill_process(p1c.pid)
687
+ except:
688
+ traceback.print_exc()
689
+ ps1c=[]
690
+ return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
691
+ #####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G
692
+ ps1abc=[]
693
+ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path):
694
+ global ps1abc
695
+ inp_text = my_utils.clean_path(inp_text)
696
+ inp_wav_dir = my_utils.clean_path(inp_wav_dir)
697
+ if (ps1abc == []):
698
+ opt_dir="%s/%s"%(exp_root,exp_name)
699
+ try:
700
+ #############################1a
701
+ path_text="%s/2-name2text.txt" % opt_dir
702
+ if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and len(open(path_text,"r",encoding="utf8").read().strip("\n").split("\n"))<2)):
703
+ config={
704
+ "inp_text":inp_text,
705
+ "inp_wav_dir":inp_wav_dir,
706
+ "exp_name":exp_name,
707
+ "opt_dir":opt_dir,
708
+ "bert_pretrained_dir":bert_pretrained_dir,
709
+ "is_half": str(is_half)
710
+ }
711
+ gpu_names=gpu_numbers1a.split("-")
712
+ all_parts=len(gpu_names)
713
+ for i_part in range(all_parts):
714
+ config.update(
715
+ {
716
+ "i_part": str(i_part),
717
+ "all_parts": str(all_parts),
718
+ "_CUDA_VISIBLE_DEVICES": gpu_names[i_part],
719
+ }
720
+ )
721
+ os.environ.update(config)
722
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
723
+ print(cmd)
724
+ p = Popen(cmd, shell=True)
725
+ ps1abc.append(p)
726
+ yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
727
+ for p in ps1abc:p.wait()
728
+
729
+ opt = []
730
+ for i_part in range(all_parts):#txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part)
731
+ txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
732
+ with open(txt_path, "r",encoding="utf8") as f:
733
+ opt += f.read().strip("\n").split("\n")
734
+ os.remove(txt_path)
735
+ with open(path_text, "w",encoding="utf8") as f:
736
+ f.write("\n".join(opt) + "\n")
737
+
738
+ yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
739
+ ps1abc=[]
740
+ #############################1b
741
+ config={
742
+ "inp_text":inp_text,
743
+ "inp_wav_dir":inp_wav_dir,
744
+ "exp_name":exp_name,
745
+ "opt_dir":opt_dir,
746
+ "cnhubert_base_dir":ssl_pretrained_dir,
747
+ }
748
+ gpu_names=gpu_numbers1Ba.split("-")
749
+ all_parts=len(gpu_names)
750
+ for i_part in range(all_parts):
751
+ config.update(
752
+ {
753
+ "i_part": str(i_part),
754
+ "all_parts": str(all_parts),
755
+ "_CUDA_VISIBLE_DEVICES": gpu_names[i_part],
756
+ }
757
+ )
758
+ os.environ.update(config)
759
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
760
+ print(cmd)
761
+ p = Popen(cmd, shell=True)
762
+ ps1abc.append(p)
763
+ yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
764
+ for p in ps1abc:p.wait()
765
+ yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
766
+ ps1abc=[]
767
+ #############################1c
768
+ path_semantic = "%s/6-name2semantic.tsv" % opt_dir
769
+ if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<31)):
770
+ config={
771
+ "inp_text":inp_text,
772
+ "exp_name":exp_name,
773
+ "opt_dir":opt_dir,
774
+ "pretrained_s2G":pretrained_s2G_path,
775
+ "s2config_path":"GPT_SoVITS/configs/s2.json",
776
+ }
777
+ gpu_names=gpu_numbers1c.split("-")
778
+ all_parts=len(gpu_names)
779
+ for i_part in range(all_parts):
780
+ config.update(
781
+ {
782
+ "i_part": str(i_part),
783
+ "all_parts": str(all_parts),
784
+ "_CUDA_VISIBLE_DEVICES": gpu_names[i_part],
785
+ }
786
+ )
787
+ os.environ.update(config)
788
+ cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
789
+ print(cmd)
790
+ p = Popen(cmd, shell=True)
791
+ ps1abc.append(p)
792
+ yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
793
+ for p in ps1abc:p.wait()
794
+
795
+ opt = ["item_name\tsemantic_audio"]
796
+ for i_part in range(all_parts):
797
+ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
798
+ with open(semantic_path, "r",encoding="utf8") as f:
799
+ opt += f.read().strip("\n").split("\n")
800
+ os.remove(semantic_path)
801
+ with open(path_semantic, "w",encoding="utf8") as f:
802
+ f.write("\n".join(opt) + "\n")
803
+ yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
804
+ ps1abc = []
805
+ yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
806
+ except:
807
+ traceback.print_exc()
808
+ close1abc()
809
+ yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
810
+ else:
811
+ yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
812
+
813
+ def close1abc():
814
+ global ps1abc
815
+ if (ps1abc != []):
816
+ for p1abc in ps1abc:
817
+ try:
818
+ kill_process(p1abc.pid)
819
+ except:
820
+ traceback.print_exc()
821
+ ps1abc=[]
822
+ return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
823
+
824
+ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
825
+ gr.Markdown("# <center>🌊💕🎶 XTTS 微调:2分钟语音,开启中日英16种语言真实拟声</center>")
826
+ gr.Markdown("## <center>🌟 只需2分钟的语音,一键在线微调 最强多语种模型</center>")
827
+ gr.Markdown("### <center>🤗 更多精彩,尽在[滔滔AI](https://www.talktalkai.com/);滔滔AI,为爱滔滔!💕</center>")
828
+
829
+ with gr.Tabs():
830
+ with gr.TabItem(i18n("1 - 制作数据集")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
831
+ #gr.Markdown(value=i18n("0a-UVR5人声伴奏分离&去混响去延迟工具"))
832
+ with gr.Row():
833
+ if_uvr5 = gr.Checkbox(label=i18n("是否开启UVR5-WebUI"),show_label=True, visible=False)
834
+ uvr5_info = gr.Textbox(label=i18n("UVR5进程输出信息"), visible=False)
835
+ gr.Markdown(value=i18n("1a-语音切分工具"))
836
+ with gr.Row():
837
+ with gr.Row():
838
+ slice_inp_path=gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"),info="您需要先在GPT-SoVITS-v2文件夹中上传训练音频,如jay.wav;音频时长建议大于2分钟",value="",placeholder="jay.wav")
839
+ slice_opt_root=gr.Textbox(label=i18n("切分后的子音频的输出根目录"),value="output/slicer_opt")
840
+ threshold=gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"),value="-34")
841
+ min_length=gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"),value="4000")
842
+ min_interval=gr.Textbox(label=i18n("min_interval:最短切割间隔"),value="300")
843
+ hop_size=gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"),value="10")
844
+ max_sil_kept=gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"),value="500")
845
+ with gr.Row():
846
+ open_slicer_button=gr.Button(i18n("1. 开启语音切割"), variant="primary",visible=True)
847
+ close_slicer_button=gr.Button(i18n("终止语音切割"), variant="primary",visible=False)
848
+ _max=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("max:归一化后最大值多少"),value=0.9,interactive=True)
849
+ alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True)
850
+ n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True)
851
+ slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
852
+ #gr.Markdown(value=i18n("0bb-语音降噪工具"))
853
+ with gr.Row():
854
+ open_denoise_button = gr.Button(i18n("开启语音降噪"), visible=False)
855
+ close_denoise_button = gr.Button(i18n("终止语音降噪进程"), variant="primary",visible=False)
856
+ denoise_input_dir=gr.Textbox(label=i18n("降噪音频文件输入文件夹"),value="", visible=False)
857
+ denoise_output_dir=gr.Textbox(label=i18n("降噪结果输出文件夹"),value="output/denoise_opt", visible=False)
858
+ denoise_info = gr.Textbox(label=i18n("语音降噪进程输出信息"), visible=False)
859
+ gr.Markdown(value=i18n("1b-批量语音识别"))
860
+ with gr.Row():
861
+ open_asr_button = gr.Button(i18n("2. 开启离线批量ASR"), variant="primary",visible=True)
862
+ close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False)
863
+ with gr.Column():
864
+ with gr.Row():
865
+ asr_inp_dir = gr.Textbox(
866
+ label=i18n("输入文件夹路径"),
867
+ value="output/slicer_opt",
868
+ interactive=True,
869
+ )
870
+ asr_opt_dir = gr.Textbox(
871
+ label = i18n("输出文件夹路径"),
872
+ value = "output/asr_opt",
873
+ interactive = True,
874
+ )
875
+ with gr.Row():
876
+ asr_model = gr.Dropdown(
877
+ label = i18n("ASR 模型"),
878
+ choices = list(asr_dict.keys()),
879
+ interactive = True,
880
+ value="达摩 ASR (中文)"
881
+ )
882
+ asr_size = gr.Dropdown(
883
+ label = i18n("ASR 模型尺寸"),
884
+ choices = ["large"],
885
+ interactive = True,
886
+ value="large"
887
+ )
888
+ asr_lang = gr.Dropdown(
889
+ label = i18n("ASR 语言设置"),
890
+ choices = ["zh"],
891
+ interactive = True,
892
+ value="zh"
893
+ )
894
+ lang = asr_lang
895
+ with gr.Row():
896
+ asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
897
+
898
+ def change_lang_choices(key): #根据选择的模型修改可选的语言
899
+ # return gr.Dropdown(choices=asr_dict[key]['lang'])
900
+ return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
901
+ def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸
902
+ # return gr.Dropdown(choices=asr_dict[key]['size'])
903
+ return {"__type__": "update", "choices": asr_dict[key]['size']}
904
+ asr_model.change(change_lang_choices, [asr_model], [asr_lang])
905
+ asr_model.change(change_size_choices, [asr_model], [asr_size])
906
+
907
+ gr.Markdown(value=i18n("1c-语音文本校对标注工具"))
908
+ with gr.Row():
909
+ if_label = gr.Checkbox(label=i18n("是否开启打标WebUI"),show_label=True)
910
+ path_list = gr.Textbox(
911
+ label=i18n(".list标注文件的路径"),
912
+ value="output/asr_opt/slicer_opt.list",
913
+ interactive=True,
914
+ )
915
+ label_info = gr.Textbox(label=i18n("打标工具进程输出信息"))
916
+ if_label.change(change_label, [if_label,path_list], [label_info])
917
+ if_uvr5.change(change_uvr5, [if_uvr5], [uvr5_info])
918
+ open_asr_button.click(open_asr, [asr_inp_dir, asr_opt_dir, asr_model, asr_size, asr_lang], [asr_info,open_asr_button,close_asr_button])
919
+ close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button])
920
+ open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button])
921
+ close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button])
922
+ open_denoise_button.click(open_denoise, [denoise_input_dir,denoise_output_dir], [denoise_info,open_denoise_button,close_denoise_button])
923
+ close_denoise_button.click(close_denoise, [], [denoise_info,open_denoise_button,close_denoise_button])
924
+
925
+ with gr.Tab("2 - XTTS模型微调"):
926
+ inp_list_path_value = str(Path.cwd() / "output/asr_opt/slicer_opt.list")
927
+ out_csv_path_value = str(Path.cwd() / "output.csv")
928
+ inp_list_path = gr.Textbox(value=inp_list_path_value, label=".list文件地址")
929
+ out_csv_path = gr.Textbox(value=out_csv_path_value, label=".csv文件地址")
930
+ list_to_csv = gr.Button("3. 准备训练csv文件", variant="primary")
931
+ train_csv = gr.Textbox(
932
+ label="训练数据集csv文件",
933
+ )
934
+ eval_csv = gr.Textbox(
935
+ label="评价数据集csv文件",
936
+ )
937
+ list_to_csv.click(convert_list_to_csv, [inp_list_path, out_csv_path], [train_csv, eval_csv])
938
+ out_path_value = str(Path.cwd() / "finetune_models")
939
+ out_path = gr.Textbox(value=out_path_value, label="XTTS微调模型的文件夹")
940
+ num_epochs = gr.Slider(
941
+ label="训练步数 Number of epochs:",
942
+ minimum=1,
943
+ maximum=100,
944
+ step=1,
945
+ value=6,
946
+ )
947
+ batch_size = gr.Slider(
948
+ label="Batch size:",
949
+ minimum=2,
950
+ maximum=512,
951
+ step=1,
952
+ value=2,
953
+ )
954
+ grad_acumm = gr.Slider(
955
+ label="Grad accumulation steps:",
956
+ minimum=1,
957
+ maximum=128,
958
+ step=1,
959
+ value=1,
960
+ )
961
+ max_audio_length = gr.Slider(
962
+ label="Max permitted audio size in seconds:",
963
+ minimum=2,
964
+ maximum=20,
965
+ step=1,
966
+ value=11,
967
+ visible=False,
968
+ )
969
+ progress_train = gr.Label(
970
+ label="训练进程"
971
+ )
972
+ logs_tts_train = gr.Textbox(
973
+ label="训练详细信息",
974
+ interactive=False,
975
+ )
976
+ app.load(read_logs, None, logs_tts_train, every=1)
977
+ train_btn = gr.Button(value="4. 开始模型训练", variant="primary")
978
+
979
+ def train_model(language, train_csv, eval_csv, num_epochs, batch_size, grad_acumm, output_path, max_audio_length):
980
+ clear_gpu_cache()
981
+ if not train_csv or not eval_csv:
982
+ return "You need to run the data processing step or manually set `Train CSV` and `Eval CSV` fields !", "", "", "", ""
983
+ try:
984
+ # convert seconds to waveform frames
985
+ max_audio_length = int(max_audio_length * 22050)
986
+ config_path, original_xtts_checkpoint, vocab_file, exp_path, speaker_wav = train_gpt(language, num_epochs, batch_size, grad_acumm, train_csv, eval_csv, output_path=output_path, max_audio_length=max_audio_length)
987
+ except:
988
+ traceback.print_exc()
989
+ error = traceback.format_exc()
990
+ return f"The training was interrupted due an error !! Please check the console to check the full error message! \n Error summary: {error}", "", "", "", ""
991
+
992
+ # copy original files to avoid parameters changes issues
993
+ os.system(f"cp {config_path} {exp_path}")
994
+ os.system(f"cp {vocab_file} {exp_path}")
995
+
996
+ ft_xtts_checkpoint = os.path.join(exp_path, "best_model.pth")
997
+ print("Model training done!")
998
+ clear_gpu_cache()
999
+ return "Model training done!", config_path, vocab_file, ft_xtts_checkpoint, speaker_wav
1000
+
1001
+ with gr.Tab("3 - XTTS语音合成"):
1002
+ with gr.Row():
1003
+ with gr.Column() as col1:
1004
+ xtts_checkpoint = gr.Textbox(
1005
+ label="XTTS checkpoint 路径",
1006
+ value="",
1007
+ )
1008
+ xtts_config = gr.Textbox(
1009
+ label="XTTS config 路径",
1010
+ value="",
1011
+ )
1012
+
1013
+ xtts_vocab = gr.Textbox(
1014
+ label="XTTS vocab 路径",
1015
+ value="",
1016
+ )
1017
+ progress_load = gr.Label(
1018
+ label="模型加载进程"
1019
+ )
1020
+ load_btn = gr.Button(value="5. 加载已训练好的模型", variant="primary")
1021
+
1022
+ with gr.Column() as col2:
1023
+ ref_audio_names = os.listdir("output/slicer_opt")
1024
+ ref_audio_list = [os.path.join("output/slicer_opt", ref_audio_name) for ref_audio_name in ref_audio_names]
1025
+ speaker_reference_audio = gr.Dropdown(
1026
+ label="请选择一条参考音频",
1027
+ info="不同参考音频对应的合成效果不同,您可以多次尝试",
1028
+ value=ref_audio_list[0],
1029
+ choices = ref_audio_list
1030
+ )
1031
+ tts_language = gr.Dropdown(
1032
+ label="语音合成的语言",
1033
+ value="zh",
1034
+ choices=[
1035
+ "en",
1036
+ "es",
1037
+ "fr",
1038
+ "de",
1039
+ "it",
1040
+ "pt",
1041
+ "pl",
1042
+ "tr",
1043
+ "ru",
1044
+ "nl",
1045
+ "cs",
1046
+ "ar",
1047
+ "zh",
1048
+ "hu",
1049
+ "ko",
1050
+ "ja",
1051
+ ]
1052
+ )
1053
+
1054
+ tts_text = gr.Textbox(
1055
+ label="请填写语音合成的文本.",
1056
+ placeholder="想说却还没说的,还很多",
1057
+ )
1058
+ tts_btn = gr.Button(value="6. 开启AI语音之旅吧💕", variant="primary")
1059
+
1060
+ with gr.Column() as col3:
1061
+ progress_gen = gr.Label(
1062
+ label="语音合成进程"
1063
+ )
1064
+ tts_output_audio = gr.Audio(label="为您合成的专属音频.")
1065
+ reference_audio = gr.Audio(label="您使用的参考音频")
1066
+
1067
+ train_btn.click(
1068
+ fn=train_model,
1069
+ inputs=[
1070
+ lang,
1071
+ train_csv,
1072
+ eval_csv,
1073
+ num_epochs,
1074
+ batch_size,
1075
+ grad_acumm,
1076
+ out_path,
1077
+ max_audio_length,
1078
+ ],
1079
+ outputs=[progress_train, xtts_config, xtts_vocab, xtts_checkpoint, speaker_reference_audio],
1080
+ )
1081
+
1082
+ load_btn.click(
1083
+ fn=load_model,
1084
+ inputs=[
1085
+ xtts_checkpoint,
1086
+ xtts_config,
1087
+ xtts_vocab
1088
+ ],
1089
+ outputs=[progress_load],
1090
+ )
1091
+
1092
+ tts_btn.click(
1093
+ fn=run_tts,
1094
+ inputs=[
1095
+ tts_language,
1096
+ tts_text,
1097
+ speaker_reference_audio,
1098
+ ],
1099
+ outputs=[progress_gen, tts_output_audio, reference_audio],
1100
+ )
1101
+
1102
+ gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。请自觉合规使用此程序,程序开发者不负有任何责任。</center>")
1103
+ gr.HTML('''
1104
+ <div class="footer">
1105
+ <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
1106
+ </p>
1107
+ </div>
1108
+ ''')
1109
+ app.queue().launch(
1110
+ share=True,
1111
+ show_error=True,
1112
+ )