import datetime
import json
import logging
import os
import pathlib
import shutil
import subprocess
import sys
import threading
import traceback
import warnings
from random import shuffle
from subprocess import Popen
from time import sleep

import fairseq
import faiss
import gradio as gr
import numpy as np
import torch
from dotenv import load_dotenv
from mega import Mega  # mega.py package, used for mega.nz downloads
from sklearn.cluster import MiniBatchKMeans

# repository root; local packages are imported relative to this directory
now_dir = os.getcwd()
sys.path.append(now_dir)

from configs.config import Config
from i18n.i18n import I18nAuto
from infer.lib.train.process_ckpt import (
    change_info,
    extract_small_model,
    merge,
    show_info,
)
from infer.modules.uvr5.modules import uvr
from infer.modules.vc.modules import VC

logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)

load_dotenv()
config = Config()
vc = VC(config)

if config.dml == True:

    def forward_dml(ctx, x, scale):
        ctx.scale = scale
        res = x.clone().detach()
        return res

    fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml

i18n = I18nAuto()
logger.info(i18n)

# Check whether an NVIDIA GPU usable for training and accelerated inference is present
ngpu = torch.cuda.device_count()
gpu_infos = []
mem = []
if_gpu_ok = False

if torch.cuda.is_available() or ngpu != 0:
    for i in range(ngpu):
        gpu_name = torch.cuda.get_device_name(i)
        if any(
            value in gpu_name.upper()
            for value in [
                "10",
                "16",
                "20",
                "30",
                "40",
                "A2",
                "A3",
                "A4",
                "P4",
                "A50",
                "500",
                "A60",
                "70",
                "80",
                "90",
                "M4",
                "T4",
                "TITAN",
            ]
        ):  # A10#A100#V100#A40#P40#M40#K80#A4500
            if_gpu_ok = True  # at least one usable NVIDIA GPU
            gpu_infos.append("%s\t%s" % (i, gpu_name))
            mem.append(
                int(
                    torch.cuda.get_device_properties(i).total_memory
                    / 1024
                    / 1024
                    / 1024
                    + 0.4
                )
            )
if if_gpu_ok and len(gpu_infos) > 0:
    gpu_info = "\n".join(gpu_infos)
    default_batch_size = min(mem) // 2
else:
    gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
    default_batch_size = 1
gpus = "-".join([i[0] for i in gpu_infos])


class ToolButton(gr.Button, gr.components.FormComponent):
    """Small button with single emoji as text, fits inside gradio forms"""

    def __init__(self, **kwargs):
        super().__init__(variant="tool", **kwargs)

    def get_block_name(self):
        return "button"


weight_root = os.getenv("weight_root")
weight_uvr5_root = os.getenv("weight_uvr5_root")
index_root = os.getenv("index_root")

names = []
for name in os.listdir(weight_root):
    if name.endswith(".pth"):
        names.append(name)
index_paths = []
for root, dirs, files in os.walk(index_root, topdown=False):
    for name in files:
        if name.endswith(".index") and "trained" not in name:
            index_paths.append("%s/%s" % (root, name))
uvr5_names = []
for name in os.listdir(weight_uvr5_root):
    if name.endswith(".pth") or "onnx" in name:
        uvr5_names.append(name.replace(".pth", ""))


def change_choices():
    names = []
    for name in os.listdir(weight_root):
        if name.endswith(".pth"):
            names.append(name)
    index_paths = []
    for root, dirs, files in os.walk(index_root, topdown=False):
        for name in files:
            if name.endswith(".index") and "trained" not in name:
                index_paths.append("%s/%s" % (root, name))
    audio_files = []
    for filename in os.listdir("./audios"):
        if filename.endswith((".wav", ".mp3", ".ogg")):
            audio_files.append("./audios/" + filename)
    return (
        {"choices": sorted(names), "__type__": "update"},
        {"choices": sorted(index_paths), "__type__": "update"},
        {"choices": sorted(audio_files), "__type__": "update"},
    )


def clean():
    return {"value": "", "__type__": "update"}


def export_onnx():
    from infer.modules.onnx.export import export_onnx as eo

    eo()
sr_dict = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}


def if_done(done, p):
    while 1:
        if p.poll() is None:
            sleep(0.5)
        else:
            break
    done[0] = True


def if_done_multi(done, ps):
    while 1:
        # poll() is None means the process has not finished yet;
        # keep waiting as long as any process is still running
        flag = 1
        for p in ps:
            if p.poll() is None:
                flag = 0
                sleep(0.5)
                break
        if flag == 1:
            break
    done[0] = True


def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
    sr = sr_dict[sr]
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
    f.close()
    per = 3.0 if config.is_half else 3.7
    cmd = '"%s" infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' % (
        config.python_cmd, trainset_dir, sr, n_p, now_dir, exp_dir, config.noparallel, per,
    )
    logger.info(cmd)
    p = Popen(cmd, shell=True)  # , stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
    # gradio only surfaces Popen output once the process has finished,
    # so stream the log file on a timer instead of reading stdout directly
    done = [False]
    threading.Thread(target=if_done, args=(done, p)).start()
    while 1:
        with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
            yield (f.read())
        sleep(1)
        if done[0]:
            break
    with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
        log = f.read()
    logger.info(log)
    yield log


# but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvpe):
    gpus = gpus.split("-")
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
    f.close()
    if if_f0:
        if f0method != "rmvpe_gpu":
            cmd = (
                '"%s" infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s'
                % (config.python_cmd, now_dir, exp_dir, n_p, f0method)
            )
            logger.info(cmd)
            p = Popen(
                cmd, shell=True, cwd=now_dir
            )  # , stdin=PIPE, stdout=PIPE, stderr=PIPE
            # stream the log file on a timer (see note in preprocess_dataset)
            done = [False]
            threading.Thread(target=if_done, args=(done, p)).start()
        else:
            if gpus_rmvpe != "-":
                gpus_rmvpe = gpus_rmvpe.split("-")
                leng = len(gpus_rmvpe)
                ps = []
                for idx, n_g in enumerate(gpus_rmvpe):
                    cmd = (
                        '"%s" infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s '
                        % (config.python_cmd, leng, idx, n_g, now_dir, exp_dir, config.is_half)
                    )
                    logger.info(cmd)
                    p = Popen(
                        cmd, shell=True, cwd=now_dir
                    )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
                    ps.append(p)
                # stream the log file on a timer (see note in preprocess_dataset)
                done = [False]
                threading.Thread(target=if_done_multi, args=(done, ps)).start()
            else:
                cmd = (
                    config.python_cmd
                    + ' infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" '
                    % (now_dir, exp_dir)
                )
                logger.info(cmd)
                p = Popen(
                    cmd, shell=True, cwd=now_dir
                )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
                p.wait()
                done = [True]
        while 1:
            with open(
                "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
            ) as f:
                yield (f.read())
            sleep(1)
            if done[0]:
                break
        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
            log = f.read()
        logger.info(log)
        yield log
    # spawn one feature-extraction process per part/GPU
    """
    n_part=int(sys.argv[1])
    i_part=int(sys.argv[2])
    i_gpu=sys.argv[3]
    exp_dir=sys.argv[4]
    os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
    """
    leng = len(gpus)
    ps = []
    for idx, n_g in enumerate(gpus):
        cmd = (
            '"%s" infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s'
            % (config.python_cmd, config.device, leng, idx, n_g, now_dir, exp_dir, version19)
        )
        logger.info(cmd)
        p = Popen(
            cmd, shell=True, cwd=now_dir
        )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
        ps.append(p)
    # stream the log file on a timer (see note in preprocess_dataset)
    done = [False]
    threading.Thread(target=if_done_multi, args=(done, ps)).start()
    while 1:
        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
            yield (f.read())
        sleep(1)
        if done[0]:
            break
    with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
        log = f.read()
    logger.info(log)
    yield log


def get_pretrained_models(path_str, f0_str, sr2):
    if_pretrained_generator_exist = os.access(
        "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
    )
    if_pretrained_discriminator_exist = os.access(
        "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
    )
    if not if_pretrained_generator_exist:
        logger.warning(
            "assets/pretrained%s/%sG%s.pth not exist, will not use pretrained model",
            path_str,
            f0_str,
            sr2,
        )
    if not if_pretrained_discriminator_exist:
        logger.warning(
            "assets/pretrained%s/%sD%s.pth not exist, will not use pretrained model",
            path_str,
            f0_str,
            sr2,
        )
    return (
        "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
        if if_pretrained_generator_exist
        else "",
        "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
        if if_pretrained_discriminator_exist
        else "",
    )


def change_sr2(sr2, if_f0_3, version19):
    path_str = "" if version19 == "v1" else "_v2"
    f0_str = "f0" if if_f0_3 else ""
    return get_pretrained_models(path_str, f0_str, sr2)


def change_version19(sr2, if_f0_3, version19):
    path_str = "" if version19 == "v1" else "_v2"
    if sr2 == "32k" and version19 == "v1":
        sr2 = "40k"
    to_return_sr2 = (
        {"choices": ["40k", "48k"], "__type__": "update", "value": sr2}
        if version19 == "v1"
        else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2}
    )
    f0_str = "f0" if if_f0_3 else ""
    return (
        *get_pretrained_models(path_str, f0_str, sr2),
        to_return_sr2,
    )


def change_f0(if_f0_3, sr2, version19):  # f0method8,pretrained_G14,pretrained_D15
    path_str = "" if version19 == "v1" else "_v2"
    return (
        {"visible": if_f0_3, "__type__": "update"},
        *get_pretrained_models(path_str, "f0", sr2),
    )


# but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
def click_train(
    exp_dir1,
    sr2,
    if_f0_3,
    spk_id5,
    save_epoch10,
    total_epoch11,
    batch_size12,
    if_save_latest13,
    pretrained_G14,
    pretrained_D15,
    gpus16,
    if_cache_gpu17,
    if_save_every_weights18,
    version19,
):
    # build the training filelist
    exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )
    if if_f0_3:
        f0_dir = "%s/2a_f0" % (exp_dir)
        f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
        names = (
            set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
            & set([name.split(".")[0] for name in os.listdir(feature_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
        )
    else:
        names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
            [name.split(".")[0] for name in os.listdir(feature_dir)]
        )
    opt = []
    for name in names:
        if if_f0_3:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    f0_dir.replace("\\", "\\\\"),
                    name,
                    f0nsf_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
        else:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
    fea_dim = 256 if version19 == "v1" else 768
    if if_f0_3:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
            )
    else:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, spk_id5)
            )
    shuffle(opt)
    with open("%s/filelist.txt" % exp_dir, "w") as f:
        f.write("\n".join(opt))
    logger.debug("Write filelist done")
    # write the training config (no separate config-generation step is needed)
    # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
    logger.info("Use gpus: %s", str(gpus16))
    if pretrained_G14 == "":
        logger.info("No pretrained Generator")
    if pretrained_D15 == "":
        logger.info("No pretrained Discriminator")
    if version19 == "v1" or sr2 == "40k":
        config_path = "v1/%s.json" % sr2
    else:
        config_path = "v2/%s.json" % sr2
    config_save_path = os.path.join(exp_dir, "config.json")
    if not pathlib.Path(config_save_path).exists():
        with open(config_save_path, "w", encoding="utf-8") as f:
            json.dump(
                config.json_config[config_path],
                f,
                ensure_ascii=False,
                indent=4,
                sort_keys=True,
            )
            f.write("\n")
    if gpus16:
        cmd = (
            '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'
            % (
                config.python_cmd,
                exp_dir1,
                sr2,
                1 if if_f0_3 else 0,
                batch_size12,
                gpus16,
                total_epoch11,
                save_epoch10,
                "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "",
                "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "",
                1 if if_save_latest13 == i18n("是") else 0,
                1 if if_cache_gpu17 == i18n("是") else 0,
                1 if if_save_every_weights18 == i18n("是") else 0,
                version19,
            )
        )
    else:
        cmd = (
            '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'
            % (
                config.python_cmd,
                exp_dir1,
                sr2,
                1 if if_f0_3 else 0,
                batch_size12,
                total_epoch11,
                save_epoch10,
                "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "",
                "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "",
                1 if if_save_latest13 == i18n("是") else 0,
                1 if if_cache_gpu17 == i18n("是") else 0,
                1 if if_save_every_weights18 == i18n("是") else 0,
                version19,
            )
        )
    logger.info(cmd)
    p = Popen(cmd, shell=True, cwd=now_dir)
    p.wait()
    return "Training finished. Check the console log or train.log in the experiment folder."


# but4.click(train_index, [exp_dir1], info3)
def train_index(exp_dir1, version19):
    # exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
    exp_dir = "logs/%s" % (exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )
    if not os.path.exists(feature_dir):
        return "Please run feature extraction first!"
    listdir_res = list(os.listdir(feature_dir))
    if len(listdir_res) == 0:
        return "Please run feature extraction first!"
    infos = []
    npys = []
    for name in sorted(listdir_res):
        phone = np.load("%s/%s" % (feature_dir, name))
        npys.append(phone)
    big_npy = np.concatenate(npys, 0)
    big_npy_idx = np.arange(big_npy.shape[0])
    np.random.shuffle(big_npy_idx)
    big_npy = big_npy[big_npy_idx]
    if big_npy.shape[0] > 2e5:
        infos.append("Trying kmeans on %s-shape features to 10k centers." % big_npy.shape[0])
        yield "\n".join(infos)
        try:
            big_npy = (
                MiniBatchKMeans(
                    n_clusters=10000,
                    verbose=True,
                    batch_size=256 * config.n_cpu,
                    compute_labels=False,
                    init="random",
                )
                .fit(big_npy)
                .cluster_centers_
            )
        except:
            info = traceback.format_exc()
            logger.info(info)
            infos.append(info)
            yield "\n".join(infos)

    np.save("%s/total_fea.npy" % exp_dir, big_npy)
    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    infos.append("%s,%s" % (big_npy.shape, n_ivf))
    yield "\n".join(infos)
    index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
    # index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,PQ128x4fs,RFlat" % n_ivf)
    infos.append("training")
    yield "\n".join(infos)
    index_ivf = faiss.extract_index_ivf(index)
    index_ivf.nprobe = 1
    index.train(big_npy)
    faiss.write_index(
        index,
        "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    infos.append("adding")
    yield "\n".join(infos)
    batch_size_add = 8192
    for i in range(0, big_npy.shape[0], batch_size_add):
        index.add(big_npy[i : i + batch_size_add])
    faiss.write_index(
        index,
        "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    infos.append(
        "Index built successfully: added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
    )
    # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
    # infos.append("Index built successfully: added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
    yield "\n".join(infos)


# but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
def train1key(
    exp_dir1,
    sr2,
    if_f0_3,
    trainset_dir4,
    spk_id5,
    np7,
    f0method8,
    save_epoch10,
    total_epoch11,
    batch_size12,
    if_save_latest13,
    pretrained_G14,
    pretrained_D15,
    gpus16,
    if_cache_gpu17,
    if_save_every_weights18,
    version19,
    gpus_rmvpe,
):
    infos = []

    def get_info_str(strr):
        infos.append(strr)
        return "\n".join(infos)

    # step 1: preprocess the dataset
    yield get_info_str(i18n("step1:正在处理数据"))
    [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)]

    # step 2: extract pitch and features
    yield get_info_str(i18n("step2:正在提取音高&正在提取特征"))
    [
        get_info_str(_)
        for _ in extract_f0_feature(
            gpus16, np7, f0method8, if_f0_3, exp_dir1, version19, gpus_rmvpe
        )
    ]

    # step 3a: train the model
    yield get_info_str(i18n("step3a:正在训练模型"))
    click_train(
        exp_dir1,
        sr2,
        if_f0_3,
        spk_id5,
        save_epoch10,
        total_epoch11,
        batch_size12,
        if_save_latest13,
        pretrained_G14,
        pretrained_D15,
        gpus16,
        if_cache_gpu17,
        if_save_every_weights18,
        version19,
    )
    yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"))

    # step 3b: train the index
    [get_info_str(_) for _ in train_index(exp_dir1, version19)]
    yield get_info_str(i18n("全流程结束!"))


# ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
def change_info_(ckpt_path):
    if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")):
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
    try:
        with open(
            ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r"
        ) as f:
            info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
            sr, f0 = info["sample_rate"], info["if_f0"]
            version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
            return sr, str(f0), version
    except:
        traceback.print_exc()
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}


F0GPUVisible = config.dml == False
def change_f0_method(f0method8):
    if f0method8 == "rmvpe_gpu":
        visible = F0GPUVisible
    else:
        visible = False
    return {"visible": visible, "__type__": "update"}


def find_model():
    if len(names) > 0:
        vc.get_vc(sorted(names)[0], None, None)
        return sorted(names)[0]
    else:
        try:
            gr.Info("Do not forget to choose a model.")
        except:
            pass
        return ""


def find_audios(index=False):
    audio_files = []
    if not os.path.exists("./audios"):
        os.mkdir("./audios")
    for filename in os.listdir("./audios"):
        if filename.endswith((".wav", ".mp3", ".ogg")):
            audio_files.append("./audios/" + filename)
    if index:
        if len(audio_files) > 0:
            return sorted(audio_files)[0]
        else:
            return ""
    elif len(audio_files) > 0:
        return sorted(audio_files)
    else:
        return []


def get_index():
    if find_model() != "":
        chosen_model = sorted(names)[0].split(".")[0]
        logs_path = "./logs/" + chosen_model
        if os.path.exists(logs_path):
            for file in os.listdir(logs_path):
                if file.endswith(".index"):
                    return os.path.join(logs_path, file)
            return ""
        else:
            return ""


def get_indexes():
    indexes_list = []
    for dirpath, dirnames, filenames in os.walk("./logs/"):
        for filename in filenames:
            if filename.endswith(".index"):
                indexes_list.append(os.path.join(dirpath, filename))
    if len(indexes_list) > 0:
        return indexes_list
    else:
        return ""


def save_wav(file):
    try:
        file_path = file.name
        shutil.move(file_path, "./audios")
        return "./audios/" + os.path.basename(file_path)
    except AttributeError:
        try:
            new_name = "kpop" + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".wav"
            new_path = "./audios/" + new_name
            shutil.move(file, new_path)
            return new_path
        except TypeError:
            return None


def download_from_url(url, model):
    if url == "":
        return "URL cannot be left empty."
    if model == "":
        return "You need to name your model (include the model maker's name), e.g. ModelName-MakerName."
    url = url.strip()
    zip_dirs = ["zips", "unzips"]
    for directory in zip_dirs:
        if os.path.exists(directory):
            shutil.rmtree(directory)
    os.makedirs("zips", exist_ok=True)
    os.makedirs("unzips", exist_ok=True)
    zipfile = model + ".zip"
    zipfile_path = "./zips/" + zipfile
    try:
        if "drive.google.com" in url:
            subprocess.run(["gdown", url, "--fuzzy", "-O", zipfile_path])
        elif "mega.nz" in url:
            m = Mega()
            m.download_url(url, "./zips")
        else:
            subprocess.run(["wget", url, "-O", zipfile_path])
        for filename in os.listdir("./zips"):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join("./zips/", filename)
                shutil.unpack_archive(zipfile_path, "./unzips", "zip")
            else:
                return "No zipfile found."
        for root, dirs, files in os.walk("./unzips"):
            for file in files:
                file_path = os.path.join(root, file)
                if file.endswith(".index"):
                    os.mkdir(f"./logs/{model}")
                    shutil.copy2(file_path, f"./logs/{model}")
                elif "G_" not in file and "D_" not in file and file.endswith(".pth"):
                    shutil.copy(file_path, f"./assets/weights/{model}.pth")
        shutil.rmtree("zips")
        shutil.rmtree("unzips")
        return "Model successfully imported. (If you used a Google Drive link, it may not have worked even though this reports success.)"
    except:
        return "There has been an error. Check your link and try again. (It may also have worked and this is a false error.)"
def upload_to_dataset(files, dir):
    if dir == "":
        dir = "./dataset/" + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    if not os.path.exists(dir):
        os.makedirs(dir)
    for file in files:
        path = file.name
        shutil.copy2(path, dir)
    try:
        gr.Info(i18n("处理数据"))
    except:
        pass
    return i18n("处理数据"), {"value": dir, "__type__": "update"}


def download_model_files(model):
    model_found = False
    index_found = False
    if os.path.exists(f"./assets/weights/{model}.pth"):
        model_found = True
    if os.path.exists(f"./logs/{model}"):
        for file in os.listdir(f"./logs/{model}"):
            if file.endswith(".index") and "added" in file:
                log_file = file
                index_found = True
    if model_found and index_found:
        return [f"./assets/weights/{model}.pth", f"./logs/{model}/{log_file}"], "Done"
    elif model_found and not index_found:
        return f"./assets/weights/{model}.pth", "Could not find the index file."
    elif index_found and not model_found:
        return (
            f"./logs/{model}/{log_file}",
            f"Make sure the voice name is correct; could not find {model}.pth.",
        )
    else:
        return None, f"Could not find {model}.pth or a corresponding index file."


with gr.Blocks(
    title="KPOPEASYGUI 🔊",
    theme=gr.themes.Base(primary_hue="rose", secondary_hue="pink", neutral_hue="slate"),
) as app:
    with gr.Row():
        gr.HTML("image/gif")
    with gr.Tabs():
        with gr.TabItem(i18n("模型推理")):
            with gr.Row():
                sid0 = gr.Dropdown(
                    label=i18n("推理音色"), choices=sorted(names), value=find_model()
                )
                refresh_button = gr.Button(i18n("刷新音色列表和索引路径"), variant="primary")
                # clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary")
                spk_item = gr.Slider(
                    minimum=0, maximum=2333, step=1,
                    label=i18n("请选择说话人id"),
                    value=0, visible=False, interactive=True,
                )
                # clean_button.click(
                #     fn=clean, inputs=[], outputs=[sid0], api_name="infer_clean"
                # )
                vc_transform0 = gr.Number(
                    label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
                )
                but0 = gr.Button(i18n("转换"), variant="primary")
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        dropbox = gr.File(label="Drop your audio here & hit the Reload button.")
                    with gr.Row():
                        record_button = gr.Audio(
                            source="microphone", label="OR Record audio.", type="filepath"
                        )
                    with gr.Row():
                        input_audio0 = gr.Dropdown(
                            label=i18n("输入待处理音频文件路径(默认是正确格式示例)"),
                            value=find_audios(True),
                            choices=find_audios(),
                        )
                        record_button.change(
                            fn=save_wav, inputs=[record_button], outputs=[input_audio0]
                        )
                        dropbox.upload(fn=save_wav, inputs=[dropbox], outputs=[input_audio0])
                with gr.Column():
                    with gr.Accordion(label=i18n("自动检测index路径,下拉式选择(dropdown)"), open=False):
                        file_index2 = gr.Dropdown(
                            label=i18n("自动检测index路径,下拉式选择(dropdown)"),
                            choices=get_indexes(),
                            interactive=True,
                            value=get_index(),
                        )
                        index_rate1 = gr.Slider(
                            minimum=0, maximum=1,
                            label=i18n("检索特征占比"),
                            value=0.80, interactive=True,
                        )
                    vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)"))
                    with gr.Accordion(label=i18n("常规设置"), open=False):
                        f0method0 = gr.Radio(
                            label=i18n(
                                "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU"
                            ),
                            choices=["pm", "harvest", "crepe", "rmvpe"]
                            if config.dml == False
                            else ["pm", "harvest", "rmvpe"],
                            value="rmvpe",
                            interactive=True,
                        )
                        filter_radius0 = gr.Slider(
                            minimum=0, maximum=7,
                            label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
                            value=3, step=1, interactive=True,
                        )
                        resample_sr0 = gr.Slider(
                            minimum=0, maximum=48000,
                            label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
                            value=0, step=1, interactive=True, visible=False,
                        )
                        rms_mix_rate0 = gr.Slider(
                            minimum=0, maximum=1,
                            label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
                            value=0.21, interactive=True,
                        )
                        protect0 = gr.Slider(
                            minimum=0, maximum=0.5,
                            label=i18n(
                                "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
                            ),
                            value=0.28, step=0.01, interactive=True,
                        )
                        file_index1 = gr.Textbox(
                            label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
                            value="", interactive=True, visible=False,
                        )
            refresh_button.click(
                fn=change_choices,
                inputs=[],
                outputs=[sid0, file_index2, input_audio0],
                api_name="infer_refresh",
            )
            # file_big_npy1 = gr.Textbox(
            #     label=i18n("特征文件路径"),
            #     value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
            #     interactive=True,
            # )
            with gr.Row():
                f0_file = gr.File(
                    label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False
                )
            with gr.Row():
                vc_output1 = gr.Textbox(label=i18n("输出信息"))
            but0.click(
                vc.vc_single,
                [
                    spk_item,
                    input_audio0,
                    vc_transform0,
                    f0_file,
                    f0method0,
                    file_index1,
                    file_index2,
                    # file_big_npy1,
                    index_rate1,
                    filter_radius0,
                    resample_sr0,
                    rms_mix_rate0,
                    protect0,
                ],
                [vc_output1, vc_output2],
                api_name="infer_convert",
            )
            with gr.Row():
                with gr.Accordion(
                    open=False,
                    label=i18n(
                        "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. "
                    ),
                ):
                    with gr.Row():
                        opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
                        vc_transform1 = gr.Number(
                            label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
                        )
                        f0method1 = gr.Radio(
                            label=i18n(
                                "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU"
                            ),
                            choices=["pm", "harvest", "crepe", "rmvpe"]
                            if config.dml == False
                            else ["pm", "harvest", "rmvpe"],
                            value="pm",
                            interactive=True,
                        )
                    with gr.Row():
                        filter_radius1 = gr.Slider(
                            minimum=0, maximum=7,
                            label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
                            value=3, step=1, interactive=True, visible=False,
                        )
                    with gr.Row():
                        file_index3 = gr.Textbox(
                            label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
                            value="", interactive=True, visible=False,
                        )
                        file_index4 = gr.Dropdown(
                            label=i18n("自动检测index路径,下拉式选择(dropdown)"),
                            choices=sorted(index_paths),
                            interactive=True,
                            visible=False,
                        )
                        refresh_button.click(
                            fn=lambda: change_choices()[1],
                            inputs=[],
                            outputs=file_index4,
                            api_name="infer_refresh_batch",
                        )
                        # file_big_npy2 = gr.Textbox(
                        #     label=i18n("特征文件路径"),
                        #     value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
                        #     interactive=True,
                        # )
                        index_rate2 = gr.Slider(
                            minimum=0, maximum=1,
                            label=i18n("检索特征占比"),
                            value=1, interactive=True, visible=False,
                        )
                    with gr.Row():
                        resample_sr1 = gr.Slider(
                            minimum=0, maximum=48000,
                            label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
                            value=0, step=1, interactive=True, visible=False,
                        )
                        rms_mix_rate1 = gr.Slider(
                            minimum=0, maximum=1,
                            label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
                            value=0.21, interactive=True,
                        )
                        protect1 = gr.Slider(
                            minimum=0, maximum=0.5,
                            label=i18n(
                                "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
                            ),
                            value=0.28, step=0.01, interactive=True,
                        )
                    with gr.Row():
                        dir_input = gr.Textbox(
                            label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
                            value="./audios",
                        )
                        inputs = gr.File(
                            file_count="multiple",
                            label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹"),
                        )
                    with gr.Row():
                        format1 = gr.Radio(
                            label=i18n("导出文件格式"),
                            choices=["wav", "flac", "mp3", "m4a"],
                            value="wav",
                            interactive=True,
                        )
                        but1 = gr.Button(i18n("转换"), variant="primary")
                        vc_output3 = gr.Textbox(label=i18n("输出信息"))
                    but1.click(
                        vc.vc_multi,
                        [
                            spk_item,
                            dir_input,
                            opt_input,
                            inputs,
                            vc_transform1,
                            f0method1,
                            file_index1,
                            file_index2,
                            # file_big_npy2,
                            index_rate1,
                            filter_radius1,
                            resample_sr1,
                            rms_mix_rate1,
                            protect1,
                            format1,
                        ],
                        [vc_output3],
                        api_name="infer_convert_batch",
                    )
            sid0.change(
                fn=vc.get_vc,
                inputs=[sid0, protect0, protect1],
                outputs=[spk_item, protect0, protect1, file_index2, file_index4],
            )
        with gr.TabItem("Download Model"):
            with gr.Row():
                gr.Markdown(
                    """
                    ⚠️ Google Drive links and some leelo models will not work with this Gradio app ⚠️
                    """
                )
            with gr.Row():
                url = gr.Textbox(label="Enter the URL to the model:")
            with gr.Row():
                model = gr.Textbox(label="Name your model (include the model maker's name):")
                download_button = gr.Button("Download")
            with gr.Row():
                status_bar = gr.Textbox(label="")
                download_button.click(
                    fn=download_from_url, inputs=[url, model], outputs=[status_bar]
                )
            with gr.Row():
                gr.Markdown(
                    """
                    ❤️ Support the original creator of this easyGUI ❤️
                    paypal.me/lesantillan
                    """
                )
        with gr.TabItem("Training"):
            with gr.Row():
                gr.Markdown(
                    """
                    ⚠️ HAHAH YOU THOUGHT I ADDED TRAINING??? NO OFC DUH ⚠️
                    """
                )

if config.iscolab:
    app.queue(concurrency_count=511, max_size=1022).launch(
        share=True, favicon_path="./TW.png"
    )
else:
    app.queue(concurrency_count=511, max_size=1022).launch(
        server_name="0.0.0.0",
        favicon_path="./TW.png",
        inbrowser=not config.noautoopen,
        server_port=config.listen_port,
        quiet=True,
    )