l73jiang committed
Commit 0c062c6 · verified · 1 Parent(s): 6b9b9b6

Update config.py

Files changed (1)
  1. config.py +218 -218
config.py CHANGED
@@ -1,218 +1,218 @@
- import os
- import re
- import sys
-
- import torch
-
- from tools.i18n.i18n import I18nAuto
-
- i18n = I18nAuto(language=os.environ.get("language", "Auto"))
-
-
- pretrained_sovits_name = {
-     "v1": "pretrained_models/s2G488k.pth",
-     "v2": "pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
-     "v3": "pretrained_models/s2Gv3.pth",  ### v3/v4 still need the vocoder checked; never mind for now...
-     "v4": "pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
-     "v2Pro": "pretrained_models/v2Pro/s2Gv2Pro.pth",
-     "v2ProPlus": "pretrained_models/v2Pro/s2Gv2ProPlus.pth",
- }
-
- pretrained_gpt_name = {
-     "v1": "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
-     "v2": "pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
-     "v3": "pretrained_models/s1v3.ckpt",
-     "v4": "pretrained_models/s1v3.ckpt",
-     "v2Pro": "pretrained_models/s1v3.ckpt",
-     "v2ProPlus": "pretrained_models/s1v3.ckpt",
- }
- name2sovits_path = {
-     # i18n("不训练直接推v1底模!"): "pretrained_models/s2G488k.pth",
-     i18n("不训练直接推v2底模!"): "pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
-     # i18n("不训练直接推v3底模!"): "pretrained_models/s2Gv3.pth",
-     # i18n("不训练直接推v4底模!"): "pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
-     i18n("不训练直接推v2Pro底模!"): "pretrained_models/v2Pro/s2Gv2Pro.pth",
-     i18n("不训练直接推v2ProPlus底模!"): "pretrained_models/v2Pro/s2Gv2ProPlus.pth",
- }
- name2gpt_path = {
-     # i18n("不训练直接推v1底模!"):"pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
-     i18n(
-         "不训练直接推v2底模!"
-     ): "pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
-     i18n("不训练直接推v3底模!"): "pretrained_models/s1v3.ckpt",
- }
- SoVITS_weight_root = [
-     "SoVITS_weights",
-     "SoVITS_weights_v2",
-     "SoVITS_weights_v3",
-     "SoVITS_weights_v4",
-     "SoVITS_weights_v2Pro",
-     "SoVITS_weights_v2ProPlus",
- ]
- GPT_weight_root = [
-     "GPT_weights",
-     "GPT_weights_v2",
-     "GPT_weights_v3",
-     "GPT_weights_v4",
-     "GPT_weights_v2Pro",
-     "GPT_weights_v2ProPlus",
- ]
- SoVITS_weight_version2root = {
-     "v1": "SoVITS_weights",
-     "v2": "SoVITS_weights_v2",
-     "v3": "SoVITS_weights_v3",
-     "v4": "SoVITS_weights_v4",
-     "v2Pro": "SoVITS_weights_v2Pro",
-     "v2ProPlus": "SoVITS_weights_v2ProPlus",
- }
- GPT_weight_version2root = {
-     "v1": "GPT_weights",
-     "v2": "GPT_weights_v2",
-     "v3": "GPT_weights_v3",
-     "v4": "GPT_weights_v4",
-     "v2Pro": "GPT_weights_v2Pro",
-     "v2ProPlus": "GPT_weights_v2ProPlus",
- }
-
-
- def custom_sort_key(s):
-     # Use a regular expression to split the string into numeric and non-numeric parts
-     parts = re.split("(\d+)", s)
-     # Convert the numeric parts to integers; leave the non-numeric parts unchanged
-     parts = [int(part) if part.isdigit() else part for part in parts]
-     return parts
-
-
- def get_weights_names():
-     SoVITS_names = []
-     for key in name2sovits_path:
-         if os.path.exists(name2sovits_path[key]):
-             SoVITS_names.append(key)
-     for path in SoVITS_weight_root:
-         if not os.path.exists(path):
-             continue
-         for name in os.listdir(path):
-             if name.endswith(".pth"):
-                 SoVITS_names.append("%s/%s" % (path, name))
-     if not SoVITS_names:
-         SoVITS_names = [""]
-     GPT_names = []
-     for key in name2gpt_path:
-         if os.path.exists(name2gpt_path[key]):
-             GPT_names.append(key)
-     for path in GPT_weight_root:
-         if not os.path.exists(path):
-             continue
-         for name in os.listdir(path):
-             if name.endswith(".ckpt"):
-                 GPT_names.append("%s/%s" % (path, name))
-     SoVITS_names = sorted(SoVITS_names, key=custom_sort_key)
-     GPT_names = sorted(GPT_names, key=custom_sort_key)
-     if not GPT_names:
-         GPT_names = [""]
-     return SoVITS_names, GPT_names
-
-
- def change_choices():
-     SoVITS_names, GPT_names = get_weights_names()
-     return {"choices": SoVITS_names, "__type__": "update"}, {
-         "choices": GPT_names,
-         "__type__": "update",
-     }
-
-
- # Designated models for inference
- sovits_path = ""
- gpt_path = ""
- is_half_str = os.environ.get("is_half", "True")
- is_half = True if is_half_str.lower() == "true" else False
- is_share_str = os.environ.get("is_share", "False")
- is_share = True if is_share_str.lower() == "true" else False
-
- cnhubert_path = "pretrained_models/chinese-hubert-base"
- bert_path = "pretrained_models/chinese-roberta-wwm-ext-large"
- pretrained_sovits_path = "pretrained_models/s2G488k.pth"
- pretrained_gpt_path = "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
-
- exp_root = "logs"
- python_exec = sys.executable or "python"
-
- webui_port_main = 9874
- webui_port_uvr5 = 9873
- webui_port_infer_tts = 9872
- webui_port_subfix = 9871
-
- api_port = 9880
-
-
- # Thanks to the contribution of @Karasukaigan and @XXXXRT666
- def get_device_dtype_sm(idx: int) -> tuple[torch.device, torch.dtype, float, float]:
-     cpu = torch.device("cpu")
-     cuda = torch.device(f"cuda:{idx}")
-     if not torch.cuda.is_available():
-         return cpu, torch.float32, 0.0, 0.0
-     device_idx = idx
-     capability = torch.cuda.get_device_capability(device_idx)
-     name = torch.cuda.get_device_name(device_idx)
-     mem_bytes = torch.cuda.get_device_properties(device_idx).total_memory
-     mem_gb = mem_bytes / (1024**3) + 0.4
-     major, minor = capability
-     sm_version = major + minor / 10.0
-     is_16_series = bool(re.search(r"16\d{2}", name)) and sm_version == 7.5
-     if mem_gb < 4 or sm_version < 5.3:
-         return cpu, torch.float32, 0.0, 0.0
-     if sm_version == 6.1 or is_16_series == True:
-         return cuda, torch.float32, sm_version, mem_gb
-     if sm_version > 6.1:
-         return cuda, torch.float16, sm_version, mem_gb
-     return cpu, torch.float32, 0.0, 0.0
-
-
- IS_GPU = True
- GPU_INFOS: list[str] = []
- GPU_INDEX: set[int] = set()
- GPU_COUNT = torch.cuda.device_count()
- CPU_INFO: str = "0\tCPU " + i18n("CPU训练,较慢")
- tmp: list[tuple[torch.device, torch.dtype, float, float]] = []
- memset: set[float] = set()
-
- for i in range(max(GPU_COUNT, 1)):
-     tmp.append(get_device_dtype_sm(i))
-
- for j in tmp:
-     device = j[0]
-     memset.add(j[3])
-     if device.type != "cpu":
-         GPU_INFOS.append(f"{device.index}\t{torch.cuda.get_device_name(device.index)}")
-         GPU_INDEX.add(device.index)
-
- if not GPU_INFOS:
-     IS_GPU = False
-     GPU_INFOS.append(CPU_INFO)
-     GPU_INDEX.add(0)
-
- infer_device = max(tmp, key=lambda x: (x[2], x[3]))[0]
- is_half = any(dtype == torch.float16 for _, dtype, _, _ in tmp)
-
-
- class Config:
-     def __init__(self):
-         self.sovits_path = sovits_path
-         self.gpt_path = gpt_path
-         self.is_half = is_half
-
-         self.cnhubert_path = cnhubert_path
-         self.bert_path = bert_path
-         self.pretrained_sovits_path = pretrained_sovits_path
-         self.pretrained_gpt_path = pretrained_gpt_path
-
-         self.exp_root = exp_root
-         self.python_exec = python_exec
-         self.infer_device = infer_device
-
-         self.webui_port_main = webui_port_main
-         self.webui_port_uvr5 = webui_port_uvr5
-         self.webui_port_infer_tts = webui_port_infer_tts
-         self.webui_port_subfix = webui_port_subfix
-
-         self.api_port = api_port
 
+ import os
+ import re
+ import sys
+
+ import torch
+
+ from tools.i18n.i18n import I18nAuto
+
+ i18n = I18nAuto(language=os.environ.get("language", "Auto"))
+
+
+ pretrained_sovits_name = {
+     "v1": "pretrained_models/s2G488k.pth",
+     "v2": "pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
+     "v3": "pretrained_models/s2Gv3.pth",  ### v3/v4 still need the vocoder checked; never mind for now...
+     "v4": "pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
+     "v2Pro": "pretrained_models/v2Pro/s2Gv2Pro.pth",
+     "v2ProPlus": "pretrained_models/v2Pro/s2Gv2ProPlus.pth",
+ }
+
+ pretrained_gpt_name = {
+     "v1": "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
+     "v2": "pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
+     "v3": "pretrained_models/s1v3.ckpt",
+     "v4": "pretrained_models/s1v3.ckpt",
+     "v2Pro": "pretrained_models/s1v3.ckpt",
+     "v2ProPlus": "pretrained_models/s1v3.ckpt",
+ }
+ name2sovits_path = {
+     # i18n("不训练直接推v1底模!"): "pretrained_models/s2G488k.pth",
+     # i18n("不训练直接推v2底模!"): "pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
+     # i18n("不训练直接推v3底模!"): "pretrained_models/s2Gv3.pth",
+     # i18n("不训练直接推v4底模!"): "pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
+     # i18n("不训练直接推v2Pro底模!"): "pretrained_models/v2Pro/s2Gv2Pro.pth",
+     # i18n("不训练直接推v2ProPlus底模!"): "pretrained_models/v2Pro/s2Gv2ProPlus.pth",
+ }
+ name2gpt_path = {
+     # i18n("不训练直接推v1底模!"):"pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
+     # i18n(
+     #     "不训练直接推v2底模!"
+     # ): "pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
+     # i18n("不训练直接推v3底模!"): "pretrained_models/s1v3.ckpt",
+ }
+ SoVITS_weight_root = [
+     "SoVITS_weights",
+     "SoVITS_weights_v2",
+     "SoVITS_weights_v3",
+     "SoVITS_weights_v4",
+     "SoVITS_weights_v2Pro",
+     "SoVITS_weights_v2ProPlus",
+ ]
+ GPT_weight_root = [
+     "GPT_weights",
+     "GPT_weights_v2",
+     "GPT_weights_v3",
+     "GPT_weights_v4",
+     "GPT_weights_v2Pro",
+     "GPT_weights_v2ProPlus",
+ ]
+ SoVITS_weight_version2root = {
+     "v1": "SoVITS_weights",
+     "v2": "SoVITS_weights_v2",
+     "v3": "SoVITS_weights_v3",
+     "v4": "SoVITS_weights_v4",
+     "v2Pro": "SoVITS_weights_v2Pro",
+     "v2ProPlus": "SoVITS_weights_v2ProPlus",
+ }
+ GPT_weight_version2root = {
+     "v1": "GPT_weights",
+     "v2": "GPT_weights_v2",
+     "v3": "GPT_weights_v3",
+     "v4": "GPT_weights_v4",
+     "v2Pro": "GPT_weights_v2Pro",
+     "v2ProPlus": "GPT_weights_v2ProPlus",
+ }
+
+
+ def custom_sort_key(s):
+     # Use a regular expression to split the string into numeric and non-numeric parts
+     parts = re.split("(\d+)", s)
+     # Convert the numeric parts to integers; leave the non-numeric parts unchanged
+     parts = [int(part) if part.isdigit() else part for part in parts]
+     return parts
+
+
+ def get_weights_names():
+     SoVITS_names = []
+     for key in name2sovits_path:
+         if os.path.exists(name2sovits_path[key]):
+             SoVITS_names.append(key)
+     for path in SoVITS_weight_root:
+         if not os.path.exists(path):
+             continue
+         for name in os.listdir(path):
+             if name.endswith(".pth"):
+                 SoVITS_names.append("%s/%s" % (path, name))
+     if not SoVITS_names:
+         SoVITS_names = [""]
+     GPT_names = []
+     for key in name2gpt_path:
+         if os.path.exists(name2gpt_path[key]):
+             GPT_names.append(key)
+     for path in GPT_weight_root:
+         if not os.path.exists(path):
+             continue
+         for name in os.listdir(path):
+             if name.endswith(".ckpt"):
+                 GPT_names.append("%s/%s" % (path, name))
+     SoVITS_names = sorted(SoVITS_names, key=custom_sort_key)
+     GPT_names = sorted(GPT_names, key=custom_sort_key)
+     if not GPT_names:
+         GPT_names = [""]
+     return SoVITS_names, GPT_names
+
+
+ def change_choices():
+     SoVITS_names, GPT_names = get_weights_names()
+     return {"choices": SoVITS_names, "__type__": "update"}, {
+         "choices": GPT_names,
+         "__type__": "update",
+     }
+
+
+ # Designated models for inference
+ sovits_path = ""
+ gpt_path = ""
+ is_half_str = os.environ.get("is_half", "True")
+ is_half = True if is_half_str.lower() == "true" else False
+ is_share_str = os.environ.get("is_share", "False")
+ is_share = True if is_share_str.lower() == "true" else False
+
+ cnhubert_path = "pretrained_models/chinese-hubert-base"
+ bert_path = "pretrained_models/chinese-roberta-wwm-ext-large"
+ pretrained_sovits_path = "pretrained_models/s2G488k.pth"
+ pretrained_gpt_path = "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
+
+ exp_root = "logs"
+ python_exec = sys.executable or "python"
+
+ webui_port_main = 9874
+ webui_port_uvr5 = 9873
+ webui_port_infer_tts = 9872
+ webui_port_subfix = 9871
+
+ api_port = 9880
+
+
+ # Thanks to the contribution of @Karasukaigan and @XXXXRT666
+ def get_device_dtype_sm(idx: int) -> tuple[torch.device, torch.dtype, float, float]:
+     cpu = torch.device("cpu")
+     cuda = torch.device(f"cuda:{idx}")
+     if not torch.cuda.is_available():
+         return cpu, torch.float32, 0.0, 0.0
+     device_idx = idx
+     capability = torch.cuda.get_device_capability(device_idx)
+     name = torch.cuda.get_device_name(device_idx)
+     mem_bytes = torch.cuda.get_device_properties(device_idx).total_memory
+     mem_gb = mem_bytes / (1024**3) + 0.4
+     major, minor = capability
+     sm_version = major + minor / 10.0
+     is_16_series = bool(re.search(r"16\d{2}", name)) and sm_version == 7.5
+     if mem_gb < 4 or sm_version < 5.3:
+         return cpu, torch.float32, 0.0, 0.0
+     if sm_version == 6.1 or is_16_series == True:
+         return cuda, torch.float32, sm_version, mem_gb
+     if sm_version > 6.1:
+         return cuda, torch.float16, sm_version, mem_gb
+     return cpu, torch.float32, 0.0, 0.0
+
+
+ IS_GPU = True
+ GPU_INFOS: list[str] = []
+ GPU_INDEX: set[int] = set()
+ GPU_COUNT = torch.cuda.device_count()
+ CPU_INFO: str = "0\tCPU " + i18n("CPU训练,较慢")
+ tmp: list[tuple[torch.device, torch.dtype, float, float]] = []
+ memset: set[float] = set()
+
+ for i in range(max(GPU_COUNT, 1)):
+     tmp.append(get_device_dtype_sm(i))
+
+ for j in tmp:
+     device = j[0]
+     memset.add(j[3])
+     if device.type != "cpu":
+         GPU_INFOS.append(f"{device.index}\t{torch.cuda.get_device_name(device.index)}")
+         GPU_INDEX.add(device.index)
+
+ if not GPU_INFOS:
+     IS_GPU = False
+     GPU_INFOS.append(CPU_INFO)
+     GPU_INDEX.add(0)
+
+ infer_device = max(tmp, key=lambda x: (x[2], x[3]))[0]
+ is_half = any(dtype == torch.float16 for _, dtype, _, _ in tmp)
+
+
+ class Config:
+     def __init__(self):
+         self.sovits_path = sovits_path
+         self.gpt_path = gpt_path
+         self.is_half = is_half
+
+         self.cnhubert_path = cnhubert_path
+         self.bert_path = bert_path
+         self.pretrained_sovits_path = pretrained_sovits_path
+         self.pretrained_gpt_path = pretrained_gpt_path
+
+         self.exp_root = exp_root
+         self.python_exec = python_exec
+         self.infer_device = infer_device
+
+         self.webui_port_main = webui_port_main
+         self.webui_port_uvr5 = webui_port_uvr5
+         self.webui_port_infer_tts = webui_port_infer_tts
+         self.webui_port_subfix = webui_port_subfix
+
+         self.api_port = api_port
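
For context, config.py is consumed by the GPT-SoVITS WebUI and API entry points roughly as in the minimal sketch below. The sketch is illustrative only and not part of this commit; it assumes the file is importable as config from the repository root and that torch and the tools.i18n package are installed.

import config  # importing runs the GPU probe and weight-folder scan above

cfg = config.Config()
print("inference device:", cfg.infer_device)  # e.g. cuda:0, or cpu when no usable GPU is found
print("half precision:", cfg.is_half)         # True only if at least one device qualified for float16

# With name2sovits_path / name2gpt_path left empty, only weights found under the
# SoVITS_weights* / GPT_weights* folders are listed; a "" placeholder is returned
# when nothing is found.
sovits_choices, gpt_choices = config.get_weights_names()
print(sovits_choices, gpt_choices)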