Commit 0f2a923 · add settings
Parent(s): 2eae16b
src/f5_tts/train/finetune_cli.py
CHANGED

@@ -89,7 +89,11 @@ def main():
     if args.finetune:
         if not os.path.isdir(checkpoint_path):
             os.makedirs(checkpoint_path, exist_ok=True)
-
+
+        file_checkpoint = os.path.join(checkpoint_path, os.path.basename(ckpt_path))
+        if not os.path.isfile(file_checkpoint):
+            shutil.copy2(ckpt_path, file_checkpoint)
+            print("copy checkpoint for finetune")
 
     # Use the tokenizer and tokenizer_path provided in the command line arguments
     tokenizer = args.tokenizer
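When finetuning, this change seeds the project's checkpoint directory with a copy of the pretrained checkpoint, so training resumes from it without touching the original file. A minimal standalone sketch of the copy-if-missing logic, with hypothetical paths (the real values come from the parsed CLI arguments):

import os
import shutil

# Hypothetical paths for illustration; finetune_cli.py derives them from
# the CLI arguments and the pretrained model lookup.
ckpt_path = "ckpts/F5TTS_Base/model_1200000.pt"
checkpoint_path = "ckpts/my_project"

os.makedirs(checkpoint_path, exist_ok=True)
file_checkpoint = os.path.join(checkpoint_path, os.path.basename(ckpt_path))
if not os.path.isfile(file_checkpoint):
    shutil.copy2(ckpt_path, file_checkpoint)  # copy2 keeps timestamps, unlike copyfile
    print("copy checkpoint for finetune")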
src/f5_tts/train/finetune_gradio.py
CHANGED

@@ -46,6 +46,119 @@ device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is
 pipe = None
 
 
+# Save settings to a JSON file
+def save_settings(
+    project_name,
+    exp_name,
+    learning_rate,
+    batch_size_per_gpu,
+    batch_size_type,
+    max_samples,
+    grad_accumulation_steps,
+    max_grad_norm,
+    epochs,
+    num_warmup_updates,
+    save_per_updates,
+    last_per_steps,
+    finetune,
+    file_checkpoint_train,
+    tokenizer_type,
+    tokenizer_file,
+    mixed_precision,
+):
+    path_project = os.path.join(path_project_ckpts, project_name)
+    os.makedirs(path_project, exist_ok=True)
+    file_setting = os.path.join(path_project, "setting.json")
+
+    settings = {
+        "exp_name": exp_name,
+        "learning_rate": learning_rate,
+        "batch_size_per_gpu": batch_size_per_gpu,
+        "batch_size_type": batch_size_type,
+        "max_samples": max_samples,
+        "grad_accumulation_steps": grad_accumulation_steps,
+        "max_grad_norm": max_grad_norm,
+        "epochs": epochs,
+        "num_warmup_updates": num_warmup_updates,
+        "save_per_updates": save_per_updates,
+        "last_per_steps": last_per_steps,
+        "finetune": finetune,
+        "file_checkpoint_train": file_checkpoint_train,
+        "tokenizer_type": tokenizer_type,
+        "tokenizer_file": tokenizer_file,
+        "mixed_precision": mixed_precision,
+    }
+    with open(file_setting, "w") as f:
+        json.dump(settings, f, indent=4)
+    return "Settings saved!"
+
+
+# Load settings from a JSON file
+def load_settings(project_name):
+    project_name = project_name.replace("_pinyin", "").replace("_char", "")
+    path_project = os.path.join(path_project_ckpts, project_name)
+    file_setting = os.path.join(path_project, "setting.json")
+
+    if not os.path.isfile(file_setting):
+        settings = {
+            "exp_name": "F5TTS_Base",
+            "learning_rate": 1e-05,
+            "batch_size_per_gpu": 1000,
+            "batch_size_type": "frame",
+            "max_samples": 64,
+            "grad_accumulation_steps": 1,
+            "max_grad_norm": 1,
+            "epochs": 100,
+            "num_warmup_updates": 2,
+            "save_per_updates": 300,
+            "last_per_steps": 200,
+            "finetune": True,
+            "file_checkpoint_train": "",
+            "tokenizer_type": "pinyin",
+            "tokenizer_file": "",
+            "mixed_precision": "none",
+        }
+        return (
+            settings["exp_name"],
+            settings["learning_rate"],
+            settings["batch_size_per_gpu"],
+            settings["batch_size_type"],
+            settings["max_samples"],
+            settings["grad_accumulation_steps"],
+            settings["max_grad_norm"],
+            settings["epochs"],
+            settings["num_warmup_updates"],
+            settings["save_per_updates"],
+            settings["last_per_steps"],
+            settings["finetune"],
+            settings["file_checkpoint_train"],
+            settings["tokenizer_type"],
+            settings["tokenizer_file"],
+            settings["mixed_precision"],
+        )
+
+    with open(file_setting, "r") as f:
+        settings = json.load(f)
+    return (
+        settings["exp_name"],
+        settings["learning_rate"],
+        settings["batch_size_per_gpu"],
+        settings["batch_size_type"],
+        settings["max_samples"],
+        settings["grad_accumulation_steps"],
+        settings["max_grad_norm"],
+        settings["epochs"],
+        settings["num_warmup_updates"],
+        settings["save_per_updates"],
+        settings["last_per_steps"],
+        settings["finetune"],
+        settings["file_checkpoint_train"],
+        settings["tokenizer_type"],
+        settings["tokenizer_file"],
+        settings["mixed_precision"],
+    )
+
+
 # Load metadata
 def get_audio_duration(audio_path):
     """Calculate the duration of an audio file."""
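Net effect: one setting.json per project under path_project_ckpts, written by save_settings and read back by load_settings, which falls back to hard-coded defaults when the file does not exist yet. A quick roundtrip sketch, assuming both functions are in scope and path_project_ckpts points at a writable directory ("demo_project" is a hypothetical name):

msg = save_settings(
    "demo_project", "F5TTS_Base", 1e-05, 1000, "frame", 64,
    1, 1, 100, 2, 300, 200, True, "", "pinyin", "", "none",
)
print(msg)  # Settings saved!

values = load_settings("demo_project")
assert values[0] == "F5TTS_Base"  # exp_name leads the 16-value tuple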
@@ -330,6 +443,26 @@ def start_training(
 
     print(cmd)
 
+    save_settings(
+        dataset_name,
+        exp_name,
+        learning_rate,
+        batch_size_per_gpu,
+        batch_size_type,
+        max_samples,
+        grad_accumulation_steps,
+        max_grad_norm,
+        epochs,
+        num_warmup_updates,
+        save_per_updates,
+        last_per_steps,
+        finetune,
+        file_checkpoint_train,
+        tokenizer_type,
+        tokenizer_file,
+        mixed_precision,
+    )
+
     try:
         # Start the training process
         training_process = subprocess.Popen(cmd, shell=True)
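save_settings runs just before subprocess.Popen, so setting.json always reflects the configuration of the most recent run even if the trainer exits immediately. A reduced, self-contained sketch of that ordering (all names here are hypothetical):

import json
import subprocess

def persist_then_launch(cmd, settings, file_setting):
    # Record the run configuration first, then start the trainer; the
    # record survives even if the subprocess dies right away.
    with open(file_setting, "w") as f:
        json.dump(settings, f, indent=4)
    return subprocess.Popen(cmd, shell=True)

# proc = persist_then_launch("echo training", {"epochs": 100}, "setting.json")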
@@ -1225,6 +1358,42 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
         start_button = gr.Button("Start Training")
         stop_button = gr.Button("Stop Training", interactive=False)
 
+        if projects_selelect is not None:
+            (
+                exp_namev,
+                learning_ratev,
+                batch_size_per_gpuv,
+                batch_size_typev,
+                max_samplesv,
+                grad_accumulation_stepsv,
+                max_grad_normv,
+                epochsv,
+                num_warmup_updatesv,
+                save_per_updatesv,
+                last_per_stepsv,
+                finetunev,
+                file_checkpoint_trainv,
+                tokenizer_typev,
+                tokenizer_filev,
+                mixed_precisionv,
+            ) = load_settings(projects_selelect)
+            exp_name.value = exp_namev
+            learning_rate.value = learning_ratev
+            batch_size_per_gpu.value = batch_size_per_gpuv
+            batch_size_type.value = batch_size_typev
+            max_samples.value = max_samplesv
+            grad_accumulation_steps.value = grad_accumulation_stepsv
+            max_grad_norm.value = max_grad_normv
+            epochs.value = epochsv
+            num_warmup_updates.value = num_warmup_updatesv
+            save_per_updates.value = save_per_updatesv
+            last_per_steps.value = last_per_stepsv
+            ch_finetune.value = finetunev
+            file_checkpoint_train.value = file_checkpoint_trainv
+            tokenizer_type.value = tokenizer_typev
+            tokenizer_file.value = tokenizer_filev
+            mixed_precision.value = mixed_precisionv
+
         txt_info_train = gr.Text(label="info", value="")
         start_button.click(
             fn=start_training,
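Assigning to a component's .value here works because the code still runs during Blocks construction: it overrides the initial value the page renders with, not a live UI update (a change event handles the live case below). A minimal sketch of the pattern, assuming recent Gradio behavior:

import gradio as gr

with gr.Blocks() as demo:
    learning_rate = gr.Number(label="learning_rate", value=1e-4)
    # Still inside UI construction: override the default before first render.
    learning_rate.value = 1e-05

# demo.launch()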
@@ -1279,6 +1448,29 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
             check_finetune, inputs=[ch_finetune], outputs=[file_checkpoint_train, tokenizer_file, tokenizer_type]
         )
 
+        cm_project.change(
+            fn=load_settings,
+            inputs=[cm_project],
+            outputs=[
+                exp_name,
+                learning_rate,
+                batch_size_per_gpu,
+                batch_size_type,
+                max_samples,
+                grad_accumulation_steps,
+                max_grad_norm,
+                epochs,
+                num_warmup_updates,
+                save_per_updates,
+                last_per_steps,
+                ch_finetune,
+                file_checkpoint_train,
+                tokenizer_type,
+                tokenizer_file,
+                mixed_precision,
+            ],
+        )
+
     with gr.TabItem("test model"):
         exp_name = gr.Radio(label="Model", choices=["F5-TTS", "E2-TTS"], value="F5-TTS")
         list_checkpoints, checkpoint_select = get_checkpoints_project(projects_selelect, False)
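The cm_project.change wiring relies on positional matching: the 16-tuple returned by load_settings lands, in order, on the 16 components listed in outputs. A reduced sketch of the same event pattern with two fields (the function and choices are hypothetical):

import gradio as gr

def load_two(project_name):
    # Two-field stand-in for load_settings.
    return "F5TTS_Base", 1e-05

with gr.Blocks() as demo:
    cm_project = gr.Dropdown(choices=["demo_project"], label="project")
    exp_name = gr.Radio(choices=["F5TTS_Base", "E2TTS_Base"], label="exp name")
    learning_rate = gr.Number(label="learning_rate")
    # Each returned value is routed to the output component at the same index.
    cm_project.change(fn=load_two, inputs=[cm_project], outputs=[exp_name, learning_rate])

# demo.launch()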
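One subtlety: load_settings strips a trailing _pinyin or _char from the project name before resolving setting.json, while save_settings uses the name as given, so callers should pass consistent names. The stripping itself is plain string replacement:

# Both tokenizer variants resolve to the same settings directory:
for name in ("my_dataset_pinyin", "my_dataset_char"):
    print(name.replace("_pinyin", "").replace("_char", ""))  # my_dataset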