{
  "best_metric": 0.9706230241510936,
  "best_model_checkpoint": "/home/ddo37/Security-Project/scripts/finetune/output/checkpoints/checkpoint-1856",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 2320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.43103448275862066,
      "grad_norm": 11.954014778137207,
      "learning_rate": 3.5344827586206898e-06,
      "loss": 1.8299,
      "step": 100
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 2.8888251781463623,
      "learning_rate": 7.844827586206897e-06,
      "loss": 0.7131,
      "step": 200
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8713592233009708,
      "eval_f1": 0.8631315005994362,
      "eval_loss": 0.3847014009952545,
      "eval_runtime": 1.2398,
      "eval_samples_per_second": 664.646,
      "eval_steps_per_second": 20.972,
      "step": 232
    },
    {
      "epoch": 1.293103448275862,
      "grad_norm": 2.661858558654785,
      "learning_rate": 9.760536398467434e-06,
      "loss": 0.3837,
      "step": 300
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 2.5226521492004395,
      "learning_rate": 9.281609195402299e-06,
      "loss": 0.2957,
      "step": 400
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9344660194174758,
      "eval_f1": 0.9309781897227271,
      "eval_loss": 0.22864548861980438,
      "eval_runtime": 1.2773,
      "eval_samples_per_second": 645.087,
      "eval_steps_per_second": 20.355,
      "step": 464
    },
    {
      "epoch": 2.1551724137931036,
      "grad_norm": 5.212111473083496,
      "learning_rate": 8.802681992337166e-06,
      "loss": 0.2293,
      "step": 500
    },
    {
      "epoch": 2.586206896551724,
      "grad_norm": 3.2719788551330566,
      "learning_rate": 8.323754789272031e-06,
      "loss": 0.2061,
      "step": 600
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9453883495145631,
      "eval_f1": 0.9428782627566964,
      "eval_loss": 0.17873604595661163,
      "eval_runtime": 1.2824,
      "eval_samples_per_second": 642.533,
      "eval_steps_per_second": 20.274,
      "step": 696
    },
    {
      "epoch": 3.0172413793103448,
      "grad_norm": 3.8508994579315186,
      "learning_rate": 7.844827586206897e-06,
      "loss": 0.2151,
      "step": 700
    },
    {
      "epoch": 3.4482758620689653,
      "grad_norm": 1.9576607942581177,
      "learning_rate": 7.365900383141763e-06,
      "loss": 0.1763,
      "step": 800
    },
    {
      "epoch": 3.8793103448275863,
      "grad_norm": 9.453478813171387,
      "learning_rate": 6.886973180076629e-06,
      "loss": 0.1558,
      "step": 900
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9538834951456311,
      "eval_f1": 0.9512711549345356,
      "eval_loss": 0.14712262153625488,
      "eval_runtime": 1.2772,
      "eval_samples_per_second": 645.171,
      "eval_steps_per_second": 20.357,
      "step": 928
    },
    {
      "epoch": 4.310344827586207,
      "grad_norm": 1.4285823106765747,
      "learning_rate": 6.408045977011495e-06,
      "loss": 0.1631,
      "step": 1000
    },
    {
      "epoch": 4.741379310344827,
      "grad_norm": 5.883343696594238,
      "learning_rate": 5.933908045977011e-06,
      "loss": 0.1504,
      "step": 1100
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9611650485436893,
      "eval_f1": 0.9599362299707782,
      "eval_loss": 0.12942931056022644,
      "eval_runtime": 1.2731,
      "eval_samples_per_second": 647.238,
      "eval_steps_per_second": 20.423,
      "step": 1160
    },
    {
      "epoch": 5.172413793103448,
      "grad_norm": 3.6885058879852295,
      "learning_rate": 5.454980842911878e-06,
      "loss": 0.125,
      "step": 1200
    },
    {
      "epoch": 5.603448275862069,
      "grad_norm": 2.8290116786956787,
      "learning_rate": 4.976053639846744e-06,
      "loss": 0.125,
      "step": 1300
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9635922330097088,
      "eval_f1": 0.9621841535418691,
      "eval_loss": 0.11738289892673492,
      "eval_runtime": 1.2789,
      "eval_samples_per_second": 644.281,
      "eval_steps_per_second": 20.329,
      "step": 1392
    },
    {
      "epoch": 6.0344827586206895,
      "grad_norm": 0.3815958499908447,
      "learning_rate": 4.49712643678161e-06,
      "loss": 0.1202,
      "step": 1400
    },
    {
      "epoch": 6.4655172413793105,
      "grad_norm": 3.272099018096924,
      "learning_rate": 4.0181992337164755e-06,
      "loss": 0.1118,
      "step": 1500
    },
    {
      "epoch": 6.896551724137931,
      "grad_norm": 4.3483662605285645,
      "learning_rate": 3.5392720306513413e-06,
      "loss": 0.1184,
      "step": 1600
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9660194174757282,
      "eval_f1": 0.9665106319184217,
      "eval_loss": 0.10380454361438751,
      "eval_runtime": 1.2789,
      "eval_samples_per_second": 644.297,
      "eval_steps_per_second": 20.33,
      "step": 1624
    },
    {
      "epoch": 7.327586206896552,
      "grad_norm": 0.32656994462013245,
      "learning_rate": 3.0603448275862068e-06,
      "loss": 0.1011,
      "step": 1700
    },
    {
      "epoch": 7.758620689655173,
      "grad_norm": 6.692719459533691,
      "learning_rate": 2.581417624521073e-06,
      "loss": 0.0889,
      "step": 1800
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.970873786407767,
      "eval_f1": 0.9706230241510936,
      "eval_loss": 0.09331130981445312,
      "eval_runtime": 1.2861,
      "eval_samples_per_second": 640.719,
      "eval_steps_per_second": 20.217,
      "step": 1856
    },
    {
      "epoch": 8.189655172413794,
      "grad_norm": 5.316383361816406,
      "learning_rate": 2.102490421455939e-06,
      "loss": 0.0904,
      "step": 1900
    },
    {
      "epoch": 8.620689655172415,
      "grad_norm": 1.0274827480316162,
      "learning_rate": 1.623563218390805e-06,
      "loss": 0.1022,
      "step": 2000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.970873786407767,
      "eval_f1": 0.9700692044603826,
      "eval_loss": 0.09119965136051178,
      "eval_runtime": 1.2882,
      "eval_samples_per_second": 639.669,
      "eval_steps_per_second": 20.184,
      "step": 2088
    },
    {
      "epoch": 9.051724137931034,
      "grad_norm": 4.100451946258545,
      "learning_rate": 1.1494252873563219e-06,
      "loss": 0.0927,
      "step": 2100
    },
    {
      "epoch": 9.482758620689655,
      "grad_norm": 3.5858731269836426,
      "learning_rate": 6.752873563218392e-07,
      "loss": 0.0757,
      "step": 2200
    },
    {
      "epoch": 9.913793103448276,
      "grad_norm": 4.313425064086914,
      "learning_rate": 1.96360153256705e-07,
      "loss": 0.0988,
      "step": 2300
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9696601941747572,
      "eval_f1": 0.9687676005765485,
      "eval_loss": 0.08939822018146515,
      "eval_runtime": 1.2896,
      "eval_samples_per_second": 638.946,
      "eval_steps_per_second": 20.161,
      "step": 2320
    }
  ],
  "logging_steps": 100,
  "max_steps": 2320,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9835957818753024.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}