{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.742857142857143,
"global_step": 24,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"learning_rate": 4.9786121534345265e-05,
"loss": 2.3656,
"step": 1
},
{
"epoch": 0.23,
"learning_rate": 4.914814565722671e-05,
"loss": 2.3106,
"step": 2
},
{
"epoch": 0.34,
"learning_rate": 4.8096988312782174e-05,
"loss": 2.2921,
"step": 3
},
{
"epoch": 0.46,
"learning_rate": 4.665063509461097e-05,
"loss": 2.1831,
"step": 4
},
{
"epoch": 0.57,
"learning_rate": 4.4833833507280884e-05,
"loss": 2.2563,
"step": 5
},
{
"epoch": 0.69,
"learning_rate": 4.267766952966369e-05,
"loss": 2.2585,
"step": 6
},
{
"epoch": 0.8,
"learning_rate": 4.021903572521802e-05,
"loss": 2.3297,
"step": 7
},
{
"epoch": 0.91,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.2337,
"step": 8
},
{
"epoch": 1.03,
"learning_rate": 3.456708580912725e-05,
"loss": 2.2176,
"step": 9
},
{
"epoch": 1.14,
"learning_rate": 3.147047612756302e-05,
"loss": 2.2138,
"step": 10
},
{
"epoch": 1.26,
"learning_rate": 2.8263154805501297e-05,
"loss": 2.23,
"step": 11
},
{
"epoch": 1.37,
"learning_rate": 2.5e-05,
"loss": 2.2902,
"step": 12
},
{
"epoch": 1.49,
"learning_rate": 2.173684519449872e-05,
"loss": 2.2732,
"step": 13
},
{
"epoch": 1.6,
"learning_rate": 1.852952387243698e-05,
"loss": 2.2587,
"step": 14
},
{
"epoch": 1.71,
"learning_rate": 1.5432914190872757e-05,
"loss": 2.1618,
"step": 15
},
{
"epoch": 1.83,
"learning_rate": 1.2500000000000006e-05,
"loss": 2.2363,
"step": 16
},
{
"epoch": 1.94,
"learning_rate": 9.780964274781984e-06,
"loss": 2.2207,
"step": 17
},
{
"epoch": 2.06,
"learning_rate": 7.3223304703363135e-06,
"loss": 2.2272,
"step": 18
},
{
"epoch": 2.17,
"learning_rate": 5.166166492719124e-06,
"loss": 2.2384,
"step": 19
},
{
"epoch": 2.29,
"learning_rate": 3.3493649053890326e-06,
"loss": 2.1726,
"step": 20
},
{
"epoch": 2.4,
"learning_rate": 1.9030116872178316e-06,
"loss": 2.1893,
"step": 21
},
{
"epoch": 2.51,
"learning_rate": 8.51854342773295e-07,
"loss": 2.2168,
"step": 22
},
{
"epoch": 2.63,
"learning_rate": 2.1387846565474045e-07,
"loss": 2.2572,
"step": 23
},
{
"epoch": 2.74,
"learning_rate": 0.0,
"loss": 2.2699,
"step": 24
},
{
"epoch": 2.74,
"step": 24,
"total_flos": 7.741968380801843e+16,
"train_loss": 2.2459669013818107,
"train_runtime": 103.0683,
"train_samples_per_second": 32.425,
"train_steps_per_second": 0.233
}
],
"max_steps": 24,
"num_train_epochs": 3,
"total_flos": 7.741968380801843e+16,
"trial_name": null,
"trial_params": null
}