{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.3773314203730274,
  "eval_steps": 500,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 5e-05,
      "loss": 1.7667,
      "step": 1
    },
    {
      "epoch": 0.05,
      "learning_rate": 5e-05,
      "loss": 1.7274,
      "step": 2
    },
    {
      "epoch": 0.07,
      "learning_rate": 5e-05,
      "loss": 1.656,
      "step": 3
    },
    {
      "epoch": 0.09,
      "learning_rate": 5e-05,
      "loss": 1.7964,
      "step": 4
    },
    {
      "epoch": 0.11,
      "learning_rate": 5e-05,
      "loss": 1.6153,
      "step": 5
    },
    {
      "epoch": 0.14,
      "learning_rate": 5e-05,
      "loss": 1.7931,
      "step": 6
    },
    {
      "epoch": 0.16,
      "learning_rate": 5e-05,
      "loss": 1.7548,
      "step": 7
    },
    {
      "epoch": 0.18,
      "learning_rate": 5e-05,
      "loss": 1.7054,
      "step": 8
    },
    {
      "epoch": 0.21,
      "learning_rate": 5e-05,
      "loss": 1.7311,
      "step": 9
    },
    {
      "epoch": 0.23,
      "learning_rate": 5e-05,
      "loss": 1.6827,
      "step": 10
    },
    {
      "epoch": 0.25,
      "learning_rate": 5e-05,
      "loss": 1.7158,
      "step": 11
    },
    {
      "epoch": 0.28,
      "learning_rate": 5e-05,
      "loss": 1.6599,
      "step": 12
    },
    {
      "epoch": 0.3,
      "learning_rate": 5e-05,
      "loss": 1.6991,
      "step": 13
    },
    {
      "epoch": 0.32,
      "learning_rate": 5e-05,
      "loss": 1.6628,
      "step": 14
    },
    {
      "epoch": 0.34,
      "learning_rate": 5e-05,
      "loss": 1.6188,
      "step": 15
    },
    {
      "epoch": 0.37,
      "learning_rate": 5e-05,
      "loss": 1.6868,
      "step": 16
    },
    {
      "epoch": 0.39,
      "learning_rate": 5e-05,
      "loss": 1.6854,
      "step": 17
    },
    {
      "epoch": 0.41,
      "learning_rate": 5e-05,
      "loss": 1.5938,
      "step": 18
    },
    {
      "epoch": 0.44,
      "learning_rate": 5e-05,
      "loss": 1.6603,
      "step": 19
    },
    {
      "epoch": 0.46,
      "learning_rate": 5e-05,
      "loss": 1.561,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 5e-05,
      "loss": 1.6162,
      "step": 21
    },
    {
      "epoch": 0.51,
      "learning_rate": 5e-05,
      "loss": 1.6159,
      "step": 22
    },
    {
      "epoch": 0.53,
      "learning_rate": 5e-05,
      "loss": 1.6414,
      "step": 23
    },
    {
      "epoch": 0.55,
      "learning_rate": 5e-05,
      "loss": 1.6915,
      "step": 24
    },
    {
      "epoch": 0.57,
      "learning_rate": 5e-05,
      "loss": 1.6853,
      "step": 25
    },
    {
      "epoch": 0.6,
      "learning_rate": 5e-05,
      "loss": 1.6563,
      "step": 26
    },
    {
      "epoch": 0.62,
      "learning_rate": 5e-05,
      "loss": 1.6622,
      "step": 27
    },
    {
      "epoch": 0.64,
      "learning_rate": 5e-05,
      "loss": 1.638,
      "step": 28
    },
    {
      "epoch": 0.67,
      "learning_rate": 5e-05,
      "loss": 1.66,
      "step": 29
    },
    {
      "epoch": 0.69,
      "learning_rate": 5e-05,
      "loss": 1.6565,
      "step": 30
    },
    {
      "epoch": 0.71,
      "learning_rate": 5e-05,
      "loss": 1.6076,
      "step": 31
    },
    {
      "epoch": 0.73,
      "learning_rate": 5e-05,
      "loss": 1.6971,
      "step": 32
    },
    {
      "epoch": 0.76,
      "learning_rate": 5e-05,
      "loss": 1.6836,
      "step": 33
    },
    {
      "epoch": 0.78,
      "learning_rate": 5e-05,
      "loss": 1.587,
      "step": 34
    },
    {
      "epoch": 0.8,
      "learning_rate": 5e-05,
      "loss": 1.7034,
      "step": 35
    },
    {
      "epoch": 0.83,
      "learning_rate": 5e-05,
      "loss": 1.705,
      "step": 36
    },
    {
      "epoch": 0.85,
      "learning_rate": 5e-05,
      "loss": 1.5491,
      "step": 37
    },
    {
      "epoch": 0.87,
      "learning_rate": 5e-05,
      "loss": 1.611,
      "step": 38
    },
    {
      "epoch": 0.9,
      "learning_rate": 5e-05,
      "loss": 1.6795,
      "step": 39
    },
    {
      "epoch": 0.92,
      "learning_rate": 5e-05,
      "loss": 1.5823,
      "step": 40
    },
    {
      "epoch": 0.94,
      "learning_rate": 5e-05,
      "loss": 1.6066,
      "step": 41
    },
    {
      "epoch": 0.96,
      "learning_rate": 5e-05,
      "loss": 1.6279,
      "step": 42
    },
    {
      "epoch": 0.99,
      "learning_rate": 5e-05,
      "loss": 1.5756,
      "step": 43
    },
    {
      "epoch": 1.01,
      "learning_rate": 5e-05,
      "loss": 1.6426,
      "step": 44
    },
    {
      "epoch": 1.03,
      "learning_rate": 5e-05,
      "loss": 1.6566,
      "step": 45
    },
    {
      "epoch": 1.06,
      "learning_rate": 5e-05,
      "loss": 1.6325,
      "step": 46
    },
    {
      "epoch": 1.08,
      "learning_rate": 5e-05,
      "loss": 1.7233,
      "step": 47
    },
    {
      "epoch": 1.1,
      "learning_rate": 5e-05,
      "loss": 1.5899,
      "step": 48
    },
    {
      "epoch": 1.12,
      "learning_rate": 5e-05,
      "loss": 1.5425,
      "step": 49
    },
    {
      "epoch": 1.15,
      "learning_rate": 5e-05,
      "loss": 1.4761,
      "step": 50
    },
    {
      "epoch": 1.17,
      "learning_rate": 5e-05,
      "loss": 1.6153,
      "step": 51
    },
    {
      "epoch": 1.19,
      "learning_rate": 5e-05,
      "loss": 1.6099,
      "step": 52
    },
    {
      "epoch": 1.22,
      "learning_rate": 5e-05,
      "loss": 1.6017,
      "step": 53
    },
    {
      "epoch": 1.24,
      "learning_rate": 5e-05,
      "loss": 1.5573,
      "step": 54
    },
    {
      "epoch": 1.26,
      "learning_rate": 5e-05,
      "loss": 1.5365,
      "step": 55
    },
    {
      "epoch": 1.29,
      "learning_rate": 5e-05,
      "loss": 1.6471,
      "step": 56
    },
    {
      "epoch": 1.31,
      "learning_rate": 5e-05,
      "loss": 1.5963,
      "step": 57
    },
    {
      "epoch": 1.33,
      "learning_rate": 5e-05,
      "loss": 1.6854,
      "step": 58
    },
    {
      "epoch": 1.35,
      "learning_rate": 5e-05,
      "loss": 1.5597,
      "step": 59
    },
    {
      "epoch": 1.38,
      "learning_rate": 5e-05,
      "loss": 1.66,
      "step": 60
    }
  ],
  "logging_steps": 1,
  "max_steps": 129,
  "num_train_epochs": 3,
  "save_steps": 10,
  "total_flos": 2.660329050144768e+17,
  "trial_name": null,
  "trial_params": null
}