{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 100,
"global_step": 192,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010416666666666666,
"grad_norm": 654.362555910497,
"learning_rate": 3.12809937842653e-08,
"logits/chosen": -2.590585231781006,
"logits/rejected": -2.5664222240448,
"logps/chosen": -80.29847717285156,
"logps/rejected": -53.10200881958008,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.10416666666666667,
"grad_norm": 645.8509361582794,
"learning_rate": 3.12809937842653e-07,
"logits/chosen": -2.5562191009521484,
"logits/rejected": -2.538214683532715,
"logps/chosen": -87.91580200195312,
"logps/rejected": -81.03489685058594,
"loss": 0.7015,
"rewards/accuracies": 0.2083333283662796,
"rewards/chosen": -0.0006295897765085101,
"rewards/margins": -0.004218282178044319,
"rewards/rejected": 0.0035886918194592,
"step": 10
},
{
"epoch": 0.20833333333333334,
"grad_norm": 409.9688749308626,
"learning_rate": 6.25619875685306e-07,
"logits/chosen": -2.6103761196136475,
"logits/rejected": -2.5612125396728516,
"logps/chosen": -102.897216796875,
"logps/rejected": -89.5175552368164,
"loss": 0.6684,
"rewards/accuracies": 0.34375,
"rewards/chosen": 0.4510021209716797,
"rewards/margins": 0.08641182631254196,
"rewards/rejected": 0.3645903468132019,
"step": 20
},
{
"epoch": 0.3125,
"grad_norm": 719.6259537665483,
"learning_rate": 5.89246627098951e-07,
"logits/chosen": -2.508330821990967,
"logits/rejected": -2.5230329036712646,
"logps/chosen": -67.4363784790039,
"logps/rejected": -75.49668884277344,
"loss": 0.7456,
"rewards/accuracies": 0.34375,
"rewards/chosen": 0.000880239880643785,
"rewards/margins": 0.5682423710823059,
"rewards/rejected": -0.5673621892929077,
"step": 30
},
{
"epoch": 0.4166666666666667,
"grad_norm": 505.0085449643877,
"learning_rate": 5.52873378512596e-07,
"logits/chosen": -2.5900540351867676,
"logits/rejected": -2.578765869140625,
"logps/chosen": -72.26093292236328,
"logps/rejected": -71.128173828125,
"loss": 0.7782,
"rewards/accuracies": 0.25,
"rewards/chosen": -0.113719142973423,
"rewards/margins": 0.29866155982017517,
"rewards/rejected": -0.41238075494766235,
"step": 40
},
{
"epoch": 0.5208333333333334,
"grad_norm": 607.5189289574854,
"learning_rate": 5.16500129926241e-07,
"logits/chosen": -2.5216305255889893,
"logits/rejected": -2.533747911453247,
"logps/chosen": -49.09285354614258,
"logps/rejected": -57.471824645996094,
"loss": 0.8133,
"rewards/accuracies": 0.23749999701976776,
"rewards/chosen": 1.7973496913909912,
"rewards/margins": 0.3687282204627991,
"rewards/rejected": 1.428621530532837,
"step": 50
},
{
"epoch": 0.625,
"grad_norm": 445.5346691700546,
"learning_rate": 4.80126881339886e-07,
"logits/chosen": -2.6302380561828613,
"logits/rejected": -2.6127591133117676,
"logps/chosen": -75.98055267333984,
"logps/rejected": -76.88851165771484,
"loss": 0.7692,
"rewards/accuracies": 0.3187499940395355,
"rewards/chosen": 3.0699806213378906,
"rewards/margins": 0.635765016078949,
"rewards/rejected": 2.434215784072876,
"step": 60
},
{
"epoch": 0.7291666666666666,
"grad_norm": 609.003215803196,
"learning_rate": 4.43753632753531e-07,
"logits/chosen": -2.626288890838623,
"logits/rejected": -2.619006633758545,
"logps/chosen": -93.36027526855469,
"logps/rejected": -83.28195190429688,
"loss": 0.7848,
"rewards/accuracies": 0.36250001192092896,
"rewards/chosen": 2.9665780067443848,
"rewards/margins": 0.811475932598114,
"rewards/rejected": 2.155101776123047,
"step": 70
},
{
"epoch": 0.8333333333333334,
"grad_norm": 832.764897592166,
"learning_rate": 4.07380384167176e-07,
"logits/chosen": -2.610133647918701,
"logits/rejected": -2.5608553886413574,
"logps/chosen": -83.00022888183594,
"logps/rejected": -77.9175033569336,
"loss": 0.7948,
"rewards/accuracies": 0.4375,
"rewards/chosen": 2.628972053527832,
"rewards/margins": 1.1728503704071045,
"rewards/rejected": 1.456121802330017,
"step": 80
},
{
"epoch": 0.9375,
"grad_norm": 670.0683208538239,
"learning_rate": 3.71007135580821e-07,
"logits/chosen": -2.521639347076416,
"logits/rejected": -2.5168137550354004,
"logps/chosen": -52.778839111328125,
"logps/rejected": -62.82239532470703,
"loss": 0.8131,
"rewards/accuracies": 0.24375000596046448,
"rewards/chosen": 1.1937518119812012,
"rewards/margins": 0.5518778562545776,
"rewards/rejected": 0.6418739557266235,
"step": 90
},
{
"epoch": 1.0416666666666667,
"grad_norm": 54.34377536034673,
"learning_rate": 3.3463388699446595e-07,
"logits/chosen": -2.515855312347412,
"logits/rejected": -2.4969446659088135,
"logps/chosen": -68.2948989868164,
"logps/rejected": -73.43537902832031,
"loss": 0.6244,
"rewards/accuracies": 0.38749998807907104,
"rewards/chosen": 5.195466041564941,
"rewards/margins": 6.597257137298584,
"rewards/rejected": -1.401790738105774,
"step": 100
},
{
"epoch": 1.0416666666666667,
"eval_logits/chosen": -2.5561327934265137,
"eval_logits/rejected": -2.540764093399048,
"eval_logps/chosen": -72.52857208251953,
"eval_logps/rejected": -79.90425109863281,
"eval_loss": 0.8358556032180786,
"eval_rewards/accuracies": 0.3333333432674408,
"eval_rewards/chosen": 1.9508284330368042,
"eval_rewards/margins": 0.7341080904006958,
"eval_rewards/rejected": 1.2167203426361084,
"eval_runtime": 113.77,
"eval_samples_per_second": 17.579,
"eval_steps_per_second": 0.554,
"step": 100
},
{
"epoch": 1.1458333333333333,
"grad_norm": 89.01359829903859,
"learning_rate": 2.9826063840811097e-07,
"logits/chosen": -2.5290496349334717,
"logits/rejected": -2.5576484203338623,
"logps/chosen": -57.967750549316406,
"logps/rejected": -86.81826782226562,
"loss": 0.369,
"rewards/accuracies": 0.48124998807907104,
"rewards/chosen": 7.356253623962402,
"rewards/margins": 12.658203125,
"rewards/rejected": -5.301949501037598,
"step": 110
},
{
"epoch": 1.25,
"grad_norm": 36.517350799999726,
"learning_rate": 2.61887389821756e-07,
"logits/chosen": -2.585951566696167,
"logits/rejected": -2.5552000999450684,
"logps/chosen": -96.2650146484375,
"logps/rejected": -101.90721130371094,
"loss": 0.3807,
"rewards/accuracies": 0.581250011920929,
"rewards/chosen": 11.669889450073242,
"rewards/margins": 20.189105987548828,
"rewards/rejected": -8.519216537475586,
"step": 120
},
{
"epoch": 1.3541666666666667,
"grad_norm": 210.1258882273698,
"learning_rate": 2.2551414123540096e-07,
"logits/chosen": -2.513514757156372,
"logits/rejected": -2.5159261226654053,
"logps/chosen": -78.0460433959961,
"logps/rejected": -96.77547454833984,
"loss": 0.3889,
"rewards/accuracies": 0.46875,
"rewards/chosen": 8.468559265136719,
"rewards/margins": 15.862611770629883,
"rewards/rejected": -7.394052028656006,
"step": 130
},
{
"epoch": 1.4583333333333333,
"grad_norm": 328.87414514377355,
"learning_rate": 1.8914089264904598e-07,
"logits/chosen": -2.514157772064209,
"logits/rejected": -2.528646469116211,
"logps/chosen": -42.35776138305664,
"logps/rejected": -68.42477416992188,
"loss": 0.3865,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": 6.88818883895874,
"rewards/margins": 10.171409606933594,
"rewards/rejected": -3.283221483230591,
"step": 140
},
{
"epoch": 1.5625,
"grad_norm": 40.158855663364754,
"learning_rate": 1.52767644062691e-07,
"logits/chosen": -2.5543441772460938,
"logits/rejected": -2.5303092002868652,
"logps/chosen": -63.319725036621094,
"logps/rejected": -77.66620635986328,
"loss": 0.3882,
"rewards/accuracies": 0.4625000059604645,
"rewards/chosen": 8.774893760681152,
"rewards/margins": 12.840434074401855,
"rewards/rejected": -4.065541744232178,
"step": 150
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.8217104876546546,
"learning_rate": 1.1639439547633599e-07,
"logits/chosen": -2.5138983726501465,
"logits/rejected": -2.521850109100342,
"logps/chosen": -69.83772277832031,
"logps/rejected": -95.20994567871094,
"loss": 0.3683,
"rewards/accuracies": 0.543749988079071,
"rewards/chosen": 11.020576477050781,
"rewards/margins": 18.24923324584961,
"rewards/rejected": -7.228658199310303,
"step": 160
},
{
"epoch": 1.7708333333333335,
"grad_norm": 171.24133690438418,
"learning_rate": 8.0021146889981e-08,
"logits/chosen": -2.4773125648498535,
"logits/rejected": -2.4666218757629395,
"logps/chosen": -66.26407623291016,
"logps/rejected": -90.37199401855469,
"loss": 0.3983,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 8.90200138092041,
"rewards/margins": 16.24318504333496,
"rewards/rejected": -7.341184139251709,
"step": 170
},
{
"epoch": 1.875,
"grad_norm": 186.68325753918893,
"learning_rate": 4.3647898303625996e-08,
"logits/chosen": -2.5022826194763184,
"logits/rejected": -2.4847512245178223,
"logps/chosen": -63.6507453918457,
"logps/rejected": -80.68909454345703,
"loss": 0.3745,
"rewards/accuracies": 0.5,
"rewards/chosen": 8.155413627624512,
"rewards/margins": 15.189491271972656,
"rewards/rejected": -7.034076690673828,
"step": 180
},
{
"epoch": 1.9791666666666665,
"grad_norm": 1.1959777156996747,
"learning_rate": 7.274649717270999e-09,
"logits/chosen": -2.539518356323242,
"logits/rejected": -2.5355026721954346,
"logps/chosen": -77.53422546386719,
"logps/rejected": -100.30330657958984,
"loss": 0.373,
"rewards/accuracies": 0.53125,
"rewards/chosen": 10.861578941345215,
"rewards/margins": 20.484987258911133,
"rewards/rejected": -9.62341022491455,
"step": 190
},
{
"epoch": 2.0,
"step": 192,
"total_flos": 0.0,
"train_loss": 0.572633953144153,
"train_runtime": 2040.9175,
"train_samples_per_second": 5.99,
"train_steps_per_second": 0.094
}
],
"logging_steps": 10,
"max_steps": 192,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}