{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"episode": 4096,
"epoch": 0.01754626456477039,
"eval_steps": 500,
"global_step": 8,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"episode": 512,
"epoch": 0.0021932830705962986,
"eps": 6,
"loss/policy_avg": 0.18060211837291718,
"lr": 3e-06,
"objective/entropy": -44.39540100097656,
"objective/kl": 0.23749515414237976,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 4.621584892272949,
"objective/scores": 4.621584892272949,
"policy/approxkl_avg": 0.09178640693426132,
"policy/clipfrac_avg": 0.271484375,
"policy/entropy_avg": 0.7151485681533813,
"step": 0,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 5024,
"val/ratio": 1.0001094341278076,
"val/ratio_var": 1.1996758075838443e-06
},
{
"episode": 1024,
"epoch": 0.004386566141192597,
"eps": 6,
"loss/policy_avg": 0.10823587328195572,
"lr": 2.9882812500000002e-06,
"objective/entropy": -40.33261489868164,
"objective/kl": 0.8562099933624268,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 4.949611186981201,
"objective/scores": 4.949611186981201,
"policy/approxkl_avg": 0.09872549772262573,
"policy/clipfrac_avg": 0.29296875,
"policy/entropy_avg": 0.6909540891647339,
"step": 1,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 3988,
"val/ratio": 1.0000126361846924,
"val/ratio_var": 1.5850138197492925e-06
},
{
"episode": 1536,
"epoch": 0.006579849211788897,
"eps": 6,
"loss/policy_avg": 0.24130496382713318,
"lr": 2.9765625e-06,
"objective/entropy": -37.140586853027344,
"objective/kl": 2.1461260318756104,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 5.433126449584961,
"objective/scores": 5.433126449584961,
"policy/approxkl_avg": 0.1151532381772995,
"policy/clipfrac_avg": 0.30078125,
"policy/entropy_avg": 0.6479897499084473,
"step": 2,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 2924,
"val/ratio": 0.999896764755249,
"val/ratio_var": 1.4944761232982273e-06
},
{
"episode": 2048,
"epoch": 0.008773132282385195,
"eps": 6,
"loss/policy_avg": 0.11201904714107513,
"lr": 2.96484375e-06,
"objective/entropy": -23.162431716918945,
"objective/kl": 4.706851005554199,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 5.616954803466797,
"objective/scores": 5.616954803466797,
"policy/approxkl_avg": 0.13956432044506073,
"policy/clipfrac_avg": 0.365234375,
"policy/entropy_avg": 0.6664378643035889,
"step": 3,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 2146,
"val/ratio": 0.9996535778045654,
"val/ratio_var": 2.1856692455912707e-06
},
{
"episode": 2560,
"epoch": 0.010966415352981495,
"eps": 6,
"loss/policy_avg": 0.21055178344249725,
"lr": 2.953125e-06,
"objective/entropy": -17.498517990112305,
"objective/kl": 6.893625259399414,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 5.975703716278076,
"objective/scores": 5.975703716278076,
"policy/approxkl_avg": 0.14695778489112854,
"policy/clipfrac_avg": 0.33203125,
"policy/entropy_avg": 0.6391204595565796,
"step": 4,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 1715,
"val/ratio": 0.9999969005584717,
"val/ratio_var": 2.4044477413553977e-06
},
{
"episode": 3072,
"epoch": 0.013159698423577794,
"eps": 6,
"loss/policy_avg": 0.28335219621658325,
"lr": 2.94140625e-06,
"objective/entropy": -16.186603546142578,
"objective/kl": 8.215164184570312,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 6.314523696899414,
"objective/scores": 6.314523696899414,
"policy/approxkl_avg": 0.15406616032123566,
"policy/clipfrac_avg": 0.34375,
"policy/entropy_avg": 0.6234536170959473,
"step": 5,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 1743,
"val/ratio": 1.0000813007354736,
"val/ratio_var": 2.2491783511213725e-06
},
{
"episode": 3584,
"epoch": 0.015352981494174092,
"eps": 6,
"loss/policy_avg": 0.2581750154495239,
"lr": 2.9296875e-06,
"objective/entropy": -16.82525634765625,
"objective/kl": 10.012128829956055,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 6.68141508102417,
"objective/scores": 6.68141508102417,
"policy/approxkl_avg": 0.15727216005325317,
"policy/clipfrac_avg": 0.306640625,
"policy/entropy_avg": 0.6279575824737549,
"step": 6,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 2024,
"val/ratio": 1.0000149011611938,
"val/ratio_var": 3.910876330337487e-06
},
{
"episode": 4096,
"epoch": 0.01754626456477039,
"eps": 6,
"loss/policy_avg": 0.4508611857891083,
"lr": 2.91796875e-06,
"objective/entropy": -13.531968116760254,
"objective/kl": 11.674339294433594,
"objective/non_score_reward": 0.0,
"objective/rlhf_reward": 6.797825813293457,
"objective/scores": 6.797825813293457,
"policy/approxkl_avg": 0.17787906527519226,
"policy/clipfrac_avg": 0.333984375,
"policy/entropy_avg": 0.6140714287757874,
"step": 7,
"val/clipfrac_avg": 0.0,
"val/num_eos_tokens": 2248,
"val/ratio": 0.9998712539672852,
"val/ratio_var": 2.3673501345911063e-06
}
],
"logging_steps": 1,
"max_steps": 128,
"num_input_tokens_seen": 0,
"num_train_epochs": 1.122960932145305,
"save_steps": 2,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": true,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0,
"train_batch_size": null,
"trial_name": null,
"trial_params": null
}