machineLearningCourse / trainer_state.json
mataoxun committed eab69cd
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 28.960817717206133,
"global_step": 34000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.43,
"learning_rate": 9.369363916964232e-05,
"loss": 0.5598,
"step": 500
},
{
"epoch": 0.85,
"learning_rate": 9.218362282878412e-05,
"loss": 0.5495,
"step": 1000
},
{
"epoch": 1.28,
"learning_rate": 9.067058040307452e-05,
"loss": 0.5008,
"step": 1500
},
{
"epoch": 1.7,
"learning_rate": 8.915753797736489e-05,
"loss": 0.4756,
"step": 2000
},
{
"epoch": 2.13,
"learning_rate": 8.764449555165527e-05,
"loss": 0.4689,
"step": 2500
},
{
"epoch": 2.56,
"learning_rate": 8.613447921079707e-05,
"loss": 0.4223,
"step": 3000
},
{
"epoch": 2.98,
"learning_rate": 8.462143678508746e-05,
"loss": 0.4444,
"step": 3500
},
{
"epoch": 3.41,
"learning_rate": 8.310839435937784e-05,
"loss": 0.3734,
"step": 4000
},
{
"epoch": 3.83,
"learning_rate": 8.159535193366822e-05,
"loss": 0.4001,
"step": 4500
},
{
"epoch": 4.26,
"learning_rate": 8.00823095079586e-05,
"loss": 0.3875,
"step": 5000
},
{
"epoch": 4.68,
"learning_rate": 7.856926708224899e-05,
"loss": 0.3557,
"step": 5500
},
{
"epoch": 5.11,
"learning_rate": 7.705622465653937e-05,
"loss": 0.34,
"step": 6000
},
{
"epoch": 5.54,
"learning_rate": 7.554318223082976e-05,
"loss": 0.3244,
"step": 6500
},
{
"epoch": 5.96,
"learning_rate": 7.403013980512014e-05,
"loss": 0.3372,
"step": 7000
},
{
"epoch": 6.39,
"learning_rate": 7.251709737941052e-05,
"loss": 0.3099,
"step": 7500
},
{
"epoch": 6.81,
"learning_rate": 7.10040549537009e-05,
"loss": 0.3108,
"step": 8000
},
{
"epoch": 7.24,
"learning_rate": 6.949101252799129e-05,
"loss": 0.2705,
"step": 8500
},
{
"epoch": 7.67,
"learning_rate": 6.797797010228167e-05,
"loss": 0.2856,
"step": 9000
},
{
"epoch": 8.09,
"learning_rate": 6.646795376142348e-05,
"loss": 0.2885,
"step": 9500
},
{
"epoch": 8.52,
"learning_rate": 6.495491133571386e-05,
"loss": 0.2729,
"step": 10000
},
{
"epoch": 8.94,
"learning_rate": 6.344186891000424e-05,
"loss": 0.274,
"step": 10500
},
{
"epoch": 9.37,
"learning_rate": 6.192882648429462e-05,
"loss": 0.2465,
"step": 11000
},
{
"epoch": 9.8,
"learning_rate": 6.041881014343642e-05,
"loss": 0.2517,
"step": 11500
},
{
"epoch": 10.22,
"learning_rate": 5.890576771772681e-05,
"loss": 0.2512,
"step": 12000
},
{
"epoch": 10.65,
"learning_rate": 5.739272529201719e-05,
"loss": 0.239,
"step": 12500
},
{
"epoch": 11.07,
"learning_rate": 5.587968286630757e-05,
"loss": 0.2302,
"step": 13000
},
{
"epoch": 11.5,
"learning_rate": 5.436966652544938e-05,
"loss": 0.2256,
"step": 13500
},
{
"epoch": 11.93,
"learning_rate": 5.285662409973976e-05,
"loss": 0.2259,
"step": 14000
},
{
"epoch": 12.35,
"learning_rate": 5.1343581674030136e-05,
"loss": 0.206,
"step": 14500
},
{
"epoch": 12.78,
"learning_rate": 4.9830539248320526e-05,
"loss": 0.2197,
"step": 15000
},
{
"epoch": 13.2,
"learning_rate": 4.831749682261091e-05,
"loss": 0.2008,
"step": 15500
},
{
"epoch": 13.63,
"learning_rate": 4.680445439690129e-05,
"loss": 0.1843,
"step": 16000
},
{
"epoch": 14.05,
"learning_rate": 4.5291411971191675e-05,
"loss": 0.1896,
"step": 16500
},
{
"epoch": 14.48,
"learning_rate": 4.377836954548206e-05,
"loss": 0.184,
"step": 17000
},
{
"epoch": 14.91,
"learning_rate": 4.226532711977244e-05,
"loss": 0.1788,
"step": 17500
},
{
"epoch": 15.33,
"learning_rate": 4.0752284694062824e-05,
"loss": 0.174,
"step": 18000
},
{
"epoch": 15.76,
"learning_rate": 3.923924226835321e-05,
"loss": 0.177,
"step": 18500
},
{
"epoch": 16.18,
"learning_rate": 3.772619984264359e-05,
"loss": 0.161,
"step": 19000
},
{
"epoch": 16.61,
"learning_rate": 3.6213157416933974e-05,
"loss": 0.1548,
"step": 19500
},
{
"epoch": 17.04,
"learning_rate": 3.470616716092719e-05,
"loss": 0.1719,
"step": 20000
},
{
"epoch": 17.46,
"learning_rate": 3.3193124735217576e-05,
"loss": 0.159,
"step": 20500
},
{
"epoch": 17.89,
"learning_rate": 3.168008230950796e-05,
"loss": 0.1466,
"step": 21000
},
{
"epoch": 18.31,
"learning_rate": 3.0167039883798342e-05,
"loss": 0.153,
"step": 21500
},
{
"epoch": 18.74,
"learning_rate": 2.865399745808873e-05,
"loss": 0.1399,
"step": 22000
},
{
"epoch": 19.17,
"learning_rate": 2.7140955032379112e-05,
"loss": 0.1369,
"step": 22500
},
{
"epoch": 19.59,
"learning_rate": 2.5630938691520913e-05,
"loss": 0.1397,
"step": 23000
},
{
"epoch": 20.02,
"learning_rate": 2.4117896265811293e-05,
"loss": 0.1351,
"step": 23500
},
{
"epoch": 20.44,
"learning_rate": 2.260485384010168e-05,
"loss": 0.1256,
"step": 24000
},
{
"epoch": 20.87,
"learning_rate": 2.109181141439206e-05,
"loss": 0.128,
"step": 24500
},
{
"epoch": 21.29,
"learning_rate": 1.9581795073533863e-05,
"loss": 0.1184,
"step": 25000
},
{
"epoch": 21.72,
"learning_rate": 1.8068752647824246e-05,
"loss": 0.1312,
"step": 25500
},
{
"epoch": 22.15,
"learning_rate": 1.655571022211463e-05,
"loss": 0.1138,
"step": 26000
},
{
"epoch": 22.57,
"learning_rate": 1.5042667796405011e-05,
"loss": 0.1123,
"step": 26500
},
{
"epoch": 23.0,
"learning_rate": 1.3529625370695396e-05,
"loss": 0.1144,
"step": 27000
},
{
"epoch": 23.42,
"learning_rate": 1.2019609029837197e-05,
"loss": 0.1076,
"step": 27500
},
{
"epoch": 23.85,
"learning_rate": 1.0509592688979e-05,
"loss": 0.1054,
"step": 28000
},
{
"epoch": 24.28,
"learning_rate": 8.996550263269383e-06,
"loss": 0.1033,
"step": 28500
},
{
"epoch": 24.7,
"learning_rate": 7.483507837559765e-06,
"loss": 0.0993,
"step": 29000
},
{
"epoch": 25.13,
"learning_rate": 5.970465411850148e-06,
"loss": 0.1,
"step": 29500
},
{
"epoch": 25.55,
"learning_rate": 4.457422986140532e-06,
"loss": 0.0985,
"step": 30000
},
{
"epoch": 25.98,
"learning_rate": 2.9443805604309144e-06,
"loss": 0.0928,
"step": 30500
},
{
"epoch": 26.41,
"learning_rate": 1.4313381347212977e-06,
"loss": 0.0913,
"step": 31000
},
{
"epoch": 26.83,
"learning_rate": 0.0,
"loss": 0.0947,
"step": 31500
},
{
"epoch": 27.26,
"learning_rate": 0.0,
"loss": 0.1028,
"step": 32000
},
{
"epoch": 27.68,
"learning_rate": 0.0,
"loss": 0.0895,
"step": 32500
},
{
"epoch": 28.11,
"learning_rate": 0.0,
"loss": 0.0894,
"step": 33000
},
{
"epoch": 28.53,
"learning_rate": 0.0,
"loss": 0.0894,
"step": 33500
},
{
"epoch": 28.96,
"learning_rate": 0.0,
"loss": 0.0924,
"step": 34000
}
],
"max_steps": 34046,
"num_train_epochs": 29,
"total_flos": 9.909309665241907e+18,
"trial_name": null,
"trial_params": null
}
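
The state above appears to come from a Hugging Face Transformers Trainer run: loss and learning rate are logged every 500 steps, and the linear learning-rate decay reaches zero around step 31500. Below is a minimal sketch, not part of trainer_state.json itself, of how this log could be inspected; it assumes the file is saved locally as trainer_state.json and that matplotlib is installed. The file path and plot layout are illustrative choices, not taken from the source.

import json
import matplotlib.pyplot as plt

# Load the trainer state (path is an assumption for this sketch).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only entries that carry a training loss; in general, log_history can also
# contain evaluation or summary entries without a "loss" key.
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]
lrs = [e["learning_rate"] for e in entries]

# Plot training loss and learning-rate schedule side by side.
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()

Running this on the file above would show the loss falling from roughly 0.56 to about 0.09 over 34000 steps, and the learning rate decaying linearly from about 9.4e-05 to zero.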