{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.00125,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.5e-05,
      "grad_norm": 1030.0911865234375,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 343.4563,
      "loss/crossentropy": 2.979090690612793,
      "loss/hidden": 15.25,
      "loss/logits": 0.38270998001098633,
      "loss/reg": 343.0735778808594,
      "step": 1
    },
    {
      "epoch": 5e-05,
      "grad_norm": 11.19842529296875,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 343.3944,
      "loss/crossentropy": 2.8327953815460205,
      "loss/hidden": 15.25,
      "loss/logits": 0.3208012580871582,
      "loss/reg": 343.0735778808594,
      "step": 2
    },
    {
      "epoch": 7.5e-05,
      "grad_norm": 7.9109930992126465,
      "learning_rate": 3e-06,
      "loss": 343.4621,
      "loss/crossentropy": 3.085212469100952,
      "loss/hidden": 15.25,
      "loss/logits": 0.3886082172393799,
      "loss/reg": 343.0734558105469,
      "step": 3
    },
    {
      "epoch": 0.0001,
      "grad_norm": 278.76263427734375,
      "learning_rate": 4.000000000000001e-06,
      "loss": 343.3857,
      "loss/crossentropy": 2.7368664741516113,
      "loss/hidden": 15.25,
      "loss/logits": 0.31229129433631897,
      "loss/reg": 343.0733642578125,
      "step": 4
    },
    {
      "epoch": 0.000125,
      "grad_norm": 12.900250434875488,
      "learning_rate": 5e-06,
      "loss": 343.4269,
      "loss/crossentropy": 2.957352638244629,
      "loss/hidden": 15.25,
      "loss/logits": 0.35360077023506165,
      "loss/reg": 343.0732727050781,
      "step": 5
    },
    {
      "epoch": 0.00015,
      "grad_norm": 11.356637954711914,
      "learning_rate": 6e-06,
      "loss": 343.4677,
      "loss/crossentropy": 2.8502368927001953,
      "loss/hidden": 15.25,
      "loss/logits": 0.3944404125213623,
      "loss/reg": 343.07330322265625,
      "step": 6
    },
    {
      "epoch": 0.000175,
      "grad_norm": 8.149554252624512,
      "learning_rate": 7.000000000000001e-06,
      "loss": 343.3993,
      "loss/crossentropy": 2.816471815109253,
      "loss/hidden": 15.25,
      "loss/logits": 0.3256686329841614,
      "loss/reg": 343.0736083984375,
      "step": 7
    },
    {
      "epoch": 0.0002,
      "grad_norm": 6.3728837966918945,
      "learning_rate": 8.000000000000001e-06,
      "loss": 343.4041,
      "loss/crossentropy": 2.939116954803467,
      "loss/hidden": 15.25,
      "loss/logits": 0.33104804158210754,
      "loss/reg": 343.0730285644531,
      "step": 8
    },
    {
      "epoch": 0.000225,
      "grad_norm": 4.597896575927734,
      "learning_rate": 9e-06,
      "loss": 343.33,
      "loss/crossentropy": 2.5655953884124756,
      "loss/hidden": 15.25,
      "loss/logits": 0.257835328578949,
      "loss/reg": 343.0721435546875,
      "step": 9
    },
    {
      "epoch": 0.00025,
      "grad_norm": 8.375211715698242,
      "learning_rate": 1e-05,
      "loss": 343.3147,
      "loss/crossentropy": 2.869285821914673,
      "loss/hidden": 15.25,
      "loss/logits": 0.24429196119308472,
      "loss/reg": 343.0704345703125,
      "step": 10
    },
    {
      "epoch": 0.000275,
      "grad_norm": 3.975677728652954,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 343.2932,
      "loss/crossentropy": 2.670386552810669,
      "loss/hidden": 15.25,
      "loss/logits": 0.22499963641166687,
      "loss/reg": 343.0682373046875,
      "step": 11
    },
    {
      "epoch": 0.0003,
      "grad_norm": 5.542755126953125,
      "learning_rate": 1.2e-05,
      "loss": 343.3178,
      "loss/crossentropy": 3.0790939331054688,
      "loss/hidden": 15.25,
      "loss/logits": 0.25476354360580444,
      "loss/reg": 343.06304931640625,
      "step": 12
    },
    {
      "epoch": 0.000325,
      "grad_norm": 2.421396017074585,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 343.2363,
      "loss/crossentropy": 2.8270552158355713,
      "loss/hidden": 15.25,
      "loss/logits": 0.17909111082553864,
      "loss/reg": 343.05718994140625,
      "step": 13
    },
    {
      "epoch": 0.00035,
      "grad_norm": 2.906562328338623,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 343.1907,
      "loss/crossentropy": 2.4849202632904053,
      "loss/hidden": 15.25,
      "loss/logits": 0.14080765843391418,
      "loss/reg": 343.04986572265625,
      "step": 14
    },
    {
      "epoch": 0.000375,
      "grad_norm": 1.9625457525253296,
      "learning_rate": 1.5e-05,
      "loss": 343.2174,
      "loss/crossentropy": 3.037820339202881,
      "loss/hidden": 15.25,
      "loss/logits": 0.17379778623580933,
      "loss/reg": 343.0436096191406,
      "step": 15
    },
    {
      "epoch": 0.0004,
      "grad_norm": 1.572465181350708,
      "grad_norm_var": 67825.2066513546,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 343.1614,
      "loss/crossentropy": 2.9201037883758545,
      "loss/hidden": 15.25,
      "loss/logits": 0.12472350150346756,
      "loss/reg": 343.0367126464844,
      "step": 16
    },
    {
      "epoch": 0.000425,
      "grad_norm": 1.4828628301620483,
      "grad_norm_var": 4661.80263977645,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 343.142,
      "loss/crossentropy": 2.6999711990356445,
      "loss/hidden": 15.25,
      "loss/logits": 0.11364762485027313,
      "loss/reg": 343.0283508300781,
      "step": 17
    },
    {
      "epoch": 0.00045,
      "grad_norm": 1.8871707916259766,
      "grad_norm_var": 4681.988536067261,
      "learning_rate": 1.8e-05,
      "loss": 343.1356,
      "loss/crossentropy": 3.0806515216827393,
      "loss/hidden": 15.25,
      "loss/logits": 0.11845001578330994,
      "loss/reg": 343.01715087890625,
      "step": 18
    },
    {
      "epoch": 0.000475,
      "grad_norm": 1.2818211317062378,
      "grad_norm_var": 4697.640033071101,
      "learning_rate": 1.9e-05,
      "loss": 343.1078,
      "loss/crossentropy": 2.988394260406494,
      "loss/hidden": 15.25,
      "loss/logits": 0.09374003112316132,
      "loss/reg": 343.0140075683594,
      "step": 19
    },
    {
      "epoch": 0.0005,
      "grad_norm": 1.2304683923721313,
      "grad_norm_var": 13.90872762408596,
      "learning_rate": 2e-05,
      "loss": 343.1153,
      "loss/crossentropy": 2.9062788486480713,
      "loss/hidden": 15.25,
      "loss/logits": 0.09611324220895767,
      "loss/reg": 343.0191650390625,
      "step": 20
    },
    {
      "epoch": 0.000525,
      "grad_norm": 1.2078300714492798,
      "grad_norm_var": 9.74869168975174,
      "learning_rate": 2.1e-05,
      "loss": 343.1086,
      "loss/crossentropy": 2.629263162612915,
      "loss/hidden": 15.25,
      "loss/logits": 0.08695611357688904,
      "loss/reg": 343.0216369628906,
      "step": 21
    },
    {
      "epoch": 0.00055,
      "grad_norm": 0.9980544447898865,
      "grad_norm_var": 6.322323347833189,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 343.0953,
      "loss/crossentropy": 2.5109262466430664,
      "loss/hidden": 15.25,
      "loss/logits": 0.07269689440727234,
      "loss/reg": 343.0226135253906,
      "step": 22
    },
    {
      "epoch": 0.000575,
      "grad_norm": 1.1052573919296265,
      "grad_norm_var": 4.937209980551387,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 343.1042,
      "loss/crossentropy": 3.1142160892486572,
      "loss/hidden": 15.25,
      "loss/logits": 0.08075863122940063,
      "loss/reg": 343.02349853515625,
      "step": 23
    },
    {
      "epoch": 0.0006,
      "grad_norm": 0.9378647208213806,
      "grad_norm_var": 4.290323212534128,
      "learning_rate": 2.4e-05,
      "loss": 343.0883,
      "loss/crossentropy": 2.7667906284332275,
      "loss/hidden": 15.25,
      "loss/logits": 0.06898947060108185,
      "loss/reg": 343.0193176269531,
      "step": 24
    },
    {
      "epoch": 0.000625,
      "grad_norm": 0.8331053853034973,
      "grad_norm_var": 4.169706484634468,
      "learning_rate": 2.5e-05,
      "loss": 343.0793,
      "loss/crossentropy": 2.769016981124878,
      "loss/hidden": 15.25,
      "loss/logits": 0.06074293702840805,
      "loss/reg": 343.0185852050781,
      "step": 25
    },
    {
      "epoch": 0.00065,
      "grad_norm": 5.7551751136779785,
      "grad_norm_var": 2.4965497148631832,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 343.0914,
      "loss/crossentropy": 3.0031418800354004,
      "loss/hidden": 15.25,
      "loss/logits": 0.07152244448661804,
      "loss/reg": 343.0198669433594,
      "step": 26
    },
    {
      "epoch": 0.000675,
      "grad_norm": 0.832045316696167,
      "grad_norm_var": 2.3673310147404356,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 343.0815,
      "loss/crossentropy": 2.9811668395996094,
      "loss/hidden": 15.25,
      "loss/logits": 0.061069637537002563,
      "loss/reg": 343.0203857421875,
      "step": 27
    },
    {
      "epoch": 0.0007,
      "grad_norm": 0.8330180048942566,
      "grad_norm_var": 1.5272837131405466,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 343.0781,
      "loss/crossentropy": 2.740814447402954,
      "loss/hidden": 15.25,
      "loss/logits": 0.054962363094091415,
      "loss/reg": 343.0231018066406,
      "step": 28
    },
    {
      "epoch": 0.000725,
      "grad_norm": 0.7338961958885193,
      "grad_norm_var": 1.5436180822593486,
      "learning_rate": 2.9e-05,
      "loss": 343.0786,
      "loss/crossentropy": 2.854477643966675,
      "loss/hidden": 15.25,
      "loss/logits": 0.05266527086496353,
      "loss/reg": 343.02593994140625,
      "step": 29
    },
    {
      "epoch": 0.00075,
      "grad_norm": 1.040590524673462,
      "grad_norm_var": 1.4355461941067227,
      "learning_rate": 3e-05,
      "loss": 343.0964,
      "loss/crossentropy": 2.868793487548828,
      "loss/hidden": 15.25,
      "loss/logits": 0.058403968811035156,
      "loss/reg": 343.0379943847656,
      "step": 30
    },
    {
      "epoch": 0.000775,
      "grad_norm": 0.8477593064308167,
      "grad_norm_var": 1.4416249160873411,
      "learning_rate": 3.1e-05,
      "loss": 343.0889,
      "loss/crossentropy": 2.620675802230835,
      "loss/hidden": 15.25,
      "loss/logits": 0.05073639750480652,
      "loss/reg": 343.03814697265625,
      "step": 31
    },
    {
      "epoch": 0.0008,
      "grad_norm": 0.9066484570503235,
      "grad_norm_var": 1.455016528557988,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 343.0885,
      "loss/crossentropy": 2.8256523609161377,
      "loss/hidden": 15.25,
      "loss/logits": 0.04819165915250778,
      "loss/reg": 343.040283203125,
      "step": 32
    },
    {
      "epoch": 0.000825,
      "grad_norm": 1.3142300844192505,
      "grad_norm_var": 1.4542471534457053,
      "learning_rate": 3.3e-05,
      "loss": 343.1022,
      "loss/crossentropy": 3.081716299057007,
      "loss/hidden": 15.25,
      "loss/logits": 0.050295330584049225,
      "loss/reg": 343.0518798828125,
      "step": 33
    },
    {
      "epoch": 0.00085,
      "grad_norm": 1.2658751010894775,
      "grad_norm_var": 1.4346241521510184,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 343.1093,
      "loss/crossentropy": 3.0131924152374268,
      "loss/hidden": 15.25,
      "loss/logits": 0.05284588038921356,
      "loss/reg": 343.056396484375,
      "step": 34
    },
    {
      "epoch": 0.000875,
      "grad_norm": 0.9082849025726318,
      "grad_norm_var": 1.445257555860012,
      "learning_rate": 3.5e-05,
      "loss": 343.1118,
      "loss/crossentropy": 2.540893077850342,
      "loss/hidden": 15.25,
      "loss/logits": 0.0445842370390892,
      "loss/reg": 343.0672607421875,
      "step": 35
    },
    {
      "epoch": 0.0009,
      "grad_norm": 0.9978659749031067,
      "grad_norm_var": 1.4506987607639776,
      "learning_rate": 3.6e-05,
      "loss": 343.1154,
      "loss/crossentropy": 2.965841054916382,
      "loss/hidden": 15.25,
      "loss/logits": 0.04799606651067734,
      "loss/reg": 343.0673828125,
      "step": 36
    },
    {
      "epoch": 0.000925,
      "grad_norm": 1.0756022930145264,
      "grad_norm_var": 1.453105227206015,
      "learning_rate": 3.7e-05,
      "loss": 343.1204,
      "loss/crossentropy": 2.497769832611084,
      "loss/hidden": 15.25,
      "loss/logits": 0.04083085432648659,
      "loss/reg": 343.07952880859375,
      "step": 37
    },
    {
      "epoch": 0.00095,
      "grad_norm": 0.9938598871231079,
      "grad_norm_var": 1.4532607006280953,
      "learning_rate": 3.8e-05,
      "loss": 343.1224,
      "loss/crossentropy": 2.590686559677124,
      "loss/hidden": 15.25,
      "loss/logits": 0.03972693532705307,
      "loss/reg": 343.0826416015625,
      "step": 38
    },
    {
      "epoch": 0.000975,
      "grad_norm": 1.0691267251968384,
      "grad_norm_var": 1.4541543145040774,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 343.1341,
      "loss/crossentropy": 2.837007999420166,
      "loss/hidden": 15.25,
      "loss/logits": 0.05051286518573761,
      "loss/reg": 343.0836181640625,
      "step": 39
    },
    {
      "epoch": 0.001,
      "grad_norm": 0.8595760464668274,
      "grad_norm_var": 1.4580206512604335,
      "learning_rate": 4e-05,
      "loss": 343.1299,
      "loss/crossentropy": 2.654414176940918,
      "loss/hidden": 15.25,
      "loss/logits": 0.03836715966463089,
      "loss/reg": 343.0915832519531,
      "step": 40
    },
    {
      "epoch": 0.001025,
      "grad_norm": 1.2864758968353271,
      "grad_norm_var": 1.4446587103261934,
      "learning_rate": 4.1e-05,
      "loss": 343.1393,
      "loss/crossentropy": 3.039605140686035,
      "loss/hidden": 15.25,
      "loss/logits": 0.050123609602451324,
      "loss/reg": 343.0892028808594,
      "step": 41
    },
    {
      "epoch": 0.00105,
      "grad_norm": 0.8998011350631714,
      "grad_norm_var": 0.030633521906435077,
      "learning_rate": 4.2e-05,
      "loss": 343.139,
      "loss/crossentropy": 2.6901302337646484,
      "loss/hidden": 15.25,
      "loss/logits": 0.04389282315969467,
      "loss/reg": 343.0951232910156,
      "step": 42
    },
    {
      "epoch": 0.001075,
      "grad_norm": 1.2220802307128906,
      "grad_norm_var": 0.03184695650448734,
      "learning_rate": 4.3e-05,
      "loss": 343.1362,
      "loss/crossentropy": 2.9773449897766113,
      "loss/hidden": 15.25,
      "loss/logits": 0.04332903027534485,
      "loss/reg": 343.0928955078125,
      "step": 43
    },
    {
      "epoch": 0.0011,
      "grad_norm": 0.8919843435287476,
      "grad_norm_var": 0.030626277057042593,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 343.1251,
      "loss/crossentropy": 2.795628309249878,
      "loss/hidden": 15.25,
      "loss/logits": 0.038088709115982056,
      "loss/reg": 343.0870361328125,
      "step": 44
    },
    {
      "epoch": 0.001125,
      "grad_norm": 0.8417184352874756,
      "grad_norm_var": 0.027245464758761707,
      "learning_rate": 4.5e-05,
      "loss": 343.1212,
      "loss/crossentropy": 2.858243465423584,
      "loss/hidden": 15.25,
      "loss/logits": 0.03852926567196846,
      "loss/reg": 343.0826110839844,
      "step": 45
    },
    {
      "epoch": 0.00115,
      "grad_norm": 0.8550565838813782,
      "grad_norm_var": 0.029044425822037433,
      "learning_rate": 4.600000000000001e-05,
      "loss": 343.1173,
      "loss/crossentropy": 2.7999496459960938,
      "loss/hidden": 15.25,
      "loss/logits": 0.03844599053263664,
      "loss/reg": 343.078857421875,
      "step": 46
    },
    {
      "epoch": 0.001175,
      "grad_norm": 0.958902895450592,
      "grad_norm_var": 0.027341873139830008,
      "learning_rate": 4.7e-05,
      "loss": 343.1191,
      "loss/crossentropy": 2.7856900691986084,
      "loss/hidden": 15.25,
      "loss/logits": 0.0381263792514801,
      "loss/reg": 343.0809631347656,
      "step": 47
    },
    {
      "epoch": 0.0012,
      "grad_norm": 0.9236550331115723,
      "grad_norm_var": 0.02709908097331013,
      "learning_rate": 4.8e-05,
      "loss": 343.1286,
      "loss/crossentropy": 2.7613890171051025,
      "loss/hidden": 15.25,
      "loss/logits": 0.03775141388177872,
      "loss/reg": 343.09088134765625,
      "step": 48
    },
    {
      "epoch": 0.001225,
      "grad_norm": 0.94582200050354,
      "grad_norm_var": 0.021264340723863028,
      "learning_rate": 4.9e-05,
      "loss": 343.126,
      "loss/crossentropy": 2.9882125854492188,
      "loss/hidden": 15.25,
      "loss/logits": 0.03915192931890488,
      "loss/reg": 343.0868835449219,
      "step": 49
    },
    {
      "epoch": 0.00125,
      "grad_norm": 1.2259169816970825,
      "grad_norm_var": 0.01994617955941269,
      "learning_rate": 5e-05,
      "loss": 343.129,
      "loss/crossentropy": 2.7120375633239746,
      "loss/hidden": 15.25,
      "loss/logits": 0.04252094402909279,
      "loss/reg": 343.08648681640625,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 40000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": true,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3227844083712000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}