{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2142190053700677,
  "eval_steps": 500,
  "global_step": 1300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0009339248190520663,
      "grad_norm": 6.638877692627699,
      "learning_rate": 9.345794392523364e-07,
      "loss": 9.2917,
      "step": 1
    },
    {
      "epoch": 0.009339248190520663,
      "grad_norm": 1.1560921335705272,
      "learning_rate": 9.345794392523365e-06,
      "loss": 9.0876,
      "step": 10
    },
    {
      "epoch": 0.018678496381041326,
      "grad_norm": 0.8415132296956432,
      "learning_rate": 1.869158878504673e-05,
      "loss": 8.2164,
      "step": 20
    },
    {
      "epoch": 0.02801774457156199,
      "grad_norm": 0.45381630992958155,
      "learning_rate": 2.8037383177570094e-05,
      "loss": 7.5184,
      "step": 30
    },
    {
      "epoch": 0.03735699276208265,
      "grad_norm": 0.8400636107958425,
      "learning_rate": 3.738317757009346e-05,
      "loss": 6.6507,
      "step": 40
    },
    {
      "epoch": 0.046696240952603316,
      "grad_norm": 0.557696240829066,
      "learning_rate": 4.672897196261683e-05,
      "loss": 5.8909,
      "step": 50
    },
    {
      "epoch": 0.05603548914312398,
      "grad_norm": 0.3971996057467842,
      "learning_rate": 5.607476635514019e-05,
      "loss": 5.4127,
      "step": 60
    },
    {
      "epoch": 0.06537473733364464,
      "grad_norm": 0.2932710540265688,
      "learning_rate": 6.542056074766355e-05,
      "loss": 5.0106,
      "step": 70
    },
    {
      "epoch": 0.0747139855241653,
      "grad_norm": 0.3682690443551033,
      "learning_rate": 7.476635514018692e-05,
      "loss": 4.6042,
      "step": 80
    },
    {
      "epoch": 0.08405323371468597,
      "grad_norm": 0.3132971920011515,
      "learning_rate": 8.411214953271028e-05,
      "loss": 4.2031,
      "step": 90
    },
    {
      "epoch": 0.09339248190520663,
      "grad_norm": 0.6731868159213446,
      "learning_rate": 9.345794392523365e-05,
      "loss": 3.9423,
      "step": 100
    },
    {
      "epoch": 0.1027317300957273,
      "grad_norm": 0.27848867836763197,
      "learning_rate": 0.000102803738317757,
      "loss": 3.7157,
      "step": 110
    },
    {
      "epoch": 0.11207097828624796,
      "grad_norm": 0.24642109032991807,
      "learning_rate": 0.00011214953271028037,
      "loss": 3.4516,
      "step": 120
    },
    {
      "epoch": 0.12141022647676862,
      "grad_norm": 0.25717384664029797,
      "learning_rate": 0.00012149532710280373,
      "loss": 3.2167,
      "step": 130
    },
    {
      "epoch": 0.13074947466728928,
      "grad_norm": 0.20912922668565637,
      "learning_rate": 0.0001308411214953271,
      "loss": 3.0237,
      "step": 140
    },
    {
      "epoch": 0.14008872285780993,
      "grad_norm": 0.15805888388706113,
      "learning_rate": 0.00014018691588785047,
      "loss": 2.8529,
      "step": 150
    },
    {
      "epoch": 0.1494279710483306,
      "grad_norm": 0.23370349497479534,
      "learning_rate": 0.00014953271028037384,
      "loss": 2.7078,
      "step": 160
    },
    {
      "epoch": 0.15876721923885126,
      "grad_norm": 0.1802138633012483,
      "learning_rate": 0.0001588785046728972,
      "loss": 2.6115,
      "step": 170
    },
    {
      "epoch": 0.16810646742937194,
      "grad_norm": 0.13354347610039718,
      "learning_rate": 0.00016822429906542056,
      "loss": 2.5309,
      "step": 180
    },
    {
      "epoch": 0.17744571561989259,
      "grad_norm": 0.09414865188086892,
      "learning_rate": 0.00017757009345794393,
      "loss": 2.4452,
      "step": 190
    },
    {
      "epoch": 0.18678496381041326,
      "grad_norm": 0.08333601554768896,
      "learning_rate": 0.0001869158878504673,
      "loss": 2.3832,
      "step": 200
    },
    {
      "epoch": 0.1961242120009339,
      "grad_norm": 0.15926414699806835,
      "learning_rate": 0.00019626168224299065,
      "loss": 2.3492,
      "step": 210
    },
    {
      "epoch": 0.2054634601914546,
      "grad_norm": 0.09492820761057012,
      "learning_rate": 0.0001999989254250208,
      "loss": 2.323,
      "step": 220
    },
    {
      "epoch": 0.21480270838197524,
      "grad_norm": 0.0801349259356147,
      "learning_rate": 0.00019999235866155886,
      "loss": 2.2731,
      "step": 230
    },
    {
      "epoch": 0.22414195657249592,
      "grad_norm": 0.12210960524693895,
      "learning_rate": 0.00019997982251228469,
      "loss": 2.2433,
      "step": 240
    },
    {
      "epoch": 0.23348120476301656,
      "grad_norm": 3.14289498732125,
      "learning_rate": 0.00019996131772558666,
      "loss": 3.2769,
      "step": 250
    },
    {
      "epoch": 0.24282045295353724,
      "grad_norm": 1.632940983166179,
      "learning_rate": 0.00019993684540617132,
      "loss": 4.9343,
      "step": 260
    },
    {
      "epoch": 0.2521597011440579,
      "grad_norm": 3.4831252230225416,
      "learning_rate": 0.00019990640701499736,
      "loss": 4.2768,
      "step": 270
    },
    {
      "epoch": 0.26149894933457857,
      "grad_norm": 1.6069045920523788,
      "learning_rate": 0.00019987000436918874,
      "loss": 5.9581,
      "step": 280
    },
    {
      "epoch": 0.27083819752509924,
      "grad_norm": 0.2220907936615993,
      "learning_rate": 0.00019982763964192585,
      "loss": 3.8228,
      "step": 290
    },
    {
      "epoch": 0.28017744571561987,
      "grad_norm": 0.24737284913291765,
      "learning_rate": 0.00019977931536231596,
      "loss": 3.1413,
      "step": 300
    },
    {
      "epoch": 0.28951669390614054,
      "grad_norm": 4.010404518241152,
      "learning_rate": 0.00019972503441524224,
      "loss": 2.8432,
      "step": 310
    },
    {
      "epoch": 0.2988559420966612,
      "grad_norm": 0.1515583580811596,
      "learning_rate": 0.00019966480004119142,
      "loss": 2.7859,
      "step": 320
    },
    {
      "epoch": 0.3081951902871819,
      "grad_norm": 0.11259395750650594,
      "learning_rate": 0.00019959861583606045,
      "loss": 2.5821,
      "step": 330
    },
    {
      "epoch": 0.3175344384777025,
      "grad_norm": 0.22514797814956813,
      "learning_rate": 0.00019952648575094183,
      "loss": 2.4517,
      "step": 340
    },
    {
      "epoch": 0.3268736866682232,
      "grad_norm": 0.08040136172033542,
      "learning_rate": 0.00019944841409188767,
      "loss": 2.3794,
      "step": 350
    },
    {
      "epoch": 0.3362129348587439,
      "grad_norm": 0.054758073593565354,
      "learning_rate": 0.00019936440551965263,
      "loss": 2.3232,
      "step": 360
    },
    {
      "epoch": 0.34555218304926455,
      "grad_norm": 0.06742998909645591,
      "learning_rate": 0.00019927446504941577,
      "loss": 2.2776,
      "step": 370
    },
    {
      "epoch": 0.35489143123978517,
      "grad_norm": 0.048780907584876736,
      "learning_rate": 0.00019917859805048096,
      "loss": 2.2376,
      "step": 380
    },
    {
      "epoch": 0.36423067943030585,
      "grad_norm": 0.0475325963052214,
      "learning_rate": 0.00019907681024595663,
      "loss": 2.2191,
      "step": 390
    },
    {
      "epoch": 0.3735699276208265,
      "grad_norm": 0.054089563211590065,
      "learning_rate": 0.00019896910771241387,
      "loss": 2.1961,
      "step": 400
    },
    {
      "epoch": 0.3829091758113472,
      "grad_norm": 0.21798406131864823,
      "learning_rate": 0.00019885549687952372,
      "loss": 2.2078,
      "step": 410
    },
    {
      "epoch": 0.3922484240018678,
      "grad_norm": 0.8673185709111124,
      "learning_rate": 0.00019873598452967338,
      "loss": 2.3731,
      "step": 420
    },
    {
      "epoch": 0.4015876721923885,
      "grad_norm": 0.22424350669971718,
      "learning_rate": 0.0001986105777975613,
      "loss": 2.6195,
      "step": 430
    },
    {
      "epoch": 0.4109269203829092,
      "grad_norm": 0.307418135168262,
      "learning_rate": 0.00019847928416977126,
      "loss": 2.3624,
      "step": 440
    },
    {
      "epoch": 0.42026616857342985,
      "grad_norm": 0.07944722668080402,
      "learning_rate": 0.00019834211148432536,
      "loss": 2.2799,
      "step": 450
    },
    {
      "epoch": 0.4296054167639505,
      "grad_norm": 0.18146933758664588,
      "learning_rate": 0.00019819906793021614,
      "loss": 2.2177,
      "step": 460
    },
    {
      "epoch": 0.43894466495447115,
      "grad_norm": 0.07035825837333018,
      "learning_rate": 0.0001980501620469178,
      "loss": 2.1767,
      "step": 470
    },
    {
      "epoch": 0.44828391314499183,
      "grad_norm": 0.04596186944454228,
      "learning_rate": 0.0001978954027238763,
      "loss": 2.1598,
      "step": 480
    },
    {
      "epoch": 0.4576231613355125,
      "grad_norm": 0.041342347745088055,
      "learning_rate": 0.0001977347991999786,
      "loss": 2.131,
      "step": 490
    },
    {
      "epoch": 0.46696240952603313,
      "grad_norm": 0.04172063219841485,
      "learning_rate": 0.00019756836106300137,
      "loss": 2.1231,
      "step": 500
    },
    {
      "epoch": 0.4763016577165538,
      "grad_norm": 0.03373646457711144,
      "learning_rate": 0.00019739609824903843,
      "loss": 2.1146,
      "step": 510
    },
    {
      "epoch": 0.4856409059070745,
      "grad_norm": 0.03736871030676605,
      "learning_rate": 0.00019721802104190748,
      "loss": 2.1003,
      "step": 520
    },
    {
      "epoch": 0.49498015409759516,
      "grad_norm": 0.033931028038211034,
      "learning_rate": 0.00019703414007253645,
      "loss": 2.0983,
      "step": 530
    },
    {
      "epoch": 0.5043194022881158,
      "grad_norm": 0.03790055446070549,
      "learning_rate": 0.00019684446631832868,
      "loss": 2.092,
      "step": 540
    },
    {
      "epoch": 0.5136586504786365,
      "grad_norm": 0.030956192803893078,
      "learning_rate": 0.00019664901110250758,
      "loss": 2.0807,
      "step": 550
    },
    {
      "epoch": 0.5229978986691571,
      "grad_norm": 0.03542530209935129,
      "learning_rate": 0.00019644778609344068,
      "loss": 2.0773,
      "step": 560
    },
    {
      "epoch": 0.5323371468596778,
      "grad_norm": 0.040947757568902336,
      "learning_rate": 0.00019624080330394306,
      "loss": 2.0649,
      "step": 570
    },
    {
      "epoch": 0.5416763950501985,
      "grad_norm": 0.034273415973688146,
      "learning_rate": 0.00019602807509056018,
      "loss": 2.0479,
      "step": 580
    },
    {
      "epoch": 0.5510156432407192,
      "grad_norm": 0.031427481498873144,
      "learning_rate": 0.00019580961415283028,
      "loss": 2.0563,
      "step": 590
    },
    {
      "epoch": 0.5603548914312397,
      "grad_norm": 0.03141549752041532,
      "learning_rate": 0.00019558543353252611,
      "loss": 2.0503,
      "step": 600
    },
    {
      "epoch": 0.5696941396217604,
      "grad_norm": 0.033012392726428204,
      "learning_rate": 0.00019535554661287652,
      "loss": 2.0389,
      "step": 610
    },
    {
      "epoch": 0.5790333878122811,
      "grad_norm": 0.02913261992661444,
      "learning_rate": 0.0001951199671177673,
      "loss": 2.036,
      "step": 620
    },
    {
      "epoch": 0.5883726360028018,
      "grad_norm": 0.030543903708435332,
      "learning_rate": 0.00019487870911092214,
      "loss": 2.0326,
      "step": 630
    },
    {
      "epoch": 0.5977118841933224,
      "grad_norm": 0.03215005545393897,
      "learning_rate": 0.00019463178699506277,
      "loss": 2.0231,
      "step": 640
    },
    {
      "epoch": 0.6070511323838431,
      "grad_norm": 0.03823630791937631,
      "learning_rate": 0.00019437921551104933,
      "loss": 2.0293,
      "step": 650
    },
    {
      "epoch": 0.6163903805743638,
      "grad_norm": 0.03200103149471209,
      "learning_rate": 0.00019412100973700038,
      "loss": 2.017,
      "step": 660
    },
    {
      "epoch": 0.6257296287648845,
      "grad_norm": 0.03841804153577787,
      "learning_rate": 0.00019385718508739262,
      "loss": 2.0135,
      "step": 670
    },
    {
      "epoch": 0.635068876955405,
      "grad_norm": 0.03052396655271533,
      "learning_rate": 0.0001935877573121407,
      "loss": 2.0237,
      "step": 680
    },
    {
      "epoch": 0.6444081251459257,
      "grad_norm": 0.033817837533771815,
      "learning_rate": 0.00019331274249565717,
      "loss": 2.0069,
      "step": 690
    },
    {
      "epoch": 0.6537473733364464,
      "grad_norm": 0.028286999650643876,
      "learning_rate": 0.00019303215705589194,
      "loss": 2.0112,
      "step": 700
    },
    {
      "epoch": 0.6630866215269671,
      "grad_norm": 0.030693948626257357,
      "learning_rate": 0.00019274601774335243,
      "loss": 2.0,
      "step": 710
    },
    {
      "epoch": 0.6724258697174877,
      "grad_norm": 0.029984615635953022,
      "learning_rate": 0.0001924543416401035,
      "loss": 2.0028,
      "step": 720
    },
    {
      "epoch": 0.6817651179080084,
      "grad_norm": 0.02915985673921391,
      "learning_rate": 0.00019215714615874755,
      "loss": 2.0031,
      "step": 730
    },
    {
      "epoch": 0.6911043660985291,
      "grad_norm": 0.028305399777245336,
      "learning_rate": 0.00019185444904138528,
      "loss": 1.9924,
      "step": 740
    },
    {
      "epoch": 0.7004436142890498,
      "grad_norm": 0.036720505429756495,
      "learning_rate": 0.00019154626835855628,
      "loss": 1.9981,
      "step": 750
    },
    {
      "epoch": 0.7097828624795703,
      "grad_norm": 0.0287124048917296,
      "learning_rate": 0.00019123262250816034,
      "loss": 1.9868,
      "step": 760
    },
    {
      "epoch": 0.719122110670091,
      "grad_norm": 0.03318092492837997,
      "learning_rate": 0.00019091353021435915,
      "loss": 1.9943,
      "step": 770
    },
    {
      "epoch": 0.7284613588606117,
      "grad_norm": 0.054035272137015325,
      "learning_rate": 0.00019058901052645844,
      "loss": 1.9838,
      "step": 780
    },
    {
      "epoch": 0.7378006070511324,
      "grad_norm": 0.03184392761983255,
      "learning_rate": 0.00019025908281777078,
      "loss": 1.982,
      "step": 790
    },
    {
      "epoch": 0.747139855241653,
      "grad_norm": 0.029409948164434735,
      "learning_rate": 0.00018992376678445908,
      "loss": 1.9693,
      "step": 800
    },
    {
      "epoch": 0.7564791034321737,
      "grad_norm": 0.029656963043919016,
      "learning_rate": 0.00018958308244436064,
      "loss": 1.9914,
      "step": 810
    },
    {
      "epoch": 0.7658183516226944,
      "grad_norm": 0.030843610865326686,
      "learning_rate": 0.00018923705013579233,
      "loss": 1.9749,
      "step": 820
    },
    {
      "epoch": 0.7751575998132151,
      "grad_norm": 0.0377584286045999,
      "learning_rate": 0.00018888569051633613,
      "loss": 1.9606,
      "step": 830
    },
    {
      "epoch": 0.7844968480037356,
      "grad_norm": 0.03147699916274391,
      "learning_rate": 0.00018852902456160616,
      "loss": 1.9696,
      "step": 840
    },
    {
      "epoch": 0.7938360961942563,
      "grad_norm": 0.029193153251471263,
      "learning_rate": 0.0001881670735639963,
      "loss": 1.9687,
      "step": 850
    },
    {
      "epoch": 0.803175344384777,
      "grad_norm": 0.030201895228156087,
      "learning_rate": 0.00018779985913140924,
      "loss": 1.9678,
      "step": 860
    },
    {
      "epoch": 0.8125145925752977,
      "grad_norm": 0.029272319485493213,
      "learning_rate": 0.00018742740318596632,
      "loss": 1.9697,
      "step": 870
    },
    {
      "epoch": 0.8218538407658184,
      "grad_norm": 0.033740176465285654,
      "learning_rate": 0.000187049727962699,
      "loss": 1.9647,
      "step": 880
    },
    {
      "epoch": 0.831193088956339,
      "grad_norm": 0.029539399251208593,
      "learning_rate": 0.0001866668560082213,
      "loss": 1.9627,
      "step": 890
    },
    {
      "epoch": 0.8405323371468597,
      "grad_norm": 0.028666077337951026,
      "learning_rate": 0.0001862788101793839,
      "loss": 1.9529,
      "step": 900
    },
    {
      "epoch": 0.8498715853373804,
      "grad_norm": 0.030280792492665805,
      "learning_rate": 0.0001858856136419097,
      "loss": 1.9536,
      "step": 910
    },
    {
      "epoch": 0.859210833527901,
      "grad_norm": 0.03921536105057096,
      "learning_rate": 0.0001854872898690106,
      "loss": 1.9474,
      "step": 920
    },
    {
      "epoch": 0.8685500817184216,
      "grad_norm": 0.030632523637038354,
      "learning_rate": 0.0001850838626399865,
      "loss": 1.9423,
      "step": 930
    },
    {
      "epoch": 0.8778893299089423,
      "grad_norm": 0.04615147601979514,
      "learning_rate": 0.00018467535603880548,
      "loss": 1.946,
      "step": 940
    },
    {
      "epoch": 0.887228578099463,
      "grad_norm": 0.028216236017006333,
      "learning_rate": 0.00018426179445266616,
      "loss": 1.9408,
      "step": 950
    },
    {
      "epoch": 0.8965678262899837,
      "grad_norm": 0.0282407563402959,
      "learning_rate": 0.00018384320257054177,
      "loss": 1.9447,
      "step": 960
    },
    {
      "epoch": 0.9059070744805043,
      "grad_norm": 0.029365880854687894,
      "learning_rate": 0.0001834196053817062,
      "loss": 1.9389,
      "step": 970
    },
    {
      "epoch": 0.915246322671025,
      "grad_norm": 0.02855404439130719,
      "learning_rate": 0.00018299102817424234,
      "loss": 1.9425,
      "step": 980
    },
    {
      "epoch": 0.9245855708615457,
      "grad_norm": 0.03238310984070135,
      "learning_rate": 0.00018255749653353225,
      "loss": 1.9392,
      "step": 990
    },
    {
      "epoch": 0.9339248190520663,
      "grad_norm": 0.030115350805430388,
      "learning_rate": 0.00018211903634072983,
      "loss": 1.956,
      "step": 1000
    },
    {
      "epoch": 0.9432640672425869,
      "grad_norm": 0.039090564032501135,
      "learning_rate": 0.0001816756737712158,
      "loss": 1.9358,
      "step": 1010
    },
    {
      "epoch": 0.9526033154331076,
      "grad_norm": 0.028567954476327023,
      "learning_rate": 0.000181227435293035,
      "loss": 1.9342,
      "step": 1020
    },
    {
      "epoch": 0.9619425636236283,
      "grad_norm": 0.028158653241284505,
      "learning_rate": 0.00018077434766531624,
      "loss": 1.9287,
      "step": 1030
    },
    {
      "epoch": 0.971281811814149,
      "grad_norm": 0.02927795717651538,
      "learning_rate": 0.00018031643793667504,
      "loss": 1.9298,
      "step": 1040
    },
    {
      "epoch": 0.9806210600046696,
      "grad_norm": 0.06298340584032344,
      "learning_rate": 0.0001798537334435986,
      "loss": 1.9303,
      "step": 1050
    },
    {
      "epoch": 0.9899603081951903,
      "grad_norm": 0.03627278406983255,
      "learning_rate": 0.00017938626180881407,
      "loss": 1.9285,
      "step": 1060
    },
    {
      "epoch": 0.999299556385711,
      "grad_norm": 0.03916823421329747,
      "learning_rate": 0.00017891405093963938,
      "loss": 1.9239,
      "step": 1070
    },
    {
      "epoch": 1.0087555451786132,
      "grad_norm": 0.03383539251115568,
      "learning_rate": 0.00017843712902631723,
      "loss": 1.8855,
      "step": 1080
    },
    {
      "epoch": 1.0180947933691338,
      "grad_norm": 0.02931021936560147,
      "learning_rate": 0.00017795552454033224,
      "loss": 1.8004,
      "step": 1090
    },
    {
      "epoch": 1.0274340415596543,
      "grad_norm": 0.029173295095350292,
      "learning_rate": 0.0001774692662327113,
      "loss": 1.7912,
      "step": 1100
    },
    {
      "epoch": 1.0367732897501751,
      "grad_norm": 0.035210065642974735,
      "learning_rate": 0.000176978383132307,
      "loss": 1.7902,
      "step": 1110
    },
    {
      "epoch": 1.0461125379406957,
      "grad_norm": 0.029794447659573477,
      "learning_rate": 0.00017648290454406475,
      "loss": 1.8072,
      "step": 1120
    },
    {
      "epoch": 1.0554517861312165,
      "grad_norm": 0.03194584938279939,
      "learning_rate": 0.0001759828600472734,
      "loss": 1.803,
      "step": 1130
    },
    {
      "epoch": 1.064791034321737,
      "grad_norm": 0.031028415312581603,
      "learning_rate": 0.00017547827949379924,
      "loss": 1.7945,
      "step": 1140
    },
    {
      "epoch": 1.0741302825122578,
      "grad_norm": 0.03313245828751237,
      "learning_rate": 0.00017496919300630403,
      "loss": 1.8139,
      "step": 1150
    },
    {
      "epoch": 1.0834695307027784,
      "grad_norm": 0.03049431724979126,
      "learning_rate": 0.00017445563097644664,
      "loss": 1.8031,
      "step": 1160
    },
    {
      "epoch": 1.0928087788932992,
      "grad_norm": 0.02880548705343715,
      "learning_rate": 0.00017393762406306878,
      "loss": 1.7974,
      "step": 1170
    },
    {
      "epoch": 1.1021480270838198,
      "grad_norm": 0.03200427895977668,
      "learning_rate": 0.00017341520319036469,
      "loss": 1.7994,
      "step": 1180
    },
    {
      "epoch": 1.1114872752743403,
      "grad_norm": 0.031293532815600045,
      "learning_rate": 0.00017288839954603496,
      "loss": 1.8073,
      "step": 1190
    },
    {
      "epoch": 1.1208265234648611,
      "grad_norm": 0.032367211109345505,
      "learning_rate": 0.00017235724457942468,
      "loss": 1.7944,
      "step": 1200
    },
    {
      "epoch": 1.1301657716553817,
      "grad_norm": 0.037783793923191374,
      "learning_rate": 0.0001718217699996462,
      "loss": 1.7948,
      "step": 1210
    },
    {
      "epoch": 1.1395050198459025,
      "grad_norm": 0.02881083117349317,
      "learning_rate": 0.00017128200777368567,
      "loss": 1.8029,
      "step": 1220
    },
    {
      "epoch": 1.148844268036423,
      "grad_norm": 0.032997408408175985,
      "learning_rate": 0.00017073799012449524,
      "loss": 1.7914,
      "step": 1230
    },
    {
      "epoch": 1.1581835162269438,
      "grad_norm": 0.030263615801190885,
      "learning_rate": 0.00017018974952906884,
      "loss": 1.792,
      "step": 1240
    },
    {
      "epoch": 1.1675227644174644,
      "grad_norm": 0.03286310429098484,
      "learning_rate": 0.00016963731871650378,
      "loss": 1.8149,
      "step": 1250
    },
    {
      "epoch": 1.1768620126079852,
      "grad_norm": 0.031111621587597126,
      "learning_rate": 0.00016908073066604663,
      "loss": 1.8092,
      "step": 1260
    },
    {
      "epoch": 1.1862012607985057,
      "grad_norm": 0.03372950172075241,
      "learning_rate": 0.0001685200186051246,
      "loss": 1.818,
      "step": 1270
    },
    {
      "epoch": 1.1955405089890263,
      "grad_norm": 0.040782040436567434,
      "learning_rate": 0.00016795521600736164,
      "loss": 1.7999,
      "step": 1280
    },
    {
      "epoch": 1.204879757179547,
      "grad_norm": 0.03451605168178924,
      "learning_rate": 0.00016738635659058044,
      "loss": 1.7945,
      "step": 1290
    },
    {
      "epoch": 1.2142190053700677,
      "grad_norm": 0.03235681588882673,
      "learning_rate": 0.00016681347431478933,
      "loss": 1.8087,
      "step": 1300
    }
  ],
  "logging_steps": 10,
  "max_steps": 4280,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0526118085398626e+20,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}