{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 15.125983078744758,
      "learning_rate": 3.787878787878788e-07,
      "loss": 0.8308,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 5.106211060706855,
      "learning_rate": 7.575757575757576e-07,
      "loss": 0.8107,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 1.6409244850292617,
      "learning_rate": 1.1363636363636364e-06,
      "loss": 0.7552,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 1.0049951242628896,
      "learning_rate": 1.5151515151515152e-06,
      "loss": 0.7308,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.9157452499624658,
      "learning_rate": 1.8939393939393941e-06,
      "loss": 0.7158,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.8628839965574723,
      "learning_rate": 2.2727272727272728e-06,
      "loss": 0.6902,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.7308412022540539,
      "learning_rate": 2.6515151515151514e-06,
      "loss": 0.6844,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.868821693248259,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 0.6843,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.7991099571783645,
      "learning_rate": 3.409090909090909e-06,
      "loss": 0.6651,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.8494515975438381,
      "learning_rate": 3.7878787878787882e-06,
      "loss": 0.6616,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.6688481290670764,
      "learning_rate": 4.166666666666667e-06,
      "loss": 0.6479,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.5229831628334457,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.6519,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.37649939369854807,
      "learning_rate": 4.924242424242425e-06,
      "loss": 0.6501,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.3422897131579416,
      "learning_rate": 4.999446185282947e-06,
      "loss": 0.6506,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.3241007372983375,
      "learning_rate": 4.997196742099456e-06,
      "loss": 0.6424,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.32292063048607617,
      "learning_rate": 4.993218644771816e-06,
      "loss": 0.6469,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.3604169283465372,
      "learning_rate": 4.987514703353798e-06,
      "loss": 0.6383,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.31383488263493586,
      "learning_rate": 4.980088947003254e-06,
      "loss": 0.6435,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.30348082668380777,
      "learning_rate": 4.970946621135998e-06,
      "loss": 0.6357,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.3160900714261373,
      "learning_rate": 4.960094183720539e-06,
      "loss": 0.6436,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.3042936930583843,
      "learning_rate": 4.947539300716295e-06,
      "loss": 0.6396,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.3075635512158751,
      "learning_rate": 4.933290840658501e-06,
      "loss": 0.6432,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.38940758621282484,
      "learning_rate": 4.917358868393641e-06,
      "loss": 0.6387,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.2941815609309389,
      "learning_rate": 4.8997546379698304e-06,
      "loss": 0.6344,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.33217327438419464,
      "learning_rate": 4.880490584687161e-06,
      "loss": 0.6392,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.30544876573581253,
      "learning_rate": 4.8595803163136455e-06,
      "loss": 0.6372,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.31345899291427864,
      "learning_rate": 4.837038603472932e-06,
      "loss": 0.6343,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.31662570229165077,
      "learning_rate": 4.812881369210625e-06,
      "loss": 0.6449,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.3224781690706616,
      "learning_rate": 4.787125677746528e-06,
      "loss": 0.6335,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.2866537117310808,
      "learning_rate": 4.75978972242081e-06,
      "loss": 0.6314,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.29647749002081614,
      "learning_rate": 4.730892812842562e-06,
      "loss": 0.6312,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.3324269942634098,
      "learning_rate": 4.700455361249849e-06,
      "loss": 0.6317,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.29925598379381463,
      "learning_rate": 4.668498868090891e-06,
      "loss": 0.6283,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.3154305379821916,
      "learning_rate": 4.635045906836541e-06,
      "loss": 0.6341,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.3199649642865213,
      "learning_rate": 4.6001201080348096e-06,
      "loss": 0.6411,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.32037054661311365,
      "learning_rate": 4.563746142618686e-06,
      "loss": 0.6328,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.3318697710361171,
      "learning_rate": 4.525949704479052e-06,
      "loss": 0.6265,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.3246050423002289,
      "learning_rate": 4.486757492314996e-06,
      "loss": 0.6387,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.30506469521512686,
      "learning_rate": 4.446197190774345e-06,
      "loss": 0.6372,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.3085994412780229,
      "learning_rate": 4.404297450897755e-06,
      "loss": 0.6404,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.2749956429354596,
      "learning_rate": 4.361087869880142e-06,
      "loss": 0.6366,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.2936678775920064,
      "learning_rate": 4.31659897016378e-06,
      "loss": 0.6234,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.3336527691105597,
      "learning_rate": 4.270862177877817e-06,
      "loss": 0.6365,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.627018392086029,
      "eval_runtime": 440.9493,
      "eval_samples_per_second": 26.813,
      "eval_steps_per_second": 0.42,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.3118348041353063,
      "learning_rate": 4.2239098006394365e-06,
      "loss": 0.6498,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.3256449525170415,
      "learning_rate": 4.175775004732371e-06,
      "loss": 0.6077,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.32702392722031254,
      "learning_rate": 4.1264917916788475e-06,
      "loss": 0.6013,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.28050319740758334,
      "learning_rate": 4.07609497422155e-06,
      "loss": 0.6023,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.3000718527011783,
      "learning_rate": 4.024620151732549e-06,
      "loss": 0.6098,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.308697561622843,
      "learning_rate": 3.972103685066567e-06,
      "loss": 0.6147,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.27367615993888206,
      "learning_rate": 3.9185826708763455e-06,
      "loss": 0.6045,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.3131261213067205,
      "learning_rate": 3.86409491540827e-06,
      "loss": 0.6023,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.3082714238602967,
      "learning_rate": 3.8086789077967335e-06,
      "loss": 0.6079,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.2896155404732298,
      "learning_rate": 3.7523737928761345e-06,
      "loss": 0.6009,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.2949864594007117,
      "learning_rate": 3.695219343529694e-06,
      "loss": 0.611,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.29341498947539596,
      "learning_rate": 3.637255932594635e-06,
      "loss": 0.6074,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.28661195720596744,
      "learning_rate": 3.578524504343561e-06,
      "loss": 0.609,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.3026330084600481,
      "learning_rate": 3.5190665455621842e-06,
      "loss": 0.6029,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.2658709316204282,
      "learning_rate": 3.458924056243836e-06,
      "loss": 0.6012,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.2684949299976577,
      "learning_rate": 3.3981395199214567e-06,
      "loss": 0.6012,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.299097522611276,
      "learning_rate": 3.336755873658014e-06,
      "loss": 0.6066,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.3135014047131652,
      "learning_rate": 3.274816477716569e-06,
      "loss": 0.6066,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.2934151051131123,
      "learning_rate": 3.2123650849313897e-06,
      "loss": 0.6093,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.2830185953651649,
      "learning_rate": 3.149445809801768e-06,
      "loss": 0.607,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.29912134559967146,
      "learning_rate": 3.0861030973303573e-06,
      "loss": 0.6019,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.2935467188047777,
      "learning_rate": 3.0223816916280574e-06,
      "loss": 0.5996,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.27437783484675404,
      "learning_rate": 2.9583266043075992e-06,
      "loss": 0.5994,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.29614627246404845,
      "learning_rate": 2.893983082688191e-06,
      "loss": 0.607,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.26516288075680605,
      "learning_rate": 2.8293965778336474e-06,
      "loss": 0.5956,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.32640130516394655,
      "learning_rate": 2.7646127124466114e-06,
      "loss": 0.5994,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.28248568655010486,
      "learning_rate": 2.6996772486415277e-06,
      "loss": 0.6152,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.2898149513994004,
      "learning_rate": 2.6346360556191326e-06,
      "loss": 0.6022,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.28975370189373456,
      "learning_rate": 2.569535077265311e-06,
      "loss": 0.6065,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.2962120678740453,
      "learning_rate": 2.504420299697193e-06,
      "loss": 0.5996,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.2880350725196617,
      "learning_rate": 2.4393377187794146e-06,
      "loss": 0.6005,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.28966271136759913,
      "learning_rate": 2.3743333076334986e-06,
      "loss": 0.5978,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.283507079919438,
      "learning_rate": 2.309452984163299e-06,
      "loss": 0.6019,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.29529607918677664,
      "learning_rate": 2.2447425786194495e-06,
      "loss": 0.5985,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.28319448713724227,
      "learning_rate": 2.1802478012257296e-06,
      "loss": 0.6056,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.2726643263502109,
      "learning_rate": 2.1160142098902133e-06,
      "loss": 0.603,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.27617806287641067,
      "learning_rate": 2.052087178024016e-06,
      "loss": 0.5898,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.29198691591599185,
      "learning_rate": 1.9885118624903573e-06,
      "loss": 0.5976,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.27844261589599106,
      "learning_rate": 1.9253331717066014e-06,
      "loss": 0.5954,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.2738167375905431,
      "learning_rate": 1.8625957339217828e-06,
      "loss": 0.5992,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.2885459820853412,
      "learning_rate": 1.8003438656920464e-06,
      "loss": 0.6038,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.29048300848707215,
      "learning_rate": 1.73862154057626e-06,
      "loss": 0.6017,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.27617825592983636,
      "learning_rate": 1.6774723580739071e-06,
      "loss": 0.6029,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.3013766038530734,
      "learning_rate": 1.6169395128272203e-06,
      "loss": 0.6006,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6203941106796265,
      "eval_runtime": 445.3298,
      "eval_samples_per_second": 26.549,
      "eval_steps_per_second": 0.415,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.26563524661619237,
      "learning_rate": 1.5570657641092865e-06,
      "loss": 0.6231,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.2794923155415762,
      "learning_rate": 1.4978934056196946e-06,
      "loss": 0.5694,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.2851227269905178,
      "learning_rate": 1.4394642356090566e-06,
      "loss": 0.5852,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.28857955805833607,
      "learning_rate": 1.3818195273535004e-06,
      "loss": 0.5831,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.27216092884865906,
      "learning_rate": 1.3250000000000007e-06,
      "loss": 0.5862,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.27002519179009965,
      "learning_rate": 1.2690457898031274e-06,
      "loss": 0.5837,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.2818496425678644,
      "learning_rate": 1.2139964217735515e-06,
      "loss": 0.5784,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.2701221946191815,
      "learning_rate": 1.1598907817583052e-06,
      "loss": 0.5793,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.27574524946055073,
      "learning_rate": 1.1067670889725543e-06,
      "loss": 0.5842,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.28332150705773357,
      "learning_rate": 1.0546628690022486e-06,
      "loss": 0.583,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.28208882210607483,
      "learning_rate": 1.0036149272967555e-06,
      "loss": 0.5727,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.27609878433695273,
      "learning_rate": 9.536593231701712e-07,
      "loss": 0.5816,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.2723619847411987,
      "learning_rate": 9.048313443296946e-07,
      "loss": 0.5818,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.2783655922018766,
      "learning_rate": 8.571654819490528e-07,
      "loss": 0.5793,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.28395769221124467,
      "learning_rate": 8.106954063045759e-07,
      "loss": 0.5823,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.26619297702415606,
      "learning_rate": 7.654539429911393e-07,
      "loss": 0.5837,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.26007555166748264,
      "learning_rate": 7.21473049734782e-07,
      "loss": 0.5813,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.29062347852432735,
      "learning_rate": 6.787837938183603e-07,
      "loss": 0.5805,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.27001754809897044,
      "learning_rate": 6.374163301361986e-07,
      "loss": 0.5887,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.2641873787984356,
      "learning_rate": 5.973998798932351e-07,
      "loss": 0.5918,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.2678841619133812,
      "learning_rate": 5.587627099637106e-07,
      "loss": 0.5871,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.2640549717266149,
      "learning_rate": 5.215321129239688e-07,
      "loss": 0.5844,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.28491735406972796,
      "learning_rate": 4.857343877734967e-07,
      "loss": 0.5912,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.2642156408420045,
      "learning_rate": 4.5139482135779634e-07,
      "loss": 0.5842,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.26299866058202587,
      "learning_rate": 4.185376705062326e-07,
      "loss": 0.5824,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.27022994903364983,
      "learning_rate": 3.871861448974572e-07,
      "loss": 0.5816,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.26101508013637187,
      "learning_rate": 3.5736239066452864e-07,
      "loss": 0.5813,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.27121387507355627,
      "learning_rate": 3.290874747512937e-07,
      "loss": 0.5815,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.2738045404235721,
      "learning_rate": 3.023813700310924e-07,
      "loss": 0.5822,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.2721835115660428,
      "learning_rate": 2.772629411982938e-07,
      "loss": 0.5855,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.2622528907504856,
      "learning_rate": 2.5374993144262513e-07,
      "loss": 0.5806,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.2577523362544388,
      "learning_rate": 2.3185894991571576e-07,
      "loss": 0.5815,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.26363889309421545,
      "learning_rate": 2.1160545999870025e-07,
      "loss": 0.5846,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.2598425521048642,
      "learning_rate": 1.9300376837917695e-07,
      "loss": 0.5878,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.2609543392451375,
      "learning_rate": 1.7606701494522731e-07,
      "loss": 0.5776,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.254572723920624,
      "learning_rate": 1.6080716350364643e-07,
      "loss": 0.576,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.2606448147342431,
      "learning_rate": 1.472349933289327e-07,
      "loss": 0.591,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.2608934236555304,
      "learning_rate": 1.35360091549008e-07,
      "loss": 0.5879,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.26384395992199977,
      "learning_rate": 1.2519084637305096e-07,
      "loss": 0.5849,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.2665820989189118,
      "learning_rate": 1.1673444116622113e-07,
      "loss": 0.5859,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.2621212407779067,
      "learning_rate": 1.0999684937546469e-07,
      "loss": 0.576,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.2631271259876569,
      "learning_rate": 1.0498283030998208e-07,
      "loss": 0.5759,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.27152984911342587,
      "learning_rate": 1.0169592577934177e-07,
      "loss": 0.5773,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.26902594864871143,
      "learning_rate": 1.0013845759161108e-07,
      "loss": 0.5874,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6206473112106323,
      "eval_runtime": 443.5397,
      "eval_samples_per_second": 26.656,
      "eval_steps_per_second": 0.417,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.6148462799949007,
      "train_runtime": 70693.2258,
      "train_samples_per_second": 9.532,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}