{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 0.9639704104941377,
      "learning_rate": 8e-06,
      "loss": 0.7439,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.8442173824916553,
      "learning_rate": 8e-06,
      "loss": 0.6913,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9135177104158955,
      "learning_rate": 8e-06,
      "loss": 0.6706,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.7447503678432672,
      "learning_rate": 8e-06,
      "loss": 0.6689,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.44266019319258165,
      "learning_rate": 8e-06,
      "loss": 0.6641,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.30998631234245,
      "learning_rate": 8e-06,
      "loss": 0.6461,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.3164751503953061,
      "learning_rate": 8e-06,
      "loss": 0.6459,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.3350368477653695,
      "learning_rate": 8e-06,
      "loss": 0.6508,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.3086185193667254,
      "learning_rate": 8e-06,
      "loss": 0.6363,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.33448171137595584,
      "learning_rate": 8e-06,
      "loss": 0.6379,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.3384106640783101,
      "learning_rate": 8e-06,
      "loss": 0.6294,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.30260557253018994,
      "learning_rate": 8e-06,
      "loss": 0.6376,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.33298272040177757,
      "learning_rate": 8e-06,
      "loss": 0.6387,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.31265997710460536,
      "learning_rate": 8e-06,
      "loss": 0.6408,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.31516199031803915,
      "learning_rate": 8e-06,
      "loss": 0.6333,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.31682011905006013,
      "learning_rate": 8e-06,
      "loss": 0.6386,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.3564513676586145,
      "learning_rate": 8e-06,
      "loss": 0.6303,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.3306179870426032,
      "learning_rate": 8e-06,
      "loss": 0.6359,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.33474028648939197,
      "learning_rate": 8e-06,
      "loss": 0.6286,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.3206144090448144,
      "learning_rate": 8e-06,
      "loss": 0.6365,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.3177671154762098,
      "learning_rate": 8e-06,
      "loss": 0.6329,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.3021654059092019,
      "learning_rate": 8e-06,
      "loss": 0.6366,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.3290669088275951,
      "learning_rate": 8e-06,
      "loss": 0.6322,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.30424722815187283,
      "learning_rate": 8e-06,
      "loss": 0.628,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.3474823315395405,
      "learning_rate": 8e-06,
      "loss": 0.6329,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.3168891366378092,
      "learning_rate": 8e-06,
      "loss": 0.6312,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.34553565431951916,
      "learning_rate": 8e-06,
      "loss": 0.6283,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.3332271782588716,
      "learning_rate": 8e-06,
      "loss": 0.6391,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.3880170201690876,
      "learning_rate": 8e-06,
      "loss": 0.6278,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.3130673004887942,
      "learning_rate": 8e-06,
      "loss": 0.6258,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.3358637038063554,
      "learning_rate": 8e-06,
      "loss": 0.6257,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.3399489989506044,
      "learning_rate": 8e-06,
      "loss": 0.6263,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.38771766102214283,
      "learning_rate": 8e-06,
      "loss": 0.6229,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.3283255856874223,
      "learning_rate": 8e-06,
      "loss": 0.6286,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.3268975490281503,
      "learning_rate": 8e-06,
      "loss": 0.6357,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.32733368412799924,
      "learning_rate": 8e-06,
      "loss": 0.6276,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.3624716188998123,
      "learning_rate": 8e-06,
      "loss": 0.6212,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.3061531887849296,
      "learning_rate": 8e-06,
      "loss": 0.6336,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.36678334857855494,
      "learning_rate": 8e-06,
      "loss": 0.6321,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.34292337617119184,
      "learning_rate": 8e-06,
      "loss": 0.6352,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.33405894563773253,
      "learning_rate": 8e-06,
      "loss": 0.6315,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.3164130023383584,
      "learning_rate": 8e-06,
      "loss": 0.6185,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.3977987648498147,
      "learning_rate": 8e-06,
      "loss": 0.6314,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6220688819885254,
      "eval_runtime": 442.3463,
      "eval_samples_per_second": 26.728,
      "eval_steps_per_second": 0.418,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.41535494548627566,
      "learning_rate": 8e-06,
      "loss": 0.6423,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.3735476403023979,
      "learning_rate": 8e-06,
      "loss": 0.5863,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.3183198999243994,
      "learning_rate": 8e-06,
      "loss": 0.5799,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.3175544461491631,
      "learning_rate": 8e-06,
      "loss": 0.5808,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.32855484029978815,
      "learning_rate": 8e-06,
      "loss": 0.5884,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.3222089893505811,
      "learning_rate": 8e-06,
      "loss": 0.5935,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.34758032863197696,
      "learning_rate": 8e-06,
      "loss": 0.5839,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.3196612744024533,
      "learning_rate": 8e-06,
      "loss": 0.5819,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.2975047803907781,
      "learning_rate": 8e-06,
      "loss": 0.5876,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.3241282573148342,
      "learning_rate": 8e-06,
      "loss": 0.5809,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.3304353844650285,
      "learning_rate": 8e-06,
      "loss": 0.5911,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.3216443108997831,
      "learning_rate": 8e-06,
      "loss": 0.5878,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.28632058626541573,
      "learning_rate": 8e-06,
      "loss": 0.5896,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.32577387421456516,
      "learning_rate": 8e-06,
      "loss": 0.5839,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.29722092652806653,
      "learning_rate": 8e-06,
      "loss": 0.5824,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.30497645231514076,
      "learning_rate": 8e-06,
      "loss": 0.5821,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.3356906043288017,
      "learning_rate": 8e-06,
      "loss": 0.5877,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.3197713793402043,
      "learning_rate": 8e-06,
      "loss": 0.5878,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.3184351238575717,
      "learning_rate": 8e-06,
      "loss": 0.5906,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.38972545692696076,
      "learning_rate": 8e-06,
      "loss": 0.5887,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.33229014658178607,
      "learning_rate": 8e-06,
      "loss": 0.5836,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.31309389506608065,
      "learning_rate": 8e-06,
      "loss": 0.5817,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.370537402842656,
      "learning_rate": 8e-06,
      "loss": 0.5814,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.3857314872058338,
      "learning_rate": 8e-06,
      "loss": 0.5889,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.2943021984178184,
      "learning_rate": 8e-06,
      "loss": 0.5778,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.33551835524618123,
      "learning_rate": 8e-06,
      "loss": 0.5816,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.3129729258150647,
      "learning_rate": 8e-06,
      "loss": 0.5971,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.3281106159791198,
      "learning_rate": 8e-06,
      "loss": 0.5844,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.30736214724993627,
      "learning_rate": 8e-06,
      "loss": 0.5889,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.31301654778685145,
      "learning_rate": 8e-06,
      "loss": 0.5822,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.3832408006580538,
      "learning_rate": 8e-06,
      "loss": 0.5834,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.2881229879737456,
      "learning_rate": 8e-06,
      "loss": 0.5809,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.33079937680811516,
      "learning_rate": 8e-06,
      "loss": 0.5846,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.3121300231001629,
      "learning_rate": 8e-06,
      "loss": 0.5812,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.32381306898398965,
      "learning_rate": 8e-06,
      "loss": 0.5884,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.3132261319703939,
      "learning_rate": 8e-06,
      "loss": 0.586,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.3109108661140414,
      "learning_rate": 8e-06,
      "loss": 0.5732,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.3378524689763439,
      "learning_rate": 8e-06,
      "loss": 0.5808,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.3682104905353802,
      "learning_rate": 8e-06,
      "loss": 0.5787,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.3166688390782565,
      "learning_rate": 8e-06,
      "loss": 0.5824,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.3390663411975044,
      "learning_rate": 8e-06,
      "loss": 0.5869,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.30499332586283723,
      "learning_rate": 8e-06,
      "loss": 0.5851,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.31339130972566714,
      "learning_rate": 8e-06,
      "loss": 0.5864,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.33397359259310516,
      "learning_rate": 8e-06,
      "loss": 0.5839,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6177216172218323,
      "eval_runtime": 442.0957,
      "eval_samples_per_second": 26.743,
      "eval_steps_per_second": 0.418,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.3942199397790381,
      "learning_rate": 8e-06,
      "loss": 0.5988,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.3622133382325446,
      "learning_rate": 8e-06,
      "loss": 0.529,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.3051101757043286,
      "learning_rate": 8e-06,
      "loss": 0.5431,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.31808930925704476,
      "learning_rate": 8e-06,
      "loss": 0.5415,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.3053872915751511,
      "learning_rate": 8e-06,
      "loss": 0.5444,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.30464054596272677,
      "learning_rate": 8e-06,
      "loss": 0.5431,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.3314113247332846,
      "learning_rate": 8e-06,
      "loss": 0.538,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.2925179652880053,
      "learning_rate": 8e-06,
      "loss": 0.5388,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.30911668605275777,
      "learning_rate": 8e-06,
      "loss": 0.544,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.3199475341742108,
      "learning_rate": 8e-06,
      "loss": 0.5438,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.32055573087102224,
      "learning_rate": 8e-06,
      "loss": 0.5341,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.3186808852851889,
      "learning_rate": 8e-06,
      "loss": 0.543,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.33549794419622847,
      "learning_rate": 8e-06,
      "loss": 0.5435,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.34091571131374626,
      "learning_rate": 8e-06,
      "loss": 0.541,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.3040233000106589,
      "learning_rate": 8e-06,
      "loss": 0.5441,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.32578198574988587,
      "learning_rate": 8e-06,
      "loss": 0.5454,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.30647179694911697,
      "learning_rate": 8e-06,
      "loss": 0.5438,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.3183190725429563,
      "learning_rate": 8e-06,
      "loss": 0.5431,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.3062188640144124,
      "learning_rate": 8e-06,
      "loss": 0.5512,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.3017755578758498,
      "learning_rate": 8e-06,
      "loss": 0.5539,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.33019353573088506,
      "learning_rate": 8e-06,
      "loss": 0.5494,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.2852464261424503,
      "learning_rate": 8e-06,
      "loss": 0.5476,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.3500005782241224,
      "learning_rate": 8e-06,
      "loss": 0.5535,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.35098221183935024,
      "learning_rate": 8e-06,
      "loss": 0.5477,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.30922704201508944,
      "learning_rate": 8e-06,
      "loss": 0.5458,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.3082574345947817,
      "learning_rate": 8e-06,
      "loss": 0.5455,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.33169661267365713,
      "learning_rate": 8e-06,
      "loss": 0.5452,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.315011239745189,
      "learning_rate": 8e-06,
      "loss": 0.5458,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.38634457368273284,
      "learning_rate": 8e-06,
      "loss": 0.547,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.308611576841766,
      "learning_rate": 8e-06,
      "loss": 0.5495,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.3061797999061911,
      "learning_rate": 8e-06,
      "loss": 0.5449,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.3626848717846874,
      "learning_rate": 8e-06,
      "loss": 0.5462,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.3447009825960258,
      "learning_rate": 8e-06,
      "loss": 0.5494,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.2897396756037604,
      "learning_rate": 8e-06,
      "loss": 0.5526,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.32027753437812284,
      "learning_rate": 8e-06,
      "loss": 0.543,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.32454406486781057,
      "learning_rate": 8e-06,
      "loss": 0.5415,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.3600363939988387,
      "learning_rate": 8e-06,
      "loss": 0.5559,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.30006980036728953,
      "learning_rate": 8e-06,
      "loss": 0.5525,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.3297390936565714,
      "learning_rate": 8e-06,
      "loss": 0.5499,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.35731426271290295,
      "learning_rate": 8e-06,
      "loss": 0.551,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.3324101899396559,
      "learning_rate": 8e-06,
      "loss": 0.5415,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.34191471108090155,
      "learning_rate": 8e-06,
      "loss": 0.5419,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.33614143212531566,
      "learning_rate": 8e-06,
      "loss": 0.543,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.31863479804372463,
      "learning_rate": 8e-06,
      "loss": 0.5523,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6223628520965576,
      "eval_runtime": 442.7522,
      "eval_samples_per_second": 26.703,
      "eval_steps_per_second": 0.418,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.5901586229216925,
      "train_runtime": 70551.5379,
      "train_samples_per_second": 9.551,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}