{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 2.667692336452461,
      "learning_rate": 5e-06,
      "loss": 0.7534,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.8063888051525331,
      "learning_rate": 5e-06,
      "loss": 0.699,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9060943666753217,
      "learning_rate": 5e-06,
      "loss": 0.6792,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.863100204101913,
      "learning_rate": 5e-06,
      "loss": 0.6793,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.8020158873807923,
      "learning_rate": 5e-06,
      "loss": 0.6755,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.684096154720575,
      "learning_rate": 5e-06,
      "loss": 0.6553,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.45007895438831524,
      "learning_rate": 5e-06,
      "loss": 0.6522,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.3793094921700233,
      "learning_rate": 5e-06,
      "loss": 0.6551,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.3149452056766187,
      "learning_rate": 5e-06,
      "loss": 0.6399,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.3009594867680131,
      "learning_rate": 5e-06,
      "loss": 0.6412,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.34212181307790984,
      "learning_rate": 5e-06,
      "loss": 0.6325,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.3071741827743272,
      "learning_rate": 5e-06,
      "loss": 0.6407,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.2921267402694689,
      "learning_rate": 5e-06,
      "loss": 0.6417,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.3086951712828636,
      "learning_rate": 5e-06,
      "loss": 0.6438,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.31203378476932964,
      "learning_rate": 5e-06,
      "loss": 0.6363,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.31089407638977484,
      "learning_rate": 5e-06,
      "loss": 0.6415,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.35439340860374086,
      "learning_rate": 5e-06,
      "loss": 0.6333,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.3029281442256716,
      "learning_rate": 5e-06,
      "loss": 0.6389,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.3035263406218673,
      "learning_rate": 5e-06,
      "loss": 0.6314,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.30613699941499445,
      "learning_rate": 5e-06,
      "loss": 0.6395,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.3097244580495573,
      "learning_rate": 5e-06,
      "loss": 0.6358,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.30052514890974735,
      "learning_rate": 5e-06,
      "loss": 0.6395,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.3247757790920796,
      "learning_rate": 5e-06,
      "loss": 0.6351,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.3002320555356954,
      "learning_rate": 5e-06,
      "loss": 0.631,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.33788540070089457,
      "learning_rate": 5e-06,
      "loss": 0.6359,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.29983726730351934,
      "learning_rate": 5e-06,
      "loss": 0.6341,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.3083145101540858,
      "learning_rate": 5e-06,
      "loss": 0.6313,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.31499238828203335,
      "learning_rate": 5e-06,
      "loss": 0.642,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.32514749431753714,
      "learning_rate": 5e-06,
      "loss": 0.6307,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.28853752402247734,
      "learning_rate": 5e-06,
      "loss": 0.6288,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.2977250374761748,
      "learning_rate": 5e-06,
      "loss": 0.6286,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.33869550896521733,
      "learning_rate": 5e-06,
      "loss": 0.6292,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.30377512470657203,
      "learning_rate": 5e-06,
      "loss": 0.6259,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.3285936613654986,
      "learning_rate": 5e-06,
      "loss": 0.6317,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.3338489009754525,
      "learning_rate": 5e-06,
      "loss": 0.6388,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.31401301210309734,
      "learning_rate": 5e-06,
      "loss": 0.6305,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.32729904251625536,
      "learning_rate": 5e-06,
      "loss": 0.6242,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.3137460681429382,
      "learning_rate": 5e-06,
      "loss": 0.6365,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.32750286290464264,
      "learning_rate": 5e-06,
      "loss": 0.6351,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.2982635849558681,
      "learning_rate": 5e-06,
      "loss": 0.6383,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.2855401575649863,
      "learning_rate": 5e-06,
      "loss": 0.6345,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.3006500631775212,
      "learning_rate": 5e-06,
      "loss": 0.6215,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.34295212142334275,
      "learning_rate": 5e-06,
      "loss": 0.6345,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6251533031463623,
      "eval_runtime": 443.2695,
      "eval_samples_per_second": 26.672,
      "eval_steps_per_second": 0.417,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.35024631622777125,
      "learning_rate": 5e-06,
      "loss": 0.6472,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.33002240579640935,
      "learning_rate": 5e-06,
      "loss": 0.6017,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.31651186830890377,
      "learning_rate": 5e-06,
      "loss": 0.5955,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.2916006238361447,
      "learning_rate": 5e-06,
      "loss": 0.5964,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.3048367563892593,
      "learning_rate": 5e-06,
      "loss": 0.6038,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.3276162015278515,
      "learning_rate": 5e-06,
      "loss": 0.6089,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.29051409762342256,
      "learning_rate": 5e-06,
      "loss": 0.5987,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.309815710665164,
      "learning_rate": 5e-06,
      "loss": 0.5966,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.2943518362832322,
      "learning_rate": 5e-06,
      "loss": 0.6023,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.3206888044455193,
      "learning_rate": 5e-06,
      "loss": 0.5953,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.28810098486400076,
      "learning_rate": 5e-06,
      "loss": 0.6056,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.33684168520106755,
      "learning_rate": 5e-06,
      "loss": 0.6022,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.2930925524871367,
      "learning_rate": 5e-06,
      "loss": 0.6037,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.2997084038260789,
      "learning_rate": 5e-06,
      "loss": 0.5977,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.2724633931757638,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.29594928695290523,
      "learning_rate": 5e-06,
      "loss": 0.5959,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.3509170428001788,
      "learning_rate": 5e-06,
      "loss": 0.6016,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.31871308148877675,
      "learning_rate": 5e-06,
      "loss": 0.6016,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.29158762025250556,
      "learning_rate": 5e-06,
      "loss": 0.6041,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.31932103979021054,
      "learning_rate": 5e-06,
      "loss": 0.6022,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.30549722640769267,
      "learning_rate": 5e-06,
      "loss": 0.597,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.2951879017910213,
      "learning_rate": 5e-06,
      "loss": 0.5949,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.2890301521070669,
      "learning_rate": 5e-06,
      "loss": 0.5946,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.3152131309167344,
      "learning_rate": 5e-06,
      "loss": 0.6023,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.27457370025182487,
      "learning_rate": 5e-06,
      "loss": 0.5908,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.32514865685817596,
      "learning_rate": 5e-06,
      "loss": 0.5948,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.2869347820786928,
      "learning_rate": 5e-06,
      "loss": 0.6103,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.3130330176159502,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.29489095764802076,
      "learning_rate": 5e-06,
      "loss": 0.602,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.3334123404509505,
      "learning_rate": 5e-06,
      "loss": 0.5951,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.3211675496150495,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.2952767746051378,
      "learning_rate": 5e-06,
      "loss": 0.5935,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.3122013602871499,
      "learning_rate": 5e-06,
      "loss": 0.5975,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.29493014283340135,
      "learning_rate": 5e-06,
      "loss": 0.5939,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.30726201417055676,
      "learning_rate": 5e-06,
      "loss": 0.601,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.29691381334334693,
      "learning_rate": 5e-06,
      "loss": 0.5985,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.31312793491026397,
      "learning_rate": 5e-06,
      "loss": 0.5854,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.30709319545073666,
      "learning_rate": 5e-06,
      "loss": 0.5932,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.3023157832075651,
      "learning_rate": 5e-06,
      "loss": 0.591,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.32438843189452765,
      "learning_rate": 5e-06,
      "loss": 0.5948,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.3133407951548133,
      "learning_rate": 5e-06,
      "loss": 0.5992,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.2991755831382789,
      "learning_rate": 5e-06,
      "loss": 0.5974,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.3113336058021223,
      "learning_rate": 5e-06,
      "loss": 0.5987,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.3520179966585449,
      "learning_rate": 5e-06,
      "loss": 0.5961,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6187062859535217,
      "eval_runtime": 442.9315,
      "eval_samples_per_second": 26.693,
      "eval_steps_per_second": 0.418,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.3402585851685105,
      "learning_rate": 5e-06,
      "loss": 0.6155,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.34358400255798927,
      "learning_rate": 5e-06,
      "loss": 0.5547,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.30065738065627035,
      "learning_rate": 5e-06,
      "loss": 0.5698,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.350961553117733,
      "learning_rate": 5e-06,
      "loss": 0.5679,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.33412845962784443,
      "learning_rate": 5e-06,
      "loss": 0.571,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.28301100428565806,
      "learning_rate": 5e-06,
      "loss": 0.569,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.29246926901624365,
      "learning_rate": 5e-06,
      "loss": 0.5637,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.3085716692725879,
      "learning_rate": 5e-06,
      "loss": 0.5646,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.31238615273409037,
      "learning_rate": 5e-06,
      "loss": 0.5697,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.2889893652608361,
      "learning_rate": 5e-06,
      "loss": 0.5691,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.28744425674496726,
      "learning_rate": 5e-06,
      "loss": 0.5588,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.31862553873260213,
      "learning_rate": 5e-06,
      "loss": 0.5678,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.30176325862165776,
      "learning_rate": 5e-06,
      "loss": 0.5682,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.3415868984406676,
      "learning_rate": 5e-06,
      "loss": 0.5656,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.3053031592130626,
      "learning_rate": 5e-06,
      "loss": 0.5687,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.34927703789690623,
      "learning_rate": 5e-06,
      "loss": 0.5699,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.2971790308579545,
      "learning_rate": 5e-06,
      "loss": 0.568,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.30598279561257147,
      "learning_rate": 5e-06,
      "loss": 0.5673,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.3038193133765918,
      "learning_rate": 5e-06,
      "loss": 0.5756,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.30594054629347417,
      "learning_rate": 5e-06,
      "loss": 0.5785,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.3047017356999142,
      "learning_rate": 5e-06,
      "loss": 0.5738,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.30228571032388474,
      "learning_rate": 5e-06,
      "loss": 0.5714,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.30561689682501575,
      "learning_rate": 5e-06,
      "loss": 0.5778,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.3218399680160419,
      "learning_rate": 5e-06,
      "loss": 0.5714,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.29888426709529753,
      "learning_rate": 5e-06,
      "loss": 0.5695,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.30030693301168837,
      "learning_rate": 5e-06,
      "loss": 0.5689,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.31110920572933626,
      "learning_rate": 5e-06,
      "loss": 0.5685,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.30377446561184135,
      "learning_rate": 5e-06,
      "loss": 0.5688,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.30883960403656696,
      "learning_rate": 5e-06,
      "loss": 0.57,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.295333664191936,
      "learning_rate": 5e-06,
      "loss": 0.5728,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.29666095565935197,
      "learning_rate": 5e-06,
      "loss": 0.5679,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.2896839269345245,
      "learning_rate": 5e-06,
      "loss": 0.569,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.28697330475888133,
      "learning_rate": 5e-06,
      "loss": 0.5722,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.2937887632208763,
      "learning_rate": 5e-06,
      "loss": 0.5756,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.287081423243111,
      "learning_rate": 5e-06,
      "loss": 0.5654,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.29865384294519426,
      "learning_rate": 5e-06,
      "loss": 0.5639,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.31153098262281526,
      "learning_rate": 5e-06,
      "loss": 0.5787,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.3019801645510001,
      "learning_rate": 5e-06,
      "loss": 0.5754,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.29801055775475277,
      "learning_rate": 5e-06,
      "loss": 0.5725,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.3022646107633967,
      "learning_rate": 5e-06,
      "loss": 0.5735,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.288708077282271,
      "learning_rate": 5e-06,
      "loss": 0.5637,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.3003385354677671,
      "learning_rate": 5e-06,
      "loss": 0.5639,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.35679915826472547,
      "learning_rate": 5e-06,
      "loss": 0.5651,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.3072621489316874,
      "learning_rate": 5e-06,
      "loss": 0.5749,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.618656575679779,
      "eval_runtime": 442.1394,
      "eval_samples_per_second": 26.74,
      "eval_steps_per_second": 0.418,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.6039760675241596,
      "train_runtime": 70715.1755,
      "train_samples_per_second": 9.529,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}