{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 1.0060717937312023,
      "learning_rate": 5e-06,
      "loss": 0.7524,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.7891563140905129,
      "learning_rate": 5e-06,
      "loss": 0.6969,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9139530591526533,
      "learning_rate": 5e-06,
      "loss": 0.6771,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.8498162025655388,
      "learning_rate": 5e-06,
      "loss": 0.6775,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.7817634582289844,
      "learning_rate": 5e-06,
      "loss": 0.6738,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.6293849390879412,
      "learning_rate": 5e-06,
      "loss": 0.6538,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.41921457987793875,
      "learning_rate": 5e-06,
      "loss": 0.6513,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.36300206030899534,
      "learning_rate": 5e-06,
      "loss": 0.6548,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.309957963162278,
      "learning_rate": 5e-06,
      "loss": 0.6398,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.32328999508305195,
      "learning_rate": 5e-06,
      "loss": 0.6411,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.330574745397823,
      "learning_rate": 5e-06,
      "loss": 0.6325,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.29931157739012315,
      "learning_rate": 5e-06,
      "loss": 0.6407,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.34029859245399474,
      "learning_rate": 5e-06,
      "loss": 0.6417,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.30127106255202635,
      "learning_rate": 5e-06,
      "loss": 0.6438,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.3151006777409588,
      "learning_rate": 5e-06,
      "loss": 0.6363,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.33032068654862884,
      "learning_rate": 5e-06,
      "loss": 0.6416,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.34687627705792795,
      "learning_rate": 5e-06,
      "loss": 0.6333,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.30299829941401296,
      "learning_rate": 5e-06,
      "loss": 0.6389,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.32126277950058085,
      "learning_rate": 5e-06,
      "loss": 0.6315,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.32181196392081757,
      "learning_rate": 5e-06,
      "loss": 0.6395,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.29027414461556106,
      "learning_rate": 5e-06,
      "loss": 0.6359,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.3004935364337486,
      "learning_rate": 5e-06,
      "loss": 0.6396,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.3182054190088126,
      "learning_rate": 5e-06,
      "loss": 0.6351,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.29257308790197833,
      "learning_rate": 5e-06,
      "loss": 0.631,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.3281874063385835,
      "learning_rate": 5e-06,
      "loss": 0.6359,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.30750195742038006,
      "learning_rate": 5e-06,
      "loss": 0.6342,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.3054834309240547,
      "learning_rate": 5e-06,
      "loss": 0.6313,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.31232338920961195,
      "learning_rate": 5e-06,
      "loss": 0.6421,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.33312268715489884,
      "learning_rate": 5e-06,
      "loss": 0.6308,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.28430439482527575,
      "learning_rate": 5e-06,
      "loss": 0.6288,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.30649270069730716,
      "learning_rate": 5e-06,
      "loss": 0.6287,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.3040140255060057,
      "learning_rate": 5e-06,
      "loss": 0.6293,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.3545999092593587,
      "learning_rate": 5e-06,
      "loss": 0.6259,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.30986750638847393,
      "learning_rate": 5e-06,
      "loss": 0.6317,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.3216062543695188,
      "learning_rate": 5e-06,
      "loss": 0.6388,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.317954119212454,
      "learning_rate": 5e-06,
      "loss": 0.6306,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.3552789384520918,
      "learning_rate": 5e-06,
      "loss": 0.6243,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.2927240780631906,
      "learning_rate": 5e-06,
      "loss": 0.6366,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.3362026135681421,
      "learning_rate": 5e-06,
      "loss": 0.6352,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.3171566741033119,
      "learning_rate": 5e-06,
      "loss": 0.6384,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.3178698000797058,
      "learning_rate": 5e-06,
      "loss": 0.6346,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.3103202303938852,
      "learning_rate": 5e-06,
      "loss": 0.6215,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.3815191468278998,
      "learning_rate": 5e-06,
      "loss": 0.6345,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6251751184463501,
      "eval_runtime": 443.8724,
      "eval_samples_per_second": 26.636,
      "eval_steps_per_second": 0.417,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.33172834615136476,
      "learning_rate": 5e-06,
      "loss": 0.6473,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.34424149066722537,
      "learning_rate": 5e-06,
      "loss": 0.6019,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.3028655590581102,
      "learning_rate": 5e-06,
      "loss": 0.5957,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.2884231268619158,
      "learning_rate": 5e-06,
      "loss": 0.5967,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.30659760009174,
      "learning_rate": 5e-06,
      "loss": 0.6042,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.32926249950938313,
      "learning_rate": 5e-06,
      "loss": 0.6093,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.3158323453023191,
      "learning_rate": 5e-06,
      "loss": 0.5991,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.3031613959301613,
      "learning_rate": 5e-06,
      "loss": 0.597,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.288147157900111,
      "learning_rate": 5e-06,
      "loss": 0.6027,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.30665751637918065,
      "learning_rate": 5e-06,
      "loss": 0.5957,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.3188310134389718,
      "learning_rate": 5e-06,
      "loss": 0.6059,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.3056881609648514,
      "learning_rate": 5e-06,
      "loss": 0.6025,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.27741435521459135,
      "learning_rate": 5e-06,
      "loss": 0.6041,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.30256280677733355,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.2849315058896214,
      "learning_rate": 5e-06,
      "loss": 0.5966,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.28805599862814474,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.30205024558551485,
      "learning_rate": 5e-06,
      "loss": 0.6018,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.3082719282952612,
      "learning_rate": 5e-06,
      "loss": 0.6019,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.3149801306843495,
      "learning_rate": 5e-06,
      "loss": 0.6045,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.3588632396980364,
      "learning_rate": 5e-06,
      "loss": 0.6025,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.32450978554155874,
      "learning_rate": 5e-06,
      "loss": 0.5974,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.3072016967628761,
      "learning_rate": 5e-06,
      "loss": 0.5953,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.3234819157170789,
      "learning_rate": 5e-06,
      "loss": 0.595,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.33073183709411097,
      "learning_rate": 5e-06,
      "loss": 0.6026,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.2827694243710057,
      "learning_rate": 5e-06,
      "loss": 0.5911,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.313252659198283,
      "learning_rate": 5e-06,
      "loss": 0.5951,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.3006117467907023,
      "learning_rate": 5e-06,
      "loss": 0.6106,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.30612410275194285,
      "learning_rate": 5e-06,
      "loss": 0.5977,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.3024691477363925,
      "learning_rate": 5e-06,
      "loss": 0.6022,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.2977897390172667,
      "learning_rate": 5e-06,
      "loss": 0.5953,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.36185544494498323,
      "learning_rate": 5e-06,
      "loss": 0.5965,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.2841927419626393,
      "learning_rate": 5e-06,
      "loss": 0.5938,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.3419351965360681,
      "learning_rate": 5e-06,
      "loss": 0.5977,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.30800250495317405,
      "learning_rate": 5e-06,
      "loss": 0.5941,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.29984584493486444,
      "learning_rate": 5e-06,
      "loss": 0.6013,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.30044148053925274,
      "learning_rate": 5e-06,
      "loss": 0.5988,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.3025363179456475,
      "learning_rate": 5e-06,
      "loss": 0.5856,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.3145503828700074,
      "learning_rate": 5e-06,
      "loss": 0.5935,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.35065394981808123,
      "learning_rate": 5e-06,
      "loss": 0.5912,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.2974502518481575,
      "learning_rate": 5e-06,
      "loss": 0.5951,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.32731845865746034,
      "learning_rate": 5e-06,
      "loss": 0.5996,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.31162936799394053,
      "learning_rate": 5e-06,
      "loss": 0.5976,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.32719072803884974,
      "learning_rate": 5e-06,
      "loss": 0.5989,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.3101089792190507,
      "learning_rate": 5e-06,
      "loss": 0.5964,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6190649271011353,
      "eval_runtime": 441.9179,
      "eval_samples_per_second": 26.754,
      "eval_steps_per_second": 0.419,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.31583692108300393,
      "learning_rate": 5e-06,
      "loss": 0.6158,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.3549258835016534,
      "learning_rate": 5e-06,
      "loss": 0.5549,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.33724473694318774,
      "learning_rate": 5e-06,
      "loss": 0.5702,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.3141133261122478,
      "learning_rate": 5e-06,
      "loss": 0.5684,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.29764089324641324,
      "learning_rate": 5e-06,
      "loss": 0.5714,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.28790057942161257,
      "learning_rate": 5e-06,
      "loss": 0.5696,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.29743546832324724,
      "learning_rate": 5e-06,
      "loss": 0.5642,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.2954585305184846,
      "learning_rate": 5e-06,
      "loss": 0.5651,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.3050175730295642,
      "learning_rate": 5e-06,
      "loss": 0.5702,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.32132099983730744,
      "learning_rate": 5e-06,
      "loss": 0.5696,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.2837390182666578,
      "learning_rate": 5e-06,
      "loss": 0.5593,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.29469997963523875,
      "learning_rate": 5e-06,
      "loss": 0.5683,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.3120611664539125,
      "learning_rate": 5e-06,
      "loss": 0.5686,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.3537751093167747,
      "learning_rate": 5e-06,
      "loss": 0.566,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.2852167762719538,
      "learning_rate": 5e-06,
      "loss": 0.5691,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.32111957707443545,
      "learning_rate": 5e-06,
      "loss": 0.5704,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.3012654149735683,
      "learning_rate": 5e-06,
      "loss": 0.5684,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.308454191252784,
      "learning_rate": 5e-06,
      "loss": 0.5677,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.31064428522383297,
      "learning_rate": 5e-06,
      "loss": 0.576,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.3058217002467149,
      "learning_rate": 5e-06,
      "loss": 0.5789,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.32737211111120573,
      "learning_rate": 5e-06,
      "loss": 0.5742,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.28097098781753427,
      "learning_rate": 5e-06,
      "loss": 0.5718,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.3370867700826581,
      "learning_rate": 5e-06,
      "loss": 0.5783,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.3481252533458522,
      "learning_rate": 5e-06,
      "loss": 0.5718,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.28443121713707625,
      "learning_rate": 5e-06,
      "loss": 0.5699,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.28884871195764056,
      "learning_rate": 5e-06,
      "loss": 0.5693,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.3009832609905576,
      "learning_rate": 5e-06,
      "loss": 0.5689,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.3048094427833053,
      "learning_rate": 5e-06,
      "loss": 0.5692,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.3295891153062313,
      "learning_rate": 5e-06,
      "loss": 0.5703,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.2965154504918107,
      "learning_rate": 5e-06,
      "loss": 0.5731,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.3026395616545691,
      "learning_rate": 5e-06,
      "loss": 0.5683,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.3390836756743429,
      "learning_rate": 5e-06,
      "loss": 0.5694,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.31852178230857053,
      "learning_rate": 5e-06,
      "loss": 0.5726,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.28505692332238153,
      "learning_rate": 5e-06,
      "loss": 0.576,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.296853334138319,
      "learning_rate": 5e-06,
      "loss": 0.5658,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.2995278356454653,
      "learning_rate": 5e-06,
      "loss": 0.5643,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.31122420075903284,
      "learning_rate": 5e-06,
      "loss": 0.5791,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.28755094750291593,
      "learning_rate": 5e-06,
      "loss": 0.5757,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.31965652378893494,
      "learning_rate": 5e-06,
      "loss": 0.5729,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.32825908653753605,
      "learning_rate": 5e-06,
      "loss": 0.5739,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.3071446259514345,
      "learning_rate": 5e-06,
      "loss": 0.5641,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.3148839567474544,
      "learning_rate": 5e-06,
      "loss": 0.5643,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.2929400491067566,
      "learning_rate": 5e-06,
      "loss": 0.5654,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.31295742772627644,
      "learning_rate": 5e-06,
      "loss": 0.5752,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6186820268630981,
      "eval_runtime": 441.0146,
      "eval_samples_per_second": 26.809,
      "eval_steps_per_second": 0.419,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.6041361260450413,
      "train_runtime": 70558.5882,
      "train_samples_per_second": 9.55,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}