{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 1.8413729954355371,
      "learning_rate": 5e-06,
      "loss": 0.7563,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.9090731736201849,
      "learning_rate": 5e-06,
      "loss": 0.7031,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9459277239616597,
      "learning_rate": 5e-06,
      "loss": 0.6819,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.8734617594615591,
      "learning_rate": 5e-06,
      "loss": 0.6816,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.8261584435284774,
      "learning_rate": 5e-06,
      "loss": 0.6779,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.7440549988310982,
      "learning_rate": 5e-06,
      "loss": 0.6577,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.5455785570158392,
      "learning_rate": 5e-06,
      "loss": 0.6543,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.44565303721946115,
      "learning_rate": 5e-06,
      "loss": 0.6565,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.3345108653267818,
      "learning_rate": 5e-06,
      "loss": 0.6406,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.3017230444715495,
      "learning_rate": 5e-06,
      "loss": 0.6416,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.3487395918830048,
      "learning_rate": 5e-06,
      "loss": 0.6328,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.3061594827931602,
      "learning_rate": 5e-06,
      "loss": 0.641,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.2911872675140233,
      "learning_rate": 5e-06,
      "loss": 0.6418,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.30262387115888767,
      "learning_rate": 5e-06,
      "loss": 0.644,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.31911557551796654,
      "learning_rate": 5e-06,
      "loss": 0.6364,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.3090144942423189,
      "learning_rate": 5e-06,
      "loss": 0.6416,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.3432937062863081,
      "learning_rate": 5e-06,
      "loss": 0.6334,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.2963500618383741,
      "learning_rate": 5e-06,
      "loss": 0.639,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.3046405486703939,
      "learning_rate": 5e-06,
      "loss": 0.6315,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.30868284531879653,
      "learning_rate": 5e-06,
      "loss": 0.6396,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.3052199231164997,
      "learning_rate": 5e-06,
      "loss": 0.6358,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.30277698554242666,
      "learning_rate": 5e-06,
      "loss": 0.6396,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.3211995792289639,
      "learning_rate": 5e-06,
      "loss": 0.6352,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.3084337333085751,
      "learning_rate": 5e-06,
      "loss": 0.631,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.3288256202640419,
      "learning_rate": 5e-06,
      "loss": 0.636,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.29706868313775225,
      "learning_rate": 5e-06,
      "loss": 0.6341,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.3144886815645999,
      "learning_rate": 5e-06,
      "loss": 0.6313,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.3190791089525923,
      "learning_rate": 5e-06,
      "loss": 0.6421,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.3249332782376004,
      "learning_rate": 5e-06,
      "loss": 0.6308,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.2890796151811709,
      "learning_rate": 5e-06,
      "loss": 0.6288,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.29471089880956497,
      "learning_rate": 5e-06,
      "loss": 0.6287,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.3504934958900394,
      "learning_rate": 5e-06,
      "loss": 0.6293,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.3060990184286491,
      "learning_rate": 5e-06,
      "loss": 0.6259,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.3289729668080366,
      "learning_rate": 5e-06,
      "loss": 0.6318,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.33222515917483914,
      "learning_rate": 5e-06,
      "loss": 0.6388,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.3207534239220903,
      "learning_rate": 5e-06,
      "loss": 0.6306,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.33215225225215206,
      "learning_rate": 5e-06,
      "loss": 0.6243,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.3182925266440484,
      "learning_rate": 5e-06,
      "loss": 0.6366,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.325414228217383,
      "learning_rate": 5e-06,
      "loss": 0.6352,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.3025365876017664,
      "learning_rate": 5e-06,
      "loss": 0.6384,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.28492973089527013,
      "learning_rate": 5e-06,
      "loss": 0.6346,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.30734094147253904,
      "learning_rate": 5e-06,
      "loss": 0.6215,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.3478513457272329,
      "learning_rate": 5e-06,
      "loss": 0.6345,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6252126097679138,
      "eval_runtime": 440.0949,
      "eval_samples_per_second": 26.865,
      "eval_steps_per_second": 0.42,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.3488388961703569,
      "learning_rate": 5e-06,
      "loss": 0.6473,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.3329038249349578,
      "learning_rate": 5e-06,
      "loss": 0.6022,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.2985792359841702,
      "learning_rate": 5e-06,
      "loss": 0.5958,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.29798147636089894,
      "learning_rate": 5e-06,
      "loss": 0.5967,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.3136045258788408,
      "learning_rate": 5e-06,
      "loss": 0.6042,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.32139743047390607,
      "learning_rate": 5e-06,
      "loss": 0.6093,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.28823096821921135,
      "learning_rate": 5e-06,
      "loss": 0.5992,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.32266798489044735,
      "learning_rate": 5e-06,
      "loss": 0.597,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.29656108233094763,
      "learning_rate": 5e-06,
      "loss": 0.6028,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.32368493962158085,
      "learning_rate": 5e-06,
      "loss": 0.5957,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.2887731259651382,
      "learning_rate": 5e-06,
      "loss": 0.606,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.3326828983225666,
      "learning_rate": 5e-06,
      "loss": 0.6026,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.2816607282940196,
      "learning_rate": 5e-06,
      "loss": 0.6041,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.3068622260530732,
      "learning_rate": 5e-06,
      "loss": 0.5981,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.2693800588461558,
      "learning_rate": 5e-06,
      "loss": 0.5966,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.3004036329413474,
      "learning_rate": 5e-06,
      "loss": 0.5963,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.35148473854361456,
      "learning_rate": 5e-06,
      "loss": 0.6019,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.3223179469485186,
      "learning_rate": 5e-06,
      "loss": 0.6021,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.3022220988285776,
      "learning_rate": 5e-06,
      "loss": 0.6046,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.3135095540825169,
      "learning_rate": 5e-06,
      "loss": 0.6026,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.3093408676490943,
      "learning_rate": 5e-06,
      "loss": 0.5975,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.3088418507383678,
      "learning_rate": 5e-06,
      "loss": 0.5953,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.30245463513544485,
      "learning_rate": 5e-06,
      "loss": 0.5951,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.31000122154452087,
      "learning_rate": 5e-06,
      "loss": 0.6027,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.2780050823897534,
      "learning_rate": 5e-06,
      "loss": 0.5912,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.3196358258240848,
      "learning_rate": 5e-06,
      "loss": 0.5952,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.28599364965364427,
      "learning_rate": 5e-06,
      "loss": 0.6107,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.3054349065856669,
      "learning_rate": 5e-06,
      "loss": 0.5978,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.2958356022094109,
      "learning_rate": 5e-06,
      "loss": 0.6024,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.33303189252258475,
      "learning_rate": 5e-06,
      "loss": 0.5955,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.31075349861762674,
      "learning_rate": 5e-06,
      "loss": 0.5965,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.3041316275923669,
      "learning_rate": 5e-06,
      "loss": 0.5939,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.30054925578082764,
      "learning_rate": 5e-06,
      "loss": 0.5978,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.29626082487075206,
      "learning_rate": 5e-06,
      "loss": 0.5943,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.30547959900976057,
      "learning_rate": 5e-06,
      "loss": 0.6014,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.2968969729207538,
      "learning_rate": 5e-06,
      "loss": 0.599,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.3190259364697627,
      "learning_rate": 5e-06,
      "loss": 0.5858,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.31206301148081805,
      "learning_rate": 5e-06,
      "loss": 0.5937,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.30153757521039576,
      "learning_rate": 5e-06,
      "loss": 0.5914,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.31684333297637707,
      "learning_rate": 5e-06,
      "loss": 0.5952,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.30790370015451796,
      "learning_rate": 5e-06,
      "loss": 0.5997,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.3006206063192757,
      "learning_rate": 5e-06,
      "loss": 0.5978,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.3103260667118688,
      "learning_rate": 5e-06,
      "loss": 0.5992,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.35613689693361866,
      "learning_rate": 5e-06,
      "loss": 0.5966,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6188303232192993,
      "eval_runtime": 439.2208,
      "eval_samples_per_second": 26.918,
      "eval_steps_per_second": 0.421,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.3285187021226287,
      "learning_rate": 5e-06,
      "loss": 0.6162,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.33691597571231074,
      "learning_rate": 5e-06,
      "loss": 0.5558,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.31663461678384935,
      "learning_rate": 5e-06,
      "loss": 0.5709,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.35434689966692323,
      "learning_rate": 5e-06,
      "loss": 0.569,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.30905451143792223,
      "learning_rate": 5e-06,
      "loss": 0.5721,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.2817008598103781,
      "learning_rate": 5e-06,
      "loss": 0.5701,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.31760410729881533,
      "learning_rate": 5e-06,
      "loss": 0.5648,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.31805344194855023,
      "learning_rate": 5e-06,
      "loss": 0.5658,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.31812158359352355,
      "learning_rate": 5e-06,
      "loss": 0.5709,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.2966251323278132,
      "learning_rate": 5e-06,
      "loss": 0.5702,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.28862934268543866,
      "learning_rate": 5e-06,
      "loss": 0.5599,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.3105559198866554,
      "learning_rate": 5e-06,
      "loss": 0.5689,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.3058494575287101,
      "learning_rate": 5e-06,
      "loss": 0.5692,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.30374745612458004,
      "learning_rate": 5e-06,
      "loss": 0.5667,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.3137503716862055,
      "learning_rate": 5e-06,
      "loss": 0.5697,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.3515776830000683,
      "learning_rate": 5e-06,
      "loss": 0.5711,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.3075223218995559,
      "learning_rate": 5e-06,
      "loss": 0.569,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.3111410180440985,
      "learning_rate": 5e-06,
      "loss": 0.5684,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.30184704992462885,
      "learning_rate": 5e-06,
      "loss": 0.5767,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.2941540757209925,
      "learning_rate": 5e-06,
      "loss": 0.5795,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.298175427179016,
      "learning_rate": 5e-06,
      "loss": 0.5748,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.3004944527012022,
      "learning_rate": 5e-06,
      "loss": 0.5724,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.3169297651732949,
      "learning_rate": 5e-06,
      "loss": 0.5789,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.30842610241832297,
      "learning_rate": 5e-06,
      "loss": 0.5723,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.2992044788640401,
      "learning_rate": 5e-06,
      "loss": 0.5705,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.31941674485831356,
      "learning_rate": 5e-06,
      "loss": 0.57,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.3034725401202754,
      "learning_rate": 5e-06,
      "loss": 0.5696,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.30719293572688333,
      "learning_rate": 5e-06,
      "loss": 0.5699,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.2981195886569699,
      "learning_rate": 5e-06,
      "loss": 0.571,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.29437080866298027,
      "learning_rate": 5e-06,
      "loss": 0.5737,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.29928822023457585,
      "learning_rate": 5e-06,
      "loss": 0.5689,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.28901219115789856,
      "learning_rate": 5e-06,
      "loss": 0.57,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.29779948711219933,
      "learning_rate": 5e-06,
      "loss": 0.5732,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.2870917081594304,
      "learning_rate": 5e-06,
      "loss": 0.5765,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.28170905684828323,
      "learning_rate": 5e-06,
      "loss": 0.5664,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.2970061896232385,
      "learning_rate": 5e-06,
      "loss": 0.5649,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.3163195453999029,
      "learning_rate": 5e-06,
      "loss": 0.5798,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.29779080626080356,
      "learning_rate": 5e-06,
      "loss": 0.5764,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.3028382852058591,
      "learning_rate": 5e-06,
      "loss": 0.5736,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.3016528790992581,
      "learning_rate": 5e-06,
      "loss": 0.5745,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.28870249161646794,
      "learning_rate": 5e-06,
      "loss": 0.5647,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.30096166056048607,
      "learning_rate": 5e-06,
      "loss": 0.5649,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.3478647432780804,
      "learning_rate": 5e-06,
      "loss": 0.566,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.306295401189064,
      "learning_rate": 5e-06,
      "loss": 0.5759,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.618750810623169,
      "eval_runtime": 437.9782,
      "eval_samples_per_second": 26.994,
      "eval_steps_per_second": 0.422,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.6046473435615296,
      "train_runtime": 69998.9286,
      "train_samples_per_second": 9.627,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}