{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 1.5690855586195316,
      "learning_rate": 5e-06,
      "loss": 0.7545,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.8056158552478072,
      "learning_rate": 5e-06,
      "loss": 0.7001,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9207825261693261,
      "learning_rate": 5e-06,
      "loss": 0.6804,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.8642713550145188,
      "learning_rate": 5e-06,
      "loss": 0.6805,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.8129053965836459,
      "learning_rate": 5e-06,
      "loss": 0.6767,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.7304734695470014,
      "learning_rate": 5e-06,
      "loss": 0.6563,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.4919484569664153,
      "learning_rate": 5e-06,
      "loss": 0.6529,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.39772995119635807,
      "learning_rate": 5e-06,
      "loss": 0.6555,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.3155756573319741,
      "learning_rate": 5e-06,
      "loss": 0.6401,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.28568077118882496,
      "learning_rate": 5e-06,
      "loss": 0.6413,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.31513840218553896,
      "learning_rate": 5e-06,
      "loss": 0.6325,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.3109212355951042,
      "learning_rate": 5e-06,
      "loss": 0.6407,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.2971106684855869,
      "learning_rate": 5e-06,
      "loss": 0.6417,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.29031805082035994,
      "learning_rate": 5e-06,
      "loss": 0.6438,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.28987791869065926,
      "learning_rate": 5e-06,
      "loss": 0.6363,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.33577445767568165,
      "learning_rate": 5e-06,
      "loss": 0.6416,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.37911851845955313,
      "learning_rate": 5e-06,
      "loss": 0.6334,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.30529204587086856,
      "learning_rate": 5e-06,
      "loss": 0.639,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.30675659823743023,
      "learning_rate": 5e-06,
      "loss": 0.6316,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.3255741265150131,
      "learning_rate": 5e-06,
      "loss": 0.6396,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.3036863150363808,
      "learning_rate": 5e-06,
      "loss": 0.6358,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.30707477954595674,
      "learning_rate": 5e-06,
      "loss": 0.6396,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.35684409429679365,
      "learning_rate": 5e-06,
      "loss": 0.6352,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.29819335826069027,
      "learning_rate": 5e-06,
      "loss": 0.631,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.3107576658537844,
      "learning_rate": 5e-06,
      "loss": 0.6359,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.2972807007971548,
      "learning_rate": 5e-06,
      "loss": 0.6342,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.3012493432889954,
      "learning_rate": 5e-06,
      "loss": 0.6313,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.31687946264258104,
      "learning_rate": 5e-06,
      "loss": 0.6421,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.3189451277590996,
      "learning_rate": 5e-06,
      "loss": 0.6308,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.3066530814639796,
      "learning_rate": 5e-06,
      "loss": 0.6288,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.31330054861485507,
      "learning_rate": 5e-06,
      "loss": 0.6287,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.30056033144854405,
      "learning_rate": 5e-06,
      "loss": 0.6292,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.29407210723938776,
      "learning_rate": 5e-06,
      "loss": 0.6259,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.2845766225620895,
      "learning_rate": 5e-06,
      "loss": 0.6317,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.30967502531613134,
      "learning_rate": 5e-06,
      "loss": 0.6388,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.3187131137465586,
      "learning_rate": 5e-06,
      "loss": 0.6305,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.2997839860252265,
      "learning_rate": 5e-06,
      "loss": 0.6243,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.31182981046036146,
      "learning_rate": 5e-06,
      "loss": 0.6365,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.3077350677992891,
      "learning_rate": 5e-06,
      "loss": 0.6351,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.3267757823931702,
      "learning_rate": 5e-06,
      "loss": 0.6383,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.3025649944420077,
      "learning_rate": 5e-06,
      "loss": 0.6346,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.31619951664195245,
      "learning_rate": 5e-06,
      "loss": 0.6215,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.31962076837164793,
      "learning_rate": 5e-06,
      "loss": 0.6345,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6251474618911743,
      "eval_runtime": 443.9235,
      "eval_samples_per_second": 26.633,
      "eval_steps_per_second": 0.417,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.29960549619469606,
      "learning_rate": 5e-06,
      "loss": 0.6473,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.33922615342450413,
      "learning_rate": 5e-06,
      "loss": 0.6018,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.3728924776716559,
      "learning_rate": 5e-06,
      "loss": 0.5955,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.28955492836970204,
      "learning_rate": 5e-06,
      "loss": 0.5963,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.2925145121047745,
      "learning_rate": 5e-06,
      "loss": 0.6038,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.3127982580336364,
      "learning_rate": 5e-06,
      "loss": 0.6088,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.28299132104888536,
      "learning_rate": 5e-06,
      "loss": 0.5987,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.3194558524614371,
      "learning_rate": 5e-06,
      "loss": 0.5965,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.314668084709381,
      "learning_rate": 5e-06,
      "loss": 0.6022,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.30179531269453824,
      "learning_rate": 5e-06,
      "loss": 0.5952,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.2985699481482168,
      "learning_rate": 5e-06,
      "loss": 0.6055,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.3069267970901409,
      "learning_rate": 5e-06,
      "loss": 0.6021,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.2939709763554783,
      "learning_rate": 5e-06,
      "loss": 0.6036,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.3180365449914984,
      "learning_rate": 5e-06,
      "loss": 0.5976,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.27755728832408605,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.2787446821894519,
      "learning_rate": 5e-06,
      "loss": 0.5958,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.30418205812439103,
      "learning_rate": 5e-06,
      "loss": 0.6015,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.2938809693083896,
      "learning_rate": 5e-06,
      "loss": 0.6016,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.3161223800081249,
      "learning_rate": 5e-06,
      "loss": 0.6041,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.3020896531411494,
      "learning_rate": 5e-06,
      "loss": 0.6021,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.33183800445085115,
      "learning_rate": 5e-06,
      "loss": 0.597,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.29406094479384615,
      "learning_rate": 5e-06,
      "loss": 0.5949,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.2868473430413891,
      "learning_rate": 5e-06,
      "loss": 0.5945,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.3106634144386908,
      "learning_rate": 5e-06,
      "loss": 0.6022,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.27566428923059627,
      "learning_rate": 5e-06,
      "loss": 0.5907,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.33889687412670905,
      "learning_rate": 5e-06,
      "loss": 0.5947,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.2847420920071771,
      "learning_rate": 5e-06,
      "loss": 0.6103,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.30777817035505795,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.29678294647836445,
      "learning_rate": 5e-06,
      "loss": 0.6019,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.30315774841491816,
      "learning_rate": 5e-06,
      "loss": 0.595,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.3116517992136006,
      "learning_rate": 5e-06,
      "loss": 0.5961,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.3052794212427207,
      "learning_rate": 5e-06,
      "loss": 0.5935,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.28238611211304954,
      "learning_rate": 5e-06,
      "loss": 0.5974,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.30450144974178467,
      "learning_rate": 5e-06,
      "loss": 0.5938,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.3283100116097256,
      "learning_rate": 5e-06,
      "loss": 0.601,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.3258018720493811,
      "learning_rate": 5e-06,
      "loss": 0.5985,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.3142714756604907,
      "learning_rate": 5e-06,
      "loss": 0.5854,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.29328338846875035,
      "learning_rate": 5e-06,
      "loss": 0.5932,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.28243081898768063,
      "learning_rate": 5e-06,
      "loss": 0.5909,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.2942719283653051,
      "learning_rate": 5e-06,
      "loss": 0.5948,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.3044082601109506,
      "learning_rate": 5e-06,
      "loss": 0.5992,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.3044938732131104,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.2983768003925129,
      "learning_rate": 5e-06,
      "loss": 0.5986,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.3326048250446988,
      "learning_rate": 5e-06,
      "loss": 0.5961,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.6187193393707275,
      "eval_runtime": 444.2225,
      "eval_samples_per_second": 26.615,
      "eval_steps_per_second": 0.416,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.3437181801012376,
      "learning_rate": 5e-06,
      "loss": 0.6154,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.32801281568921176,
      "learning_rate": 5e-06,
      "loss": 0.5547,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.33838936003128184,
      "learning_rate": 5e-06,
      "loss": 0.5697,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.3158995901278963,
      "learning_rate": 5e-06,
      "loss": 0.5678,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.30561995853647556,
      "learning_rate": 5e-06,
      "loss": 0.5709,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.30753390340043946,
      "learning_rate": 5e-06,
      "loss": 0.569,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.2910271974183706,
      "learning_rate": 5e-06,
      "loss": 0.5636,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.29583077233054444,
      "learning_rate": 5e-06,
      "loss": 0.5646,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.3145696247080961,
      "learning_rate": 5e-06,
      "loss": 0.5696,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.31441066321233263,
      "learning_rate": 5e-06,
      "loss": 0.569,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.28392340006340083,
      "learning_rate": 5e-06,
      "loss": 0.5587,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.3234818724836778,
      "learning_rate": 5e-06,
      "loss": 0.5678,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.30068529088212365,
      "learning_rate": 5e-06,
      "loss": 0.568,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.285943793236616,
      "learning_rate": 5e-06,
      "loss": 0.5655,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.301510817610859,
      "learning_rate": 5e-06,
      "loss": 0.5686,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.31679137334029156,
      "learning_rate": 5e-06,
      "loss": 0.5699,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.30059201776131406,
      "learning_rate": 5e-06,
      "loss": 0.5679,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.3038921273955832,
      "learning_rate": 5e-06,
      "loss": 0.5672,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.29083835255468765,
      "learning_rate": 5e-06,
      "loss": 0.5755,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.29496775560015,
      "learning_rate": 5e-06,
      "loss": 0.5784,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.297083821157512,
      "learning_rate": 5e-06,
      "loss": 0.5737,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.29570343214264155,
      "learning_rate": 5e-06,
      "loss": 0.5713,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.3177279236129719,
      "learning_rate": 5e-06,
      "loss": 0.5777,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.3072203724981258,
      "learning_rate": 5e-06,
      "loss": 0.5713,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.294284801839864,
      "learning_rate": 5e-06,
      "loss": 0.5694,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.30785292280336074,
      "learning_rate": 5e-06,
      "loss": 0.5688,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.3011432785345076,
      "learning_rate": 5e-06,
      "loss": 0.5685,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.31398882827014146,
      "learning_rate": 5e-06,
      "loss": 0.5688,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.29018834844143315,
      "learning_rate": 5e-06,
      "loss": 0.5699,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.2945218054148125,
      "learning_rate": 5e-06,
      "loss": 0.5727,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.31309839057474065,
      "learning_rate": 5e-06,
      "loss": 0.5678,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.29355232076160415,
      "learning_rate": 5e-06,
      "loss": 0.5689,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.2823937055096976,
      "learning_rate": 5e-06,
      "loss": 0.5721,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.28946207186639944,
      "learning_rate": 5e-06,
      "loss": 0.5755,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.2962183813044789,
      "learning_rate": 5e-06,
      "loss": 0.5654,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.2987198527234997,
      "learning_rate": 5e-06,
      "loss": 0.5639,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.3085153547504659,
      "learning_rate": 5e-06,
      "loss": 0.5787,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.28697558904749576,
      "learning_rate": 5e-06,
      "loss": 0.5753,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.31068204780067543,
      "learning_rate": 5e-06,
      "loss": 0.5725,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.3152316896719821,
      "learning_rate": 5e-06,
      "loss": 0.5735,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.3344195713059621,
      "learning_rate": 5e-06,
      "loss": 0.5637,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.30828974313229623,
      "learning_rate": 5e-06,
      "loss": 0.5639,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.29998131422893776,
      "learning_rate": 5e-06,
      "loss": 0.565,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.3177308537301857,
      "learning_rate": 5e-06,
      "loss": 0.5748,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6187449097633362,
      "eval_runtime": 446.1562,
      "eval_samples_per_second": 26.5,
      "eval_steps_per_second": 0.415,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.6040076841139539,
      "train_runtime": 71054.678,
      "train_samples_per_second": 9.484,
      "train_steps_per_second": 0.018
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}