{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.996011396011396,
  "eval_steps": 500,
  "global_step": 1314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 1.525165904903376,
      "learning_rate": 4.997501873438867e-06,
      "loss": 0.7554,
      "step": 10
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 0.8053498795260766,
      "learning_rate": 4.995007487521836e-06,
      "loss": 0.7012,
      "step": 20
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 0.9238843089964248,
      "learning_rate": 4.992516832922945e-06,
      "loss": 0.6799,
      "step": 30
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 0.8666218585982866,
      "learning_rate": 4.990029900348746e-06,
      "loss": 0.6795,
      "step": 40
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 0.8088508819396967,
      "learning_rate": 4.987546680538165e-06,
      "loss": 0.6755,
      "step": 50
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 0.6691655296371665,
      "learning_rate": 4.985067164262359e-06,
      "loss": 0.655,
      "step": 60
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 0.43456809500231514,
      "learning_rate": 4.98259134232457e-06,
      "loss": 0.6519,
      "step": 70
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 0.3714072334118622,
      "learning_rate": 4.980119205559974e-06,
      "loss": 0.6549,
      "step": 80
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 0.32002706744599263,
      "learning_rate": 4.977650744835555e-06,
      "loss": 0.6397,
      "step": 90
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 0.30081132793404947,
      "learning_rate": 4.975185951049947e-06,
      "loss": 0.641,
      "step": 100
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 0.33713425539947894,
      "learning_rate": 4.972724815133302e-06,
      "loss": 0.6323,
      "step": 110
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 0.3102451954821332,
      "learning_rate": 4.970267328047151e-06,
      "loss": 0.6406,
      "step": 120
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.29069740779199893,
      "learning_rate": 4.9678134807842575e-06,
      "loss": 0.6416,
      "step": 130
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 0.31110922605055114,
      "learning_rate": 4.965363264368484e-06,
      "loss": 0.6437,
      "step": 140
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 0.30841690869415606,
      "learning_rate": 4.962916669854652e-06,
      "loss": 0.6362,
      "step": 150
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 0.3100448846559202,
      "learning_rate": 4.960473688328407e-06,
      "loss": 0.6414,
      "step": 160
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 0.3488301271178644,
      "learning_rate": 4.95803431090608e-06,
      "loss": 0.6332,
      "step": 170
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.29863658820743894,
      "learning_rate": 4.955598528734554e-06,
      "loss": 0.6387,
      "step": 180
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 0.3039290966360699,
      "learning_rate": 4.953166332991125e-06,
      "loss": 0.6313,
      "step": 190
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 0.30706559541216405,
      "learning_rate": 4.950737714883372e-06,
      "loss": 0.6394,
      "step": 200
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 0.3082675137287457,
      "learning_rate": 4.948312665649022e-06,
      "loss": 0.6357,
      "step": 210
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 0.30127921218162695,
      "learning_rate": 4.945891176555817e-06,
      "loss": 0.6394,
      "step": 220
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 0.3196585894616986,
      "learning_rate": 4.943473238901383e-06,
      "loss": 0.635,
      "step": 230
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 0.30078793486520006,
      "learning_rate": 4.941058844013094e-06,
      "loss": 0.6309,
      "step": 240
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 0.33427271614606124,
      "learning_rate": 4.938647983247949e-06,
      "loss": 0.6358,
      "step": 250
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.3016972471723858,
      "learning_rate": 4.936240647992436e-06,
      "loss": 0.634,
      "step": 260
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.3115162799082423,
      "learning_rate": 4.933836829662409e-06,
      "loss": 0.6312,
      "step": 270
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 0.3256775803287148,
      "learning_rate": 4.9314365197029475e-06,
      "loss": 0.642,
      "step": 280
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 0.3189814595928396,
      "learning_rate": 4.9290397095882446e-06,
      "loss": 0.6306,
      "step": 290
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 0.29642424266007356,
      "learning_rate": 4.9266463908214664e-06,
      "loss": 0.6287,
      "step": 300
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 0.29650226463241985,
      "learning_rate": 4.924256554934632e-06,
      "loss": 0.6285,
      "step": 310
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 0.3429160287206981,
      "learning_rate": 4.9218701934884865e-06,
      "loss": 0.6291,
      "step": 320
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 0.30230466534717343,
      "learning_rate": 4.919487298072377e-06,
      "loss": 0.6258,
      "step": 330
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 0.34202573052544266,
      "learning_rate": 4.917107860304125e-06,
      "loss": 0.6316,
      "step": 340
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 0.33516486005488866,
      "learning_rate": 4.914731871829905e-06,
      "loss": 0.6387,
      "step": 350
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.3211212871249072,
      "learning_rate": 4.912359324324121e-06,
      "loss": 0.6304,
      "step": 360
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 0.322137683454166,
      "learning_rate": 4.909990209489284e-06,
      "loss": 0.6242,
      "step": 370
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 0.31550278369753354,
      "learning_rate": 4.907624519055888e-06,
      "loss": 0.6365,
      "step": 380
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.3260141043187448,
      "learning_rate": 4.905262244782294e-06,
      "loss": 0.6351,
      "step": 390
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 0.3040813959954933,
      "learning_rate": 4.902903378454601e-06,
      "loss": 0.6383,
      "step": 400
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 0.2849114973684705,
      "learning_rate": 4.900547911886537e-06,
      "loss": 0.6345,
      "step": 410
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 0.2972340394377098,
      "learning_rate": 4.898195836919327e-06,
      "loss": 0.6214,
      "step": 420
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 0.34508028269623636,
      "learning_rate": 4.895847145421587e-06,
      "loss": 0.6344,
      "step": 430
    },
    {
      "epoch": 0.9982905982905983,
      "eval_loss": 0.6250626444816589,
      "eval_runtime": 441.6427,
      "eval_samples_per_second": 26.771,
      "eval_steps_per_second": 0.419,
      "step": 438
    },
    {
      "epoch": 1.0034188034188034,
      "grad_norm": 0.3476146349698735,
      "learning_rate": 4.893501829289195e-06,
      "loss": 0.6472,
      "step": 440
    },
    {
      "epoch": 1.0262108262108263,
      "grad_norm": 0.3276756414551046,
      "learning_rate": 4.891159880445185e-06,
      "loss": 0.6016,
      "step": 450
    },
    {
      "epoch": 1.049002849002849,
      "grad_norm": 0.3019871041088552,
      "learning_rate": 4.888821290839617e-06,
      "loss": 0.5954,
      "step": 460
    },
    {
      "epoch": 1.0717948717948718,
      "grad_norm": 0.29126627395932514,
      "learning_rate": 4.886486052449469e-06,
      "loss": 0.5963,
      "step": 470
    },
    {
      "epoch": 1.0945868945868946,
      "grad_norm": 0.30453199438005507,
      "learning_rate": 4.8841541572785224e-06,
      "loss": 0.6038,
      "step": 480
    },
    {
      "epoch": 1.1173789173789175,
      "grad_norm": 0.3278803012067222,
      "learning_rate": 4.881825597357242e-06,
      "loss": 0.6089,
      "step": 490
    },
    {
      "epoch": 1.1401709401709401,
      "grad_norm": 0.2965789673527869,
      "learning_rate": 4.8795003647426654e-06,
      "loss": 0.5987,
      "step": 500
    },
    {
      "epoch": 1.162962962962963,
      "grad_norm": 0.31265022709916646,
      "learning_rate": 4.877178451518289e-06,
      "loss": 0.5966,
      "step": 510
    },
    {
      "epoch": 1.1857549857549858,
      "grad_norm": 0.3006543348402962,
      "learning_rate": 4.8748598497939494e-06,
      "loss": 0.6023,
      "step": 520
    },
    {
      "epoch": 1.2085470085470085,
      "grad_norm": 0.32429016802211824,
      "learning_rate": 4.872544551705718e-06,
      "loss": 0.5953,
      "step": 530
    },
    {
      "epoch": 1.2313390313390313,
      "grad_norm": 0.29037801778251926,
      "learning_rate": 4.870232549415787e-06,
      "loss": 0.6056,
      "step": 540
    },
    {
      "epoch": 1.2541310541310542,
      "grad_norm": 0.3348609323819345,
      "learning_rate": 4.867923835112355e-06,
      "loss": 0.6021,
      "step": 550
    },
    {
      "epoch": 1.2769230769230768,
      "grad_norm": 0.2831871289987126,
      "learning_rate": 4.865618401009519e-06,
      "loss": 0.6037,
      "step": 560
    },
    {
      "epoch": 1.2997150997150997,
      "grad_norm": 0.3006000412993847,
      "learning_rate": 4.863316239347163e-06,
      "loss": 0.5977,
      "step": 570
    },
    {
      "epoch": 1.3225071225071225,
      "grad_norm": 0.27196198233146873,
      "learning_rate": 4.861017342390847e-06,
      "loss": 0.5962,
      "step": 580
    },
    {
      "epoch": 1.3452991452991454,
      "grad_norm": 0.2967670383985263,
      "learning_rate": 4.858721702431704e-06,
      "loss": 0.5959,
      "step": 590
    },
    {
      "epoch": 1.368091168091168,
      "grad_norm": 0.342796374484054,
      "learning_rate": 4.856429311786322e-06,
      "loss": 0.6014,
      "step": 600
    },
    {
      "epoch": 1.390883190883191,
      "grad_norm": 0.3292423768345227,
      "learning_rate": 4.8541401627966426e-06,
      "loss": 0.6016,
      "step": 610
    },
    {
      "epoch": 1.4136752136752135,
      "grad_norm": 0.299728514910799,
      "learning_rate": 4.85185424782985e-06,
      "loss": 0.6041,
      "step": 620
    },
    {
      "epoch": 1.4364672364672364,
      "grad_norm": 0.30887594524743867,
      "learning_rate": 4.8495715592782715e-06,
      "loss": 0.6021,
      "step": 630
    },
    {
      "epoch": 1.4592592592592593,
      "grad_norm": 0.305132962765952,
      "learning_rate": 4.847292089559258e-06,
      "loss": 0.597,
      "step": 640
    },
    {
      "epoch": 1.4820512820512821,
      "grad_norm": 0.2969561784557187,
      "learning_rate": 4.845015831115093e-06,
      "loss": 0.5949,
      "step": 650
    },
    {
      "epoch": 1.504843304843305,
      "grad_norm": 0.2870255011777952,
      "learning_rate": 4.842742776412874e-06,
      "loss": 0.5946,
      "step": 660
    },
    {
      "epoch": 1.5276353276353276,
      "grad_norm": 0.3136573065822191,
      "learning_rate": 4.840472917944417e-06,
      "loss": 0.6022,
      "step": 670
    },
    {
      "epoch": 1.5504273504273505,
      "grad_norm": 0.2775783004125175,
      "learning_rate": 4.838206248226147e-06,
      "loss": 0.5908,
      "step": 680
    },
    {
      "epoch": 1.573219373219373,
      "grad_norm": 0.32400364521332364,
      "learning_rate": 4.835942759799002e-06,
      "loss": 0.5947,
      "step": 690
    },
    {
      "epoch": 1.596011396011396,
      "grad_norm": 0.287719331253849,
      "learning_rate": 4.833682445228318e-06,
      "loss": 0.6102,
      "step": 700
    },
    {
      "epoch": 1.6188034188034188,
      "grad_norm": 0.3061981877613935,
      "learning_rate": 4.831425297103738e-06,
      "loss": 0.5973,
      "step": 710
    },
    {
      "epoch": 1.6415954415954417,
      "grad_norm": 0.2935986527130749,
      "learning_rate": 4.829171308039099e-06,
      "loss": 0.6019,
      "step": 720
    },
    {
      "epoch": 1.6643874643874645,
      "grad_norm": 0.3278074868861201,
      "learning_rate": 4.826920470672344e-06,
      "loss": 0.595,
      "step": 730
    },
    {
      "epoch": 1.6871794871794872,
      "grad_norm": 0.31438196017683884,
      "learning_rate": 4.824672777665406e-06,
      "loss": 0.5961,
      "step": 740
    },
    {
      "epoch": 1.7099715099715098,
      "grad_norm": 0.29590816240567525,
      "learning_rate": 4.822428221704122e-06,
      "loss": 0.5935,
      "step": 750
    },
    {
      "epoch": 1.7327635327635327,
      "grad_norm": 0.30748668539068935,
      "learning_rate": 4.820186795498119e-06,
      "loss": 0.5974,
      "step": 760
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 0.29187796953262835,
      "learning_rate": 4.817948491780728e-06,
      "loss": 0.5938,
      "step": 770
    },
    {
      "epoch": 1.7783475783475784,
      "grad_norm": 0.29933508547691845,
      "learning_rate": 4.815713303308872e-06,
      "loss": 0.6009,
      "step": 780
    },
    {
      "epoch": 1.8011396011396013,
      "grad_norm": 0.2964931056797495,
      "learning_rate": 4.813481222862981e-06,
      "loss": 0.5985,
      "step": 790
    },
    {
      "epoch": 1.823931623931624,
      "grad_norm": 0.3119877619286405,
      "learning_rate": 4.811252243246881e-06,
      "loss": 0.5853,
      "step": 800
    },
    {
      "epoch": 1.8467236467236468,
      "grad_norm": 0.30077090862615286,
      "learning_rate": 4.809026357287709e-06,
      "loss": 0.5932,
      "step": 810
    },
    {
      "epoch": 1.8695156695156694,
      "grad_norm": 0.30995473438720483,
      "learning_rate": 4.806803557835802e-06,
      "loss": 0.5909,
      "step": 820
    },
    {
      "epoch": 1.8923076923076922,
      "grad_norm": 0.3313446191052585,
      "learning_rate": 4.804583837764616e-06,
      "loss": 0.5947,
      "step": 830
    },
    {
      "epoch": 1.915099715099715,
      "grad_norm": 0.30558861743932775,
      "learning_rate": 4.802367189970616e-06,
      "loss": 0.5992,
      "step": 840
    },
    {
      "epoch": 1.937891737891738,
      "grad_norm": 0.30382616090223413,
      "learning_rate": 4.8001536073731936e-06,
      "loss": 0.5973,
      "step": 850
    },
    {
      "epoch": 1.9606837606837608,
      "grad_norm": 0.311102028704937,
      "learning_rate": 4.797943082914558e-06,
      "loss": 0.5986,
      "step": 860
    },
    {
      "epoch": 1.9834757834757835,
      "grad_norm": 0.3331012493756619,
      "learning_rate": 4.795735609559657e-06,
      "loss": 0.5961,
      "step": 870
    },
    {
      "epoch": 1.9994301994301993,
      "eval_loss": 0.618708610534668,
      "eval_runtime": 442.8586,
      "eval_samples_per_second": 26.697,
      "eval_steps_per_second": 0.418,
      "step": 877
    },
    {
      "epoch": 2.006837606837607,
      "grad_norm": 0.3512930734127694,
      "learning_rate": 4.793531180296065e-06,
      "loss": 0.6157,
      "step": 880
    },
    {
      "epoch": 2.0296296296296297,
      "grad_norm": 0.3599795362623166,
      "learning_rate": 4.7913297881339085e-06,
      "loss": 0.5553,
      "step": 890
    },
    {
      "epoch": 2.0524216524216525,
      "grad_norm": 0.3164425885661232,
      "learning_rate": 4.789131426105757e-06,
      "loss": 0.5704,
      "step": 900
    },
    {
      "epoch": 2.0752136752136754,
      "grad_norm": 0.36033177622984786,
      "learning_rate": 4.786936087266542e-06,
      "loss": 0.5686,
      "step": 910
    },
    {
      "epoch": 2.098005698005698,
      "grad_norm": 0.33104525900123416,
      "learning_rate": 4.784743764693455e-06,
      "loss": 0.5717,
      "step": 920
    },
    {
      "epoch": 2.1207977207977207,
      "grad_norm": 0.29338052297451017,
      "learning_rate": 4.7825544514858655e-06,
      "loss": 0.5697,
      "step": 930
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 0.30967219938255186,
      "learning_rate": 4.780368140765222e-06,
      "loss": 0.5644,
      "step": 940
    },
    {
      "epoch": 2.1663817663817664,
      "grad_norm": 0.3116949593974322,
      "learning_rate": 4.778184825674966e-06,
      "loss": 0.5653,
      "step": 950
    },
    {
      "epoch": 2.1891737891737892,
      "grad_norm": 0.305260137885279,
      "learning_rate": 4.776004499380439e-06,
      "loss": 0.5703,
      "step": 960
    },
    {
      "epoch": 2.211965811965812,
      "grad_norm": 0.29822541696944915,
      "learning_rate": 4.773827155068793e-06,
      "loss": 0.5698,
      "step": 970
    },
    {
      "epoch": 2.234757834757835,
      "grad_norm": 0.2859968833970714,
      "learning_rate": 4.771652785948902e-06,
      "loss": 0.5594,
      "step": 980
    },
    {
      "epoch": 2.2575498575498574,
      "grad_norm": 0.3174033757002569,
      "learning_rate": 4.769481385251275e-06,
      "loss": 0.5684,
      "step": 990
    },
    {
      "epoch": 2.2803418803418802,
      "grad_norm": 0.31269556215191746,
      "learning_rate": 4.767312946227961e-06,
      "loss": 0.5687,
      "step": 1000
    },
    {
      "epoch": 2.303133903133903,
      "grad_norm": 0.3051928602107768,
      "learning_rate": 4.765147462152471e-06,
      "loss": 0.5662,
      "step": 1010
    },
    {
      "epoch": 2.325925925925926,
      "grad_norm": 0.31501362221921503,
      "learning_rate": 4.762984926319677e-06,
      "loss": 0.5693,
      "step": 1020
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 0.3448956830891534,
      "learning_rate": 4.760825332045738e-06,
      "loss": 0.5706,
      "step": 1030
    },
    {
      "epoch": 2.3715099715099717,
      "grad_norm": 0.3026934523794964,
      "learning_rate": 4.758668672668006e-06,
      "loss": 0.5685,
      "step": 1040
    },
    {
      "epoch": 2.394301994301994,
      "grad_norm": 0.3099921114639337,
      "learning_rate": 4.756514941544941e-06,
      "loss": 0.5679,
      "step": 1050
    },
    {
      "epoch": 2.417094017094017,
      "grad_norm": 0.30114216044285347,
      "learning_rate": 4.754364132056025e-06,
      "loss": 0.5762,
      "step": 1060
    },
    {
      "epoch": 2.43988603988604,
      "grad_norm": 0.29726040323415187,
      "learning_rate": 4.752216237601676e-06,
      "loss": 0.579,
      "step": 1070
    },
    {
      "epoch": 2.4626780626780627,
      "grad_norm": 0.29775393502122444,
      "learning_rate": 4.750071251603165e-06,
      "loss": 0.5743,
      "step": 1080
    },
    {
      "epoch": 2.4854700854700855,
      "grad_norm": 0.2943291979563702,
      "learning_rate": 4.7479291675025314e-06,
      "loss": 0.5719,
      "step": 1090
    },
    {
      "epoch": 2.5082621082621084,
      "grad_norm": 0.31015578394336496,
      "learning_rate": 4.745789978762496e-06,
      "loss": 0.5784,
      "step": 1100
    },
    {
      "epoch": 2.5310541310541312,
      "grad_norm": 0.3067404718121527,
      "learning_rate": 4.7436536788663765e-06,
      "loss": 0.5718,
      "step": 1110
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 0.30476772110006034,
      "learning_rate": 4.74152026131801e-06,
      "loss": 0.57,
      "step": 1120
    },
    {
      "epoch": 2.5766381766381765,
      "grad_norm": 0.3200412884206867,
      "learning_rate": 4.739389719641665e-06,
      "loss": 0.5694,
      "step": 1130
    },
    {
      "epoch": 2.5994301994301994,
      "grad_norm": 0.30781710889202074,
      "learning_rate": 4.7372620473819615e-06,
      "loss": 0.5691,
      "step": 1140
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 0.2990058185323815,
      "learning_rate": 4.735137238103785e-06,
      "loss": 0.5694,
      "step": 1150
    },
    {
      "epoch": 2.645014245014245,
      "grad_norm": 0.3039158464988474,
      "learning_rate": 4.7330152853922064e-06,
      "loss": 0.5705,
      "step": 1160
    },
    {
      "epoch": 2.667806267806268,
      "grad_norm": 0.2968978926486904,
      "learning_rate": 4.730896182852409e-06,
      "loss": 0.5733,
      "step": 1170
    },
    {
      "epoch": 2.690598290598291,
      "grad_norm": 0.2952864087104688,
      "learning_rate": 4.72877992410959e-06,
      "loss": 0.5684,
      "step": 1180
    },
    {
      "epoch": 2.7133903133903132,
      "grad_norm": 0.288700561234537,
      "learning_rate": 4.7266665028088985e-06,
      "loss": 0.5695,
      "step": 1190
    },
    {
      "epoch": 2.736182336182336,
      "grad_norm": 0.29212831117890853,
      "learning_rate": 4.72455591261534e-06,
      "loss": 0.5727,
      "step": 1200
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 0.28360998087695266,
      "learning_rate": 4.722448147213712e-06,
      "loss": 0.5761,
      "step": 1210
    },
    {
      "epoch": 2.781766381766382,
      "grad_norm": 0.2843161577709465,
      "learning_rate": 4.720343200308507e-06,
      "loss": 0.5659,
      "step": 1220
    },
    {
      "epoch": 2.8045584045584047,
      "grad_norm": 0.30629830967088484,
      "learning_rate": 4.7182410656238484e-06,
      "loss": 0.5644,
      "step": 1230
    },
    {
      "epoch": 2.827350427350427,
      "grad_norm": 0.31248945067389905,
      "learning_rate": 4.716141736903407e-06,
      "loss": 0.5792,
      "step": 1240
    },
    {
      "epoch": 2.8501424501424504,
      "grad_norm": 0.2962592773085804,
      "learning_rate": 4.714045207910318e-06,
      "loss": 0.5759,
      "step": 1250
    },
    {
      "epoch": 2.872934472934473,
      "grad_norm": 0.2982145036424215,
      "learning_rate": 4.71195147242711e-06,
      "loss": 0.573,
      "step": 1260
    },
    {
      "epoch": 2.8957264957264957,
      "grad_norm": 0.30389926739813455,
      "learning_rate": 4.709860524255622e-06,
      "loss": 0.574,
      "step": 1270
    },
    {
      "epoch": 2.9185185185185185,
      "grad_norm": 0.28975007917463824,
      "learning_rate": 4.707772357216934e-06,
      "loss": 0.5642,
      "step": 1280
    },
    {
      "epoch": 2.9413105413105414,
      "grad_norm": 0.2984479549098159,
      "learning_rate": 4.705686965151282e-06,
      "loss": 0.5644,
      "step": 1290
    },
    {
      "epoch": 2.9641025641025642,
      "grad_norm": 0.3364273603754725,
      "learning_rate": 4.703604341917987e-06,
      "loss": 0.5655,
      "step": 1300
    },
    {
      "epoch": 2.9868945868945866,
      "grad_norm": 0.30326945997963417,
      "learning_rate": 4.701524481395374e-06,
      "loss": 0.5753,
      "step": 1310
    },
    {
      "epoch": 2.996011396011396,
      "eval_loss": 0.6186444163322449,
      "eval_runtime": 441.5391,
      "eval_samples_per_second": 26.777,
      "eval_steps_per_second": 0.419,
      "step": 1314
    },
    {
      "epoch": 2.996011396011396,
      "step": 1314,
      "total_flos": 2755219238682624.0,
      "train_loss": 0.604149692316215,
      "train_runtime": 70723.2043,
      "train_samples_per_second": 9.528,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1314,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2755219238682624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}