{
  "best_global_step": 1000,
  "best_metric": 0.20872141420841217,
  "best_model_checkpoint": "./qwen2-7b-math-coder/checkpoint-1000",
  "epoch": 0.45808520384791573,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004580852038479157,
      "grad_norm": 3.668745756149292,
      "learning_rate": 4.5e-06,
      "loss": 2.0337,
      "step": 10
    },
    {
      "epoch": 0.009161704076958314,
      "grad_norm": 3.277991533279419,
      "learning_rate": 9.5e-06,
      "loss": 1.7448,
      "step": 20
    },
    {
      "epoch": 0.013742556115437472,
      "grad_norm": 2.6093215942382812,
      "learning_rate": 1.45e-05,
      "loss": 1.2869,
      "step": 30
    },
    {
      "epoch": 0.01832340815391663,
      "grad_norm": 1.7964814901351929,
      "learning_rate": 1.9500000000000003e-05,
      "loss": 0.9077,
      "step": 40
    },
    {
      "epoch": 0.022904260192395786,
      "grad_norm": 1.5602203607559204,
      "learning_rate": 2.45e-05,
      "loss": 0.5228,
      "step": 50
    },
    {
      "epoch": 0.027485112230874943,
      "grad_norm": 2.1516997814178467,
      "learning_rate": 2.95e-05,
      "loss": 0.3872,
      "step": 60
    },
    {
      "epoch": 0.0320659642693541,
      "grad_norm": 1.4016958475112915,
      "learning_rate": 3.45e-05,
      "loss": 0.3371,
      "step": 70
    },
    {
      "epoch": 0.03664681630783326,
      "grad_norm": 1.5028544664382935,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 0.31,
      "step": 80
    },
    {
      "epoch": 0.04122766834631241,
      "grad_norm": 1.7520164251327515,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 0.2495,
      "step": 90
    },
    {
      "epoch": 0.04580852038479157,
      "grad_norm": 0.7592225074768066,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 0.238,
      "step": 100
    },
    {
      "epoch": 0.050389372423270726,
      "grad_norm": 1.602748990058899,
      "learning_rate": 4.978396543446952e-05,
      "loss": 0.2472,
      "step": 110
    },
    {
      "epoch": 0.054970224461749886,
      "grad_norm": 0.8727825880050659,
      "learning_rate": 4.954392702832453e-05,
      "loss": 0.2455,
      "step": 120
    },
    {
      "epoch": 0.05955107650022904,
      "grad_norm": 0.8755759596824646,
      "learning_rate": 4.930388862217955e-05,
      "loss": 0.2514,
      "step": 130
    },
    {
      "epoch": 0.0641319285387082,
      "grad_norm": 1.3854660987854004,
      "learning_rate": 4.906385021603457e-05,
      "loss": 0.2415,
      "step": 140
    },
    {
      "epoch": 0.06871278057718735,
      "grad_norm": 0.6988345384597778,
      "learning_rate": 4.8823811809889585e-05,
      "loss": 0.248,
      "step": 150
    },
    {
      "epoch": 0.07329363261566652,
      "grad_norm": 0.7716445326805115,
      "learning_rate": 4.8583773403744604e-05,
      "loss": 0.2289,
      "step": 160
    },
    {
      "epoch": 0.07787448465414568,
      "grad_norm": 0.618770956993103,
      "learning_rate": 4.8343734997599615e-05,
      "loss": 0.2631,
      "step": 170
    },
    {
      "epoch": 0.08245533669262482,
      "grad_norm": 0.311239093542099,
      "learning_rate": 4.8103696591454634e-05,
      "loss": 0.2291,
      "step": 180
    },
    {
      "epoch": 0.08703618873110398,
      "grad_norm": 0.6665637493133545,
      "learning_rate": 4.786365818530965e-05,
      "loss": 0.2227,
      "step": 190
    },
    {
      "epoch": 0.09161704076958314,
      "grad_norm": 0.7171844840049744,
      "learning_rate": 4.762361977916467e-05,
      "loss": 0.2291,
      "step": 200
    },
    {
      "epoch": 0.0961978928080623,
      "grad_norm": 0.606952965259552,
      "learning_rate": 4.738358137301969e-05,
      "loss": 0.2496,
      "step": 210
    },
    {
      "epoch": 0.10077874484654145,
      "grad_norm": 0.6014417409896851,
      "learning_rate": 4.71435429668747e-05,
      "loss": 0.217,
      "step": 220
    },
    {
      "epoch": 0.10535959688502061,
      "grad_norm": 0.5063177943229675,
      "learning_rate": 4.690350456072972e-05,
      "loss": 0.2155,
      "step": 230
    },
    {
      "epoch": 0.10994044892349977,
      "grad_norm": 0.4963516294956207,
      "learning_rate": 4.666346615458473e-05,
      "loss": 0.2067,
      "step": 240
    },
    {
      "epoch": 0.11452130096197893,
      "grad_norm": 0.6876777410507202,
      "learning_rate": 4.642342774843975e-05,
      "loss": 0.2368,
      "step": 250
    },
    {
      "epoch": 0.11910215300045808,
      "grad_norm": 0.33562833070755005,
      "learning_rate": 4.618338934229477e-05,
      "loss": 0.2215,
      "step": 260
    },
    {
      "epoch": 0.12368300503893724,
      "grad_norm": 0.45656174421310425,
      "learning_rate": 4.5943350936149786e-05,
      "loss": 0.2051,
      "step": 270
    },
    {
      "epoch": 0.1282638570774164,
      "grad_norm": 0.5994214415550232,
      "learning_rate": 4.5703312530004805e-05,
      "loss": 0.2271,
      "step": 280
    },
    {
      "epoch": 0.13284470911589555,
      "grad_norm": 0.6960343718528748,
      "learning_rate": 4.5463274123859816e-05,
      "loss": 0.1914,
      "step": 290
    },
    {
      "epoch": 0.1374255611543747,
      "grad_norm": 0.5771898031234741,
      "learning_rate": 4.5223235717714835e-05,
      "loss": 0.2365,
      "step": 300
    },
    {
      "epoch": 0.14200641319285387,
      "grad_norm": 0.3877688944339752,
      "learning_rate": 4.498319731156985e-05,
      "loss": 0.2018,
      "step": 310
    },
    {
      "epoch": 0.14658726523133303,
      "grad_norm": 0.5205092430114746,
      "learning_rate": 4.474315890542487e-05,
      "loss": 0.2592,
      "step": 320
    },
    {
      "epoch": 0.1511681172698122,
      "grad_norm": 0.8437159061431885,
      "learning_rate": 4.450312049927989e-05,
      "loss": 0.2497,
      "step": 330
    },
    {
      "epoch": 0.15574896930829135,
      "grad_norm": 1.130053997039795,
      "learning_rate": 4.426308209313491e-05,
      "loss": 0.2258,
      "step": 340
    },
    {
      "epoch": 0.1603298213467705,
      "grad_norm": 0.4410316050052643,
      "learning_rate": 4.402304368698992e-05,
      "loss": 0.2134,
      "step": 350
    },
    {
      "epoch": 0.16491067338524965,
      "grad_norm": 0.7471569776535034,
      "learning_rate": 4.378300528084494e-05,
      "loss": 0.2121,
      "step": 360
    },
    {
      "epoch": 0.1694915254237288,
      "grad_norm": 0.6091960072517395,
      "learning_rate": 4.354296687469995e-05,
      "loss": 0.2367,
      "step": 370
    },
    {
      "epoch": 0.17407237746220797,
      "grad_norm": 0.37467148900032043,
      "learning_rate": 4.330292846855497e-05,
      "loss": 0.2127,
      "step": 380
    },
    {
      "epoch": 0.17865322950068713,
      "grad_norm": 0.4893397390842438,
      "learning_rate": 4.306289006240999e-05,
      "loss": 0.2018,
      "step": 390
    },
    {
      "epoch": 0.1832340815391663,
      "grad_norm": 0.41944998502731323,
      "learning_rate": 4.2822851656265006e-05,
      "loss": 0.2163,
      "step": 400
    },
    {
      "epoch": 0.18781493357764545,
      "grad_norm": 0.327027291059494,
      "learning_rate": 4.2582813250120024e-05,
      "loss": 0.2274,
      "step": 410
    },
    {
      "epoch": 0.1923957856161246,
      "grad_norm": 0.4042939245700836,
      "learning_rate": 4.2342774843975036e-05,
      "loss": 0.182,
      "step": 420
    },
    {
      "epoch": 0.19697663765460377,
      "grad_norm": 0.5198964476585388,
      "learning_rate": 4.2102736437830054e-05,
      "loss": 0.2229,
      "step": 430
    },
    {
      "epoch": 0.2015574896930829,
      "grad_norm": 0.5307021737098694,
      "learning_rate": 4.186269803168507e-05,
      "loss": 0.2123,
      "step": 440
    },
    {
      "epoch": 0.20613834173156206,
      "grad_norm": 0.3677486777305603,
      "learning_rate": 4.162265962554009e-05,
      "loss": 0.2166,
      "step": 450
    },
    {
      "epoch": 0.21071919377004122,
      "grad_norm": 0.38226303458213806,
      "learning_rate": 4.138262121939511e-05,
      "loss": 0.2389,
      "step": 460
    },
    {
      "epoch": 0.21530004580852038,
      "grad_norm": 0.4093310534954071,
      "learning_rate": 4.114258281325012e-05,
      "loss": 0.1985,
      "step": 470
    },
    {
      "epoch": 0.21988089784699955,
      "grad_norm": 0.3003683090209961,
      "learning_rate": 4.090254440710514e-05,
      "loss": 0.1982,
      "step": 480
    },
    {
      "epoch": 0.2244617498854787,
      "grad_norm": 0.48493343591690063,
      "learning_rate": 4.066250600096015e-05,
      "loss": 0.1956,
      "step": 490
    },
    {
      "epoch": 0.22904260192395787,
      "grad_norm": 0.44902732968330383,
      "learning_rate": 4.042246759481517e-05,
      "loss": 0.2145,
      "step": 500
    },
    {
      "epoch": 0.22904260192395787,
      "eval_loss": 0.21088281273841858,
      "eval_runtime": 30.1602,
      "eval_samples_per_second": 16.081,
      "eval_steps_per_second": 8.057,
      "step": 500
    },
    {
      "epoch": 0.233623453962437,
      "grad_norm": 0.26024484634399414,
      "learning_rate": 4.018242918867019e-05,
      "loss": 0.2156,
      "step": 510
    },
    {
      "epoch": 0.23820430600091616,
      "grad_norm": 0.3754175901412964,
      "learning_rate": 3.994239078252521e-05,
      "loss": 0.2071,
      "step": 520
    },
    {
      "epoch": 0.24278515803939532,
      "grad_norm": 0.32323503494262695,
      "learning_rate": 3.9702352376380225e-05,
      "loss": 0.2093,
      "step": 530
    },
    {
      "epoch": 0.24736601007787448,
      "grad_norm": 0.41066277027130127,
      "learning_rate": 3.946231397023524e-05,
      "loss": 0.182,
      "step": 540
    },
    {
      "epoch": 0.25194686211635364,
      "grad_norm": 0.32582810521125793,
      "learning_rate": 3.9222275564090255e-05,
      "loss": 0.1954,
      "step": 550
    },
    {
      "epoch": 0.2565277141548328,
      "grad_norm": 0.2638744115829468,
      "learning_rate": 3.8982237157945274e-05,
      "loss": 0.2034,
      "step": 560
    },
    {
      "epoch": 0.26110856619331196,
      "grad_norm": 0.3939040005207062,
      "learning_rate": 3.874219875180029e-05,
      "loss": 0.2009,
      "step": 570
    },
    {
      "epoch": 0.2656894182317911,
      "grad_norm": 0.3458320200443268,
      "learning_rate": 3.850216034565531e-05,
      "loss": 0.2195,
      "step": 580
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 0.5092883706092834,
      "learning_rate": 3.826212193951033e-05,
      "loss": 0.2103,
      "step": 590
    },
    {
      "epoch": 0.2748511223087494,
      "grad_norm": 0.3199244439601898,
      "learning_rate": 3.802208353336534e-05,
      "loss": 0.196,
      "step": 600
    },
    {
      "epoch": 0.2794319743472286,
      "grad_norm": 0.2731751799583435,
      "learning_rate": 3.778204512722036e-05,
      "loss": 0.1827,
      "step": 610
    },
    {
      "epoch": 0.28401282638570774,
      "grad_norm": 0.5365939736366272,
      "learning_rate": 3.754200672107537e-05,
      "loss": 0.2467,
      "step": 620
    },
    {
      "epoch": 0.2885936784241869,
      "grad_norm": 0.6997144222259521,
      "learning_rate": 3.730196831493039e-05,
      "loss": 0.204,
      "step": 630
    },
    {
      "epoch": 0.29317453046266606,
      "grad_norm": 0.5586032271385193,
      "learning_rate": 3.706192990878541e-05,
      "loss": 0.206,
      "step": 640
    },
    {
      "epoch": 0.2977553825011452,
      "grad_norm": 0.4491008222103119,
      "learning_rate": 3.6821891502640426e-05,
      "loss": 0.2008,
      "step": 650
    },
    {
      "epoch": 0.3023362345396244,
      "grad_norm": 0.4853453040122986,
      "learning_rate": 3.658185309649544e-05,
      "loss": 0.2112,
      "step": 660
    },
    {
      "epoch": 0.3069170865781035,
      "grad_norm": 0.5321414470672607,
      "learning_rate": 3.6341814690350456e-05,
      "loss": 0.1952,
      "step": 670
    },
    {
      "epoch": 0.3114979386165827,
      "grad_norm": 0.3484733998775482,
      "learning_rate": 3.6101776284205475e-05,
      "loss": 0.2126,
      "step": 680
    },
    {
      "epoch": 0.31607879065506184,
      "grad_norm": 0.5444738268852234,
      "learning_rate": 3.586173787806049e-05,
      "loss": 0.203,
      "step": 690
    },
    {
      "epoch": 0.320659642693541,
      "grad_norm": 0.4600459933280945,
      "learning_rate": 3.562169947191551e-05,
      "loss": 0.1978,
      "step": 700
    },
    {
      "epoch": 0.32524049473202016,
      "grad_norm": 0.32603543996810913,
      "learning_rate": 3.538166106577053e-05,
      "loss": 0.1853,
      "step": 710
    },
    {
      "epoch": 0.3298213467704993,
      "grad_norm": 0.23920612037181854,
      "learning_rate": 3.514162265962554e-05,
      "loss": 0.1951,
      "step": 720
    },
    {
      "epoch": 0.3344021988089785,
      "grad_norm": 0.39811599254608154,
      "learning_rate": 3.490158425348056e-05,
      "loss": 0.2005,
      "step": 730
    },
    {
      "epoch": 0.3389830508474576,
      "grad_norm": 0.3018939793109894,
      "learning_rate": 3.466154584733557e-05,
      "loss": 0.1566,
      "step": 740
    },
    {
      "epoch": 0.3435639028859368,
      "grad_norm": 0.47183758020401,
      "learning_rate": 3.442150744119059e-05,
      "loss": 0.208,
      "step": 750
    },
    {
      "epoch": 0.34814475492441593,
      "grad_norm": 0.3024599254131317,
      "learning_rate": 3.418146903504561e-05,
      "loss": 0.1986,
      "step": 760
    },
    {
      "epoch": 0.3527256069628951,
      "grad_norm": 0.6011927723884583,
      "learning_rate": 3.394143062890063e-05,
      "loss": 0.2189,
      "step": 770
    },
    {
      "epoch": 0.35730645900137425,
      "grad_norm": 0.3051789700984955,
      "learning_rate": 3.370139222275564e-05,
      "loss": 0.2245,
      "step": 780
    },
    {
      "epoch": 0.3618873110398534,
      "grad_norm": 0.46620339155197144,
      "learning_rate": 3.346135381661066e-05,
      "loss": 0.222,
      "step": 790
    },
    {
      "epoch": 0.3664681630783326,
      "grad_norm": 0.44738519191741943,
      "learning_rate": 3.3221315410465676e-05,
      "loss": 0.192,
      "step": 800
    },
    {
      "epoch": 0.3710490151168117,
      "grad_norm": 0.47053661942481995,
      "learning_rate": 3.2981277004320694e-05,
      "loss": 0.1994,
      "step": 810
    },
    {
      "epoch": 0.3756298671552909,
      "grad_norm": 0.37152695655822754,
      "learning_rate": 3.274123859817571e-05,
      "loss": 0.2318,
      "step": 820
    },
    {
      "epoch": 0.38021071919377003,
      "grad_norm": 0.3711836338043213,
      "learning_rate": 3.250120019203073e-05,
      "loss": 0.209,
      "step": 830
    },
    {
      "epoch": 0.3847915712322492,
      "grad_norm": 0.6674831509590149,
      "learning_rate": 3.226116178588574e-05,
      "loss": 0.2195,
      "step": 840
    },
    {
      "epoch": 0.38937242327072835,
      "grad_norm": 0.9787792563438416,
      "learning_rate": 3.202112337974076e-05,
      "loss": 0.1758,
      "step": 850
    },
    {
      "epoch": 0.39395327530920754,
      "grad_norm": 0.34313926100730896,
      "learning_rate": 3.178108497359578e-05,
      "loss": 0.2291,
      "step": 860
    },
    {
      "epoch": 0.3985341273476867,
      "grad_norm": 0.273093044757843,
      "learning_rate": 3.154104656745079e-05,
      "loss": 0.2128,
      "step": 870
    },
    {
      "epoch": 0.4031149793861658,
      "grad_norm": 0.39359042048454285,
      "learning_rate": 3.130100816130581e-05,
      "loss": 0.1886,
      "step": 880
    },
    {
      "epoch": 0.407695831424645,
      "grad_norm": 0.28753426671028137,
      "learning_rate": 3.106096975516083e-05,
      "loss": 0.1942,
      "step": 890
    },
    {
      "epoch": 0.4122766834631241,
      "grad_norm": 0.5091426372528076,
      "learning_rate": 3.082093134901584e-05,
      "loss": 0.2132,
      "step": 900
    },
    {
      "epoch": 0.4168575355016033,
      "grad_norm": 0.5309694409370422,
      "learning_rate": 3.058089294287086e-05,
      "loss": 0.215,
      "step": 910
    },
    {
      "epoch": 0.42143838754008245,
      "grad_norm": 0.2943398952484131,
      "learning_rate": 3.0340854536725876e-05,
      "loss": 0.2151,
      "step": 920
    },
    {
      "epoch": 0.42601923957856164,
      "grad_norm": 0.29028087854385376,
      "learning_rate": 3.0100816130580895e-05,
      "loss": 0.1953,
      "step": 930
    },
    {
      "epoch": 0.43060009161704077,
      "grad_norm": 0.2482720911502838,
      "learning_rate": 2.9860777724435913e-05,
      "loss": 0.2022,
      "step": 940
    },
    {
      "epoch": 0.4351809436555199,
      "grad_norm": 0.23267552256584167,
      "learning_rate": 2.962073931829093e-05,
      "loss": 0.2134,
      "step": 950
    },
    {
      "epoch": 0.4397617956939991,
      "grad_norm": 0.2027808278799057,
      "learning_rate": 2.9380700912145943e-05,
      "loss": 0.2015,
      "step": 960
    },
    {
      "epoch": 0.4443426477324782,
      "grad_norm": 0.3373633027076721,
      "learning_rate": 2.914066250600096e-05,
      "loss": 0.1959,
      "step": 970
    },
    {
      "epoch": 0.4489234997709574,
      "grad_norm": 0.3878941237926483,
      "learning_rate": 2.8900624099855977e-05,
      "loss": 0.1879,
      "step": 980
    },
    {
      "epoch": 0.45350435180943655,
      "grad_norm": 0.2899917960166931,
      "learning_rate": 2.8660585693710995e-05,
      "loss": 0.2115,
      "step": 990
    },
    {
      "epoch": 0.45808520384791573,
      "grad_norm": 0.2504674196243286,
      "learning_rate": 2.8420547287566014e-05,
      "loss": 0.219,
      "step": 1000
    },
    {
      "epoch": 0.45808520384791573,
      "eval_loss": 0.20872141420841217,
      "eval_runtime": 30.4982,
      "eval_samples_per_second": 15.903,
      "eval_steps_per_second": 7.968,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 2183,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.3689918332928e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}