{
  "best_metric": 2.2710835310135735e-06,
  "best_model_checkpoint": "miner_id_24/checkpoint-150",
  "epoch": 1.941747572815534,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012944983818770227,
      "grad_norm": 1.0300663709640503,
      "learning_rate": 1e-05,
      "loss": 0.1373,
      "step": 1
    },
    {
      "epoch": 0.012944983818770227,
      "eval_loss": 0.2981894910335541,
      "eval_runtime": 9.5829,
      "eval_samples_per_second": 13.566,
      "eval_steps_per_second": 3.444,
      "step": 1
    },
    {
      "epoch": 0.025889967637540454,
      "grad_norm": 1.242613434791565,
      "learning_rate": 2e-05,
      "loss": 0.1639,
      "step": 2
    },
    {
      "epoch": 0.038834951456310676,
      "grad_norm": 1.2580989599227905,
      "learning_rate": 3e-05,
      "loss": 0.1689,
      "step": 3
    },
    {
      "epoch": 0.05177993527508091,
      "grad_norm": 1.0557060241699219,
      "learning_rate": 4e-05,
      "loss": 0.1523,
      "step": 4
    },
    {
      "epoch": 0.06472491909385113,
      "grad_norm": 0.6219015121459961,
      "learning_rate": 5e-05,
      "loss": 0.0967,
      "step": 5
    },
    {
      "epoch": 0.07766990291262135,
      "grad_norm": 0.5410397052764893,
      "learning_rate": 6e-05,
      "loss": 0.0613,
      "step": 6
    },
    {
      "epoch": 0.09061488673139159,
      "grad_norm": 0.6054167151451111,
      "learning_rate": 7e-05,
      "loss": 0.0308,
      "step": 7
    },
    {
      "epoch": 0.10355987055016182,
      "grad_norm": 0.26588791608810425,
      "learning_rate": 8e-05,
      "loss": 0.011,
      "step": 8
    },
    {
      "epoch": 0.11650485436893204,
      "grad_norm": 0.1244489848613739,
      "learning_rate": 9e-05,
      "loss": 0.004,
      "step": 9
    },
    {
      "epoch": 0.12944983818770225,
      "grad_norm": 0.06946226954460144,
      "learning_rate": 0.0001,
      "loss": 0.0012,
      "step": 10
    },
    {
      "epoch": 0.1423948220064725,
      "grad_norm": 0.05331278219819069,
      "learning_rate": 9.999316524962345e-05,
      "loss": 0.0008,
      "step": 11
    },
    {
      "epoch": 0.1553398058252427,
      "grad_norm": 0.07835312187671661,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.0004,
      "step": 12
    },
    {
      "epoch": 0.16828478964401294,
      "grad_norm": 0.010071509517729282,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.0001,
      "step": 13
    },
    {
      "epoch": 0.18122977346278318,
      "grad_norm": 0.2724490165710449,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.0015,
      "step": 14
    },
    {
      "epoch": 0.1941747572815534,
      "grad_norm": 0.005802796222269535,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.0001,
      "step": 15
    },
    {
      "epoch": 0.20711974110032363,
      "grad_norm": 0.0032133806962519884,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.0001,
      "step": 16
    },
    {
      "epoch": 0.22006472491909385,
      "grad_norm": 0.006188486702740192,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.0001,
      "step": 17
    },
    {
      "epoch": 0.23300970873786409,
      "grad_norm": 2.9183788299560547,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.0014,
      "step": 18
    },
    {
      "epoch": 0.2459546925566343,
      "grad_norm": 0.009266923181712627,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.0001,
      "step": 19
    },
    {
      "epoch": 0.2588996763754045,
      "grad_norm": 0.11878190189599991,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.001,
      "step": 20
    },
    {
      "epoch": 0.27184466019417475,
      "grad_norm": 0.05568773299455643,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.0003,
      "step": 21
    },
    {
      "epoch": 0.284789644012945,
      "grad_norm": 0.27257758378982544,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.0003,
      "step": 22
    },
    {
      "epoch": 0.2977346278317152,
      "grad_norm": 0.0010619190288707614,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.3106796116504854,
      "grad_norm": 0.0012464552419260144,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.32362459546925565,
      "grad_norm": 0.004392554517835379,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.3365695792880259,
      "grad_norm": 0.0003919773153029382,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.34951456310679613,
      "grad_norm": 0.10447962582111359,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.0001,
      "step": 27
    },
    {
      "epoch": 0.36245954692556637,
      "grad_norm": 0.1830519437789917,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.0004,
      "step": 28
    },
    {
      "epoch": 0.37540453074433655,
      "grad_norm": 0.0003561489866115153,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.3883495145631068,
      "grad_norm": 0.004686282481998205,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.40129449838187703,
      "grad_norm": 0.0013320472789928317,
      "learning_rate": 9.701596950580806e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.41423948220064727,
      "grad_norm": 0.025852827355265617,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.42718446601941745,
      "grad_norm": 0.00043161975918337703,
      "learning_rate": 9.642770192448536e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.4401294498381877,
      "grad_norm": 0.001204935135319829,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.45307443365695793,
      "grad_norm": 0.00046805289457552135,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.46601941747572817,
      "grad_norm": 0.0006769143510609865,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.47896440129449835,
      "grad_norm": 0.0007127629360184073,
      "learning_rate": 9.509956150664796e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.4919093851132686,
      "grad_norm": 0.001910505467094481,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.5048543689320388,
      "grad_norm": 0.0006861760630272329,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.517799352750809,
      "grad_norm": 0.0065710097551345825,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.5307443365695793,
      "grad_norm": 0.14705024659633636,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.0005,
      "step": 41
    },
    {
      "epoch": 0.5436893203883495,
      "grad_norm": 0.02524726092815399,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.5566343042071198,
      "grad_norm": 0.00010728214692790061,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.56957928802589,
      "grad_norm": 0.0009991212282329798,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.5825242718446602,
      "grad_norm": 0.00020374900486785918,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.5954692556634305,
      "grad_norm": 0.00016235416114795953,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.6084142394822006,
      "grad_norm": 0.00022325626923702657,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.6213592233009708,
      "grad_norm": 0.0018907163757830858,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.6343042071197411,
      "grad_norm": 0.00039835559437051415,
      "learning_rate": 8.995939984474624e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.6472491909385113,
      "grad_norm": 0.0002363417879678309,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.6472491909385113,
      "eval_loss": 5.708453318220563e-05,
      "eval_runtime": 9.8109,
      "eval_samples_per_second": 13.251,
      "eval_steps_per_second": 3.364,
      "step": 50
    },
    {
      "epoch": 0.6601941747572816,
      "grad_norm": 0.5545428395271301,
      "learning_rate": 8.894386393810563e-05,
      "loss": 0.0024,
      "step": 51
    },
    {
      "epoch": 0.6731391585760518,
      "grad_norm": 0.0015667704865336418,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 0.686084142394822,
      "grad_norm": 0.011077708564698696,
      "learning_rate": 8.788574348801675e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 0.6990291262135923,
      "grad_norm": 0.010326799936592579,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.7119741100323624,
      "grad_norm": 0.0014523833524435759,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 0.7249190938511327,
      "grad_norm": 0.06251269578933716,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.0002,
      "step": 56
    },
    {
      "epoch": 0.7378640776699029,
      "grad_norm": 0.003645202610641718,
      "learning_rate": 8.564642241456986e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.7508090614886731,
      "grad_norm": 0.2278345227241516,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.0013,
      "step": 58
    },
    {
      "epoch": 0.7637540453074434,
      "grad_norm": 0.003915652632713318,
      "learning_rate": 8.44676704559283e-05,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 0.7766990291262136,
      "grad_norm": 0.0031239991076290607,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.7896440129449838,
      "grad_norm": 0.006219485308974981,
      "learning_rate": 8.32512286056924e-05,
      "loss": 0.0001,
      "step": 61
    },
    {
      "epoch": 0.8025889967637541,
      "grad_norm": 0.005084617529064417,
      "learning_rate": 8.262928807620843e-05,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 0.8155339805825242,
      "grad_norm": 0.003509129863232374,
      "learning_rate": 8.199842702516583e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.8284789644012945,
      "grad_norm": 0.0037375520914793015,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 0.8414239482200647,
      "grad_norm": 0.00264919875189662,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 0.8543689320388349,
      "grad_norm": 0.10307146608829498,
      "learning_rate": 8.005405736415126e-05,
      "loss": 0.0002,
      "step": 66
    },
    {
      "epoch": 0.8673139158576052,
      "grad_norm": 0.001447496935725212,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 0.8802588996763754,
      "grad_norm": 0.0005993408849462867,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 0.8932038834951457,
      "grad_norm": 0.0018503112951293588,
      "learning_rate": 7.803575286758364e-05,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.9061488673139159,
      "grad_norm": 0.0011176472762599587,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.919093851132686,
      "grad_norm": 0.0071342382580041885,
      "learning_rate": 7.66515864363997e-05,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 0.9320388349514563,
      "grad_norm": 0.0045208255760371685,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.9449838187702265,
      "grad_norm": 0.0016186069697141647,
      "learning_rate": 7.52382768867422e-05,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 0.9579288025889967,
      "grad_norm": 0.0014599431306123734,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 0.970873786407767,
      "grad_norm": 0.0028509364929050207,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.9838187702265372,
      "grad_norm": 0.0015371855115517974,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.0,
      "step": 76
    },
    {
      "epoch": 0.9967637540453075,
      "grad_norm": 0.0928664579987526,
      "learning_rate": 7.233044034264034e-05,
      "loss": 0.0001,
      "step": 77
    },
    {
      "epoch": 1.0097087378640777,
      "grad_norm": 0.0019110508728772402,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 1.022653721682848,
      "grad_norm": 0.0001286536717088893,
      "learning_rate": 7.083909302476453e-05,
      "loss": 0.0,
      "step": 79
    },
    {
      "epoch": 1.035598705501618,
      "grad_norm": 0.00010072574514197186,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.0,
      "step": 80
    },
    {
      "epoch": 1.0485436893203883,
      "grad_norm": 8.10532656032592e-05,
      "learning_rate": 6.932495846462261e-05,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 1.0614886731391586,
      "grad_norm": 0.00010899638436967507,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.0,
      "step": 82
    },
    {
      "epoch": 1.074433656957929,
      "grad_norm": 7.373379776254296e-05,
      "learning_rate": 6.778969234612584e-05,
      "loss": 0.0,
      "step": 83
    },
    {
      "epoch": 1.087378640776699,
      "grad_norm": 0.00044416377204470336,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 1.1003236245954693,
      "grad_norm": 0.00014110577467363328,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.0,
      "step": 85
    },
    {
      "epoch": 1.1132686084142396,
      "grad_norm": 7.416921289404854e-05,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0,
      "step": 86
    },
    {
      "epoch": 1.1262135922330097,
      "grad_norm": 6.953041884116828e-05,
      "learning_rate": 6.466250186922325e-05,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 1.13915857605178,
      "grad_norm": 0.00018072547391057014,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.0,
      "step": 88
    },
    {
      "epoch": 1.1521035598705502,
      "grad_norm": 0.00014231581008061767,
      "learning_rate": 6.307399704769099e-05,
      "loss": 0.0,
      "step": 89
    },
    {
      "epoch": 1.1650485436893203,
      "grad_norm": 0.049870043992996216,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.0001,
      "step": 90
    },
    {
      "epoch": 1.1779935275080906,
      "grad_norm": 7.035312592051923e-05,
      "learning_rate": 6.147119600233758e-05,
      "loss": 0.0,
      "step": 91
    },
    {
      "epoch": 1.190938511326861,
      "grad_norm": 0.00027099126600660384,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.0,
      "step": 92
    },
    {
      "epoch": 1.203883495145631,
      "grad_norm": 0.00015066895866766572,
      "learning_rate": 5.985585137257401e-05,
      "loss": 0.0,
      "step": 93
    },
    {
      "epoch": 1.2168284789644013,
      "grad_norm": 0.0002044395951088518,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.0,
      "step": 94
    },
    {
      "epoch": 1.2297734627831716,
      "grad_norm": 0.00016389232769142836,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 0.0,
      "step": 95
    },
    {
      "epoch": 1.2427184466019416,
      "grad_norm": 0.00033923389855772257,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.0,
      "step": 96
    },
    {
      "epoch": 1.255663430420712,
      "grad_norm": 0.00017732290143612772,
      "learning_rate": 5.6594608567103456e-05,
      "loss": 0.0,
      "step": 97
    },
    {
      "epoch": 1.2686084142394822,
      "grad_norm": 0.010200129821896553,
      "learning_rate": 5.577423184847932e-05,
      "loss": 0.0,
      "step": 98
    },
    {
      "epoch": 1.2815533980582523,
      "grad_norm": 0.00042962911538779736,
      "learning_rate": 5.495227651252315e-05,
      "loss": 0.0,
      "step": 99
    },
    {
      "epoch": 1.2944983818770226,
      "grad_norm": 0.0004276715044397861,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.0,
      "step": 100
    },
    {
      "epoch": 1.2944983818770226,
      "eval_loss": 2.6809143491846044e-06,
      "eval_runtime": 9.7982,
      "eval_samples_per_second": 13.268,
      "eval_steps_per_second": 3.368,
      "step": 100
    },
    {
      "epoch": 1.307443365695793,
      "grad_norm": 6.501084862975404e-05,
      "learning_rate": 5.330452921628497e-05,
      "loss": 0.0,
      "step": 101
    },
    {
      "epoch": 1.3203883495145632,
      "grad_norm": 0.00010038226173492149,
      "learning_rate": 5.247918773366112e-05,
      "loss": 0.0,
      "step": 102
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.0001285731268581003,
      "learning_rate": 5.165316846586541e-05,
      "loss": 0.0,
      "step": 103
    },
    {
      "epoch": 1.3462783171521036,
      "grad_norm": 0.001511538284830749,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 0.0,
      "step": 104
    },
    {
      "epoch": 1.3592233009708738,
      "grad_norm": 0.0001728609058773145,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 105
    },
    {
      "epoch": 1.3721682847896441,
      "grad_norm": 0.00020497982040978968,
      "learning_rate": 4.917330276168208e-05,
      "loss": 0.0,
      "step": 106
    },
    {
      "epoch": 1.3851132686084142,
      "grad_norm": 0.00012128037633374333,
      "learning_rate": 4.834683153413459e-05,
      "loss": 0.0,
      "step": 107
    },
    {
      "epoch": 1.3980582524271845,
      "grad_norm": 0.00011459392408141866,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 0.0,
      "step": 108
    },
    {
      "epoch": 1.4110032362459548,
      "grad_norm": 0.0001164645073004067,
      "learning_rate": 4.669547078371504e-05,
      "loss": 0.0,
      "step": 109
    },
    {
      "epoch": 1.4239482200647249,
      "grad_norm": 0.0002743539516814053,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.0,
      "step": 110
    },
    {
      "epoch": 1.4368932038834952,
      "grad_norm": 0.00023465685080736876,
      "learning_rate": 4.504772348747687e-05,
      "loss": 0.0,
      "step": 111
    },
    {
      "epoch": 1.4498381877022655,
      "grad_norm": 0.0001482328079873696,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 0.0,
      "step": 112
    },
    {
      "epoch": 1.4627831715210355,
      "grad_norm": 0.00019140676886308938,
      "learning_rate": 4.3405391432896555e-05,
      "loss": 0.0,
      "step": 113
    },
    {
      "epoch": 1.4757281553398058,
      "grad_norm": 0.0003757222439162433,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 0.0,
      "step": 114
    },
    {
      "epoch": 1.4886731391585761,
      "grad_norm": 0.00025924082729034126,
      "learning_rate": 4.17702704859633e-05,
      "loss": 0.0,
      "step": 115
    },
    {
      "epoch": 1.5016181229773462,
      "grad_norm": 0.0408693365752697,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.0001,
      "step": 116
    },
    {
      "epoch": 1.5145631067961165,
      "grad_norm": 0.0011624947655946016,
      "learning_rate": 4.0144148627425993e-05,
      "loss": 0.0,
      "step": 117
    },
    {
      "epoch": 1.5275080906148868,
      "grad_norm": 8.236700523411855e-05,
      "learning_rate": 3.933501846281267e-05,
      "loss": 0.0,
      "step": 118
    },
    {
      "epoch": 1.5404530744336569,
      "grad_norm": 0.00018579971219878644,
      "learning_rate": 3.852880399766243e-05,
      "loss": 0.0,
      "step": 119
    },
    {
      "epoch": 1.5533980582524272,
      "grad_norm": 0.0002899975224863738,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.0,
      "step": 120
    },
    {
      "epoch": 1.5663430420711975,
      "grad_norm": 0.001745874877087772,
      "learning_rate": 3.6926002952309016e-05,
      "loss": 0.0,
      "step": 121
    },
    {
      "epoch": 1.5792880258899675,
      "grad_norm": 0.0003118854365311563,
      "learning_rate": 3.612985456190778e-05,
      "loss": 0.0,
      "step": 122
    },
    {
      "epoch": 1.5922330097087378,
      "grad_norm": 0.00011779765918618068,
      "learning_rate": 3.533749813077677e-05,
      "loss": 0.0,
      "step": 123
    },
    {
      "epoch": 1.6051779935275081,
      "grad_norm": 0.0001423739013262093,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.0,
      "step": 124
    },
    {
      "epoch": 1.6181229773462782,
      "grad_norm": 0.0001056610417435877,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 0.0,
      "step": 125
    },
    {
      "epoch": 1.6310679611650487,
      "grad_norm": 9.993094863602892e-05,
      "learning_rate": 3.298534127791785e-05,
      "loss": 0.0,
      "step": 126
    },
    {
      "epoch": 1.6440129449838188,
      "grad_norm": 0.00013124274846632034,
      "learning_rate": 3.221030765387417e-05,
      "loss": 0.0,
      "step": 127
    },
    {
      "epoch": 1.6569579288025889,
      "grad_norm": 9.836336539592594e-05,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.0,
      "step": 128
    },
    {
      "epoch": 1.6699029126213594,
      "grad_norm": 0.0002020129031734541,
      "learning_rate": 3.0675041535377405e-05,
      "loss": 0.0,
      "step": 129
    },
    {
      "epoch": 1.6828478964401294,
      "grad_norm": 0.0007685688906349242,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.0,
      "step": 130
    },
    {
      "epoch": 1.6957928802588995,
      "grad_norm": 0.0014894054038450122,
      "learning_rate": 2.916090697523549e-05,
      "loss": 0.0,
      "step": 131
    },
    {
      "epoch": 1.70873786407767,
      "grad_norm": 0.0004369337111711502,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 0.0,
      "step": 132
    },
    {
      "epoch": 1.72168284789644,
      "grad_norm": 0.000166458121384494,
      "learning_rate": 2.766955965735968e-05,
      "loss": 0.0,
      "step": 133
    },
    {
      "epoch": 1.7346278317152104,
      "grad_norm": 0.0002028081362368539,
      "learning_rate": 2.693294185106562e-05,
      "loss": 0.0,
      "step": 134
    },
    {
      "epoch": 1.7475728155339807,
      "grad_norm": 0.0001326661295024678,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 0.0,
      "step": 135
    },
    {
      "epoch": 1.7605177993527508,
      "grad_norm": 6.024070535204373e-05,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.0,
      "step": 136
    },
    {
      "epoch": 1.773462783171521,
      "grad_norm": 0.0004106041742488742,
      "learning_rate": 2.476172311325783e-05,
      "loss": 0.0,
      "step": 137
    },
    {
      "epoch": 1.7864077669902914,
      "grad_norm": 5.501594932866283e-05,
      "learning_rate": 2.405152131093926e-05,
      "loss": 0.0,
      "step": 138
    },
    {
      "epoch": 1.7993527508090614,
      "grad_norm": 8.930330659495667e-05,
      "learning_rate": 2.3348413563600325e-05,
      "loss": 0.0,
      "step": 139
    },
    {
      "epoch": 1.8122977346278317,
      "grad_norm": 0.00012645589595194906,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.0,
      "step": 140
    },
    {
      "epoch": 1.825242718446602,
      "grad_norm": 6.120719626778737e-05,
      "learning_rate": 2.196424713241637e-05,
      "loss": 0.0,
      "step": 141
    },
    {
      "epoch": 1.838187702265372,
      "grad_norm": 0.0001415311126038432,
      "learning_rate": 2.128356686585282e-05,
      "loss": 0.0,
      "step": 142
    },
    {
      "epoch": 1.8511326860841424,
      "grad_norm": 0.00016300934657920152,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.0,
      "step": 143
    },
    {
      "epoch": 1.8640776699029127,
      "grad_norm": 6.67195490677841e-05,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.0,
      "step": 144
    },
    {
      "epoch": 1.8770226537216828,
      "grad_norm": 0.000838777341414243,
      "learning_rate": 1.928936436551661e-05,
      "loss": 0.0,
      "step": 145
    },
    {
      "epoch": 1.889967637540453,
      "grad_norm": 8.979487756732851e-05,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 0.0,
      "step": 146
    },
    {
      "epoch": 1.9029126213592233,
      "grad_norm": 0.0001125104317907244,
      "learning_rate": 1.800157297483417e-05,
      "loss": 0.0,
      "step": 147
    },
    {
      "epoch": 1.9158576051779934,
      "grad_norm": 9.87684543360956e-05,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.0,
      "step": 148
    },
    {
      "epoch": 1.9288025889967637,
      "grad_norm": 0.0005952689098194242,
      "learning_rate": 1.6748771394307585e-05,
      "loss": 0.0,
      "step": 149
    },
    {
      "epoch": 1.941747572815534,
      "grad_norm": 0.006320950109511614,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.0,
      "step": 150
    },
    {
      "epoch": 1.941747572815534,
      "eval_loss": 2.2710835310135735e-06,
      "eval_runtime": 9.8147,
      "eval_samples_per_second": 13.245,
      "eval_steps_per_second": 3.362,
      "step": 150
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.261827547132068e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}