{
  "best_metric": 5.114246505399933e-06,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.2944983818770226,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012944983818770227,
      "grad_norm": 1.0203962326049805,
      "learning_rate": 1e-05,
      "loss": 0.1373,
      "step": 1
    },
    {
      "epoch": 0.012944983818770227,
      "eval_loss": 0.2981894910335541,
      "eval_runtime": 9.5643,
      "eval_samples_per_second": 13.592,
      "eval_steps_per_second": 3.45,
      "step": 1
    },
    {
      "epoch": 0.025889967637540454,
      "grad_norm": 1.2321498394012451,
      "learning_rate": 2e-05,
      "loss": 0.1639,
      "step": 2
    },
    {
      "epoch": 0.038834951456310676,
      "grad_norm": 1.2425283193588257,
      "learning_rate": 3e-05,
      "loss": 0.1687,
      "step": 3
    },
    {
      "epoch": 0.05177993527508091,
      "grad_norm": 1.0462325811386108,
      "learning_rate": 4e-05,
      "loss": 0.1525,
      "step": 4
    },
    {
      "epoch": 0.06472491909385113,
      "grad_norm": 0.6241796016693115,
      "learning_rate": 5e-05,
      "loss": 0.0971,
      "step": 5
    },
    {
      "epoch": 0.07766990291262135,
      "grad_norm": 0.5437662601470947,
      "learning_rate": 6e-05,
      "loss": 0.0619,
      "step": 6
    },
    {
      "epoch": 0.09061488673139159,
      "grad_norm": 0.6059942245483398,
      "learning_rate": 7e-05,
      "loss": 0.0309,
      "step": 7
    },
    {
      "epoch": 0.10355987055016182,
      "grad_norm": 0.2637348473072052,
      "learning_rate": 8e-05,
      "loss": 0.0107,
      "step": 8
    },
    {
      "epoch": 0.11650485436893204,
      "grad_norm": 0.12676547467708588,
      "learning_rate": 9e-05,
      "loss": 0.0039,
      "step": 9
    },
    {
      "epoch": 0.12944983818770225,
      "grad_norm": 0.07366133481264114,
      "learning_rate": 0.0001,
      "loss": 0.0013,
      "step": 10
    },
    {
      "epoch": 0.1423948220064725,
      "grad_norm": 0.06091802567243576,
      "learning_rate": 9.999316524962345e-05,
      "loss": 0.0008,
      "step": 11
    },
    {
      "epoch": 0.1553398058252427,
      "grad_norm": 0.09185962378978729,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.0005,
      "step": 12
    },
    {
      "epoch": 0.16828478964401294,
      "grad_norm": 0.007255407050251961,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.0001,
      "step": 13
    },
    {
      "epoch": 0.18122977346278318,
      "grad_norm": 0.19597330689430237,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.0009,
      "step": 14
    },
    {
      "epoch": 0.1941747572815534,
      "grad_norm": 0.002528818789869547,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.20711974110032363,
      "grad_norm": 0.0019764695316553116,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.22006472491909385,
      "grad_norm": 0.004141903482377529,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.0001,
      "step": 17
    },
    {
      "epoch": 0.23300970873786409,
      "grad_norm": 0.24057364463806152,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.0001,
      "step": 18
    },
    {
      "epoch": 0.2459546925566343,
      "grad_norm": 0.007576645351946354,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.0001,
      "step": 19
    },
    {
      "epoch": 0.2588996763754045,
      "grad_norm": 0.11651884764432907,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.0008,
      "step": 20
    },
    {
      "epoch": 0.27184466019417475,
      "grad_norm": 0.05754302814602852,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.0003,
      "step": 21
    },
    {
      "epoch": 0.284789644012945,
      "grad_norm": 0.05541009455919266,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.0002,
      "step": 22
    },
    {
      "epoch": 0.2977346278317152,
      "grad_norm": 0.002488422906026244,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.3106796116504854,
      "grad_norm": 0.0021644814405590296,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.32362459546925565,
      "grad_norm": 0.003139674663543701,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.3365695792880259,
      "grad_norm": 0.0062321946024894714,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.34951456310679613,
      "grad_norm": 0.0015530330128967762,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.36245954692556637,
      "grad_norm": 0.0009751790203154087,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.37540453074433655,
      "grad_norm": 0.0011889493325725198,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.3883495145631068,
      "grad_norm": 0.0020521723199635744,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.40129449838187703,
      "grad_norm": 0.001421238761395216,
      "learning_rate": 9.701596950580806e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.41423948220064727,
      "grad_norm": 0.0026180348359048367,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.42718446601941745,
      "grad_norm": 0.001268848660402,
      "learning_rate": 9.642770192448536e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.4401294498381877,
      "grad_norm": 0.0014048975426703691,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.45307443365695793,
      "grad_norm": 0.0029678246937692165,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.46601941747572817,
      "grad_norm": 0.0025685736909508705,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.47896440129449835,
      "grad_norm": 0.003125803079456091,
      "learning_rate": 9.509956150664796e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.4919093851132686,
      "grad_norm": 0.007677475456148386,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.5048543689320388,
      "grad_norm": 0.0005735827726311982,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.517799352750809,
      "grad_norm": 0.01382033433765173,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.5307443365695793,
      "grad_norm": 0.00015104783233255148,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.5436893203883495,
      "grad_norm": 0.0015415921807289124,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.5566343042071198,
      "grad_norm": 0.0002161674783565104,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.56957928802589,
      "grad_norm": 0.00024602553457953036,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.5825242718446602,
      "grad_norm": 0.0005134981474839151,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.5954692556634305,
      "grad_norm": 0.00012119917664676905,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.6084142394822006,
      "grad_norm": 0.00011360419739503413,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.6213592233009708,
      "grad_norm": 0.0001089339202735573,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.6343042071197411,
      "grad_norm": 0.00019188143778592348,
      "learning_rate": 8.995939984474624e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.6472491909385113,
      "grad_norm": 0.0002130446519004181,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.6472491909385113,
      "eval_loss": 5.114246505399933e-06,
      "eval_runtime": 9.8014,
      "eval_samples_per_second": 13.263,
      "eval_steps_per_second": 3.367,
      "step": 50
    },
    {
      "epoch": 0.6601941747572816,
      "grad_norm": 0.0008072551572695374,
      "learning_rate": 8.894386393810563e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.6731391585760518,
      "grad_norm": 0.0003115938452538103,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 0.686084142394822,
      "grad_norm": 0.0003803007130045444,
      "learning_rate": 8.788574348801675e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 0.6990291262135923,
      "grad_norm": 0.001679311622865498,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.7119741100323624,
      "grad_norm": 0.0014463587431237102,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 0.7249190938511327,
      "grad_norm": 0.00020687367941718549,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 0.7378640776699029,
      "grad_norm": 0.00012489533401094377,
      "learning_rate": 8.564642241456986e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.7508090614886731,
      "grad_norm": 0.0011946961749345064,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.0,
      "step": 58
    },
    {
      "epoch": 0.7637540453074434,
      "grad_norm": 0.0034886086359620094,
      "learning_rate": 8.44676704559283e-05,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 0.7766990291262136,
      "grad_norm": 0.019774286076426506,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.7896440129449838,
      "grad_norm": 0.00012018957932014018,
      "learning_rate": 8.32512286056924e-05,
      "loss": 0.0,
      "step": 61
    },
    {
      "epoch": 0.8025889967637541,
      "grad_norm": 0.0008413722389377654,
      "learning_rate": 8.262928807620843e-05,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 0.8155339805825242,
      "grad_norm": 7.062353688525036e-05,
      "learning_rate": 8.199842702516583e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.8284789644012945,
      "grad_norm": 0.0010793240508064628,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 0.8414239482200647,
      "grad_norm": 0.00023476831847801805,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 0.8543689320388349,
      "grad_norm": 0.00031458126613870263,
      "learning_rate": 8.005405736415126e-05,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.8673139158576052,
      "grad_norm": 0.0001667094766162336,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 0.8802588996763754,
      "grad_norm": 0.00017667474457994103,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 0.8932038834951457,
      "grad_norm": 0.0005229483940638602,
      "learning_rate": 7.803575286758364e-05,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.9061488673139159,
      "grad_norm": 0.00027375674108043313,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.919093851132686,
      "grad_norm": 0.00028505577938631177,
      "learning_rate": 7.66515864363997e-05,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 0.9320388349514563,
      "grad_norm": 0.002035663463175297,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.9449838187702265,
      "grad_norm": 0.0007079575443640351,
      "learning_rate": 7.52382768867422e-05,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 0.9579288025889967,
      "grad_norm": 0.0003011709777638316,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 0.970873786407767,
      "grad_norm": 0.0004272864025551826,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.9838187702265372,
      "grad_norm": 0.0001912742154672742,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.0,
      "step": 76
    },
    {
      "epoch": 0.9967637540453075,
      "grad_norm": 0.0001855505834100768,
      "learning_rate": 7.233044034264034e-05,
      "loss": 0.0,
      "step": 77
    },
    {
      "epoch": 1.0097087378640777,
      "grad_norm": 0.000368554494343698,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 1.022653721682848,
      "grad_norm": 0.0005397357745096087,
      "learning_rate": 7.083909302476453e-05,
      "loss": 0.0,
      "step": 79
    },
    {
      "epoch": 1.035598705501618,
      "grad_norm": 9.197810140904039e-05,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.0,
      "step": 80
    },
    {
      "epoch": 1.0485436893203883,
      "grad_norm": 0.0047591556794941425,
      "learning_rate": 6.932495846462261e-05,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 1.0614886731391586,
      "grad_norm": 9.622135985409841e-05,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.0,
      "step": 82
    },
    {
      "epoch": 1.074433656957929,
      "grad_norm": 0.0002235884458059445,
      "learning_rate": 6.778969234612584e-05,
      "loss": 0.0,
      "step": 83
    },
    {
      "epoch": 1.087378640776699,
      "grad_norm": 6.833249790361151e-05,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 1.1003236245954693,
      "grad_norm": 0.0005551163922064006,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.0,
      "step": 85
    },
    {
      "epoch": 1.1132686084142396,
      "grad_norm": 0.00010846730583580211,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0,
      "step": 86
    },
    {
      "epoch": 1.1262135922330097,
      "grad_norm": 5.773642988060601e-05,
      "learning_rate": 6.466250186922325e-05,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 1.13915857605178,
      "grad_norm": 0.00016505061648786068,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.0,
      "step": 88
    },
    {
      "epoch": 1.1521035598705502,
      "grad_norm": 0.008876933716237545,
      "learning_rate": 6.307399704769099e-05,
      "loss": 0.0,
      "step": 89
    },
    {
      "epoch": 1.1650485436893203,
      "grad_norm": 0.16270585358142853,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.0007,
      "step": 90
    },
    {
      "epoch": 1.1779935275080906,
      "grad_norm": 0.00022468912356998771,
      "learning_rate": 6.147119600233758e-05,
      "loss": 0.0,
      "step": 91
    },
    {
      "epoch": 1.190938511326861,
      "grad_norm": 9.756765211932361e-05,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.0,
      "step": 92
    },
    {
      "epoch": 1.203883495145631,
      "grad_norm": 0.00018813037604559213,
      "learning_rate": 5.985585137257401e-05,
      "loss": 0.0,
      "step": 93
    },
    {
      "epoch": 1.2168284789644013,
      "grad_norm": 0.0003693510952871293,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.0,
      "step": 94
    },
    {
      "epoch": 1.2297734627831716,
      "grad_norm": 0.0021898068953305483,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 0.0,
      "step": 95
    },
    {
      "epoch": 1.2427184466019416,
      "grad_norm": 0.37865975499153137,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.0006,
      "step": 96
    },
    {
      "epoch": 1.255663430420712,
      "grad_norm": 0.0014068527380004525,
      "learning_rate": 5.6594608567103456e-05,
      "loss": 0.0,
      "step": 97
    },
    {
      "epoch": 1.2686084142394822,
      "grad_norm": 0.0009542718180455267,
      "learning_rate": 5.577423184847932e-05,
      "loss": 0.0,
      "step": 98
    },
    {
      "epoch": 1.2815533980582523,
      "grad_norm": 0.09191913157701492,
      "learning_rate": 5.495227651252315e-05,
      "loss": 0.0001,
      "step": 99
    },
    {
      "epoch": 1.2944983818770226,
      "grad_norm": 0.004437869414687157,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.0,
      "step": 100
    },
    {
      "epoch": 1.2944983818770226,
      "eval_loss": 7.178713531175163e-06,
      "eval_runtime": 9.7892,
      "eval_samples_per_second": 13.28,
      "eval_steps_per_second": 3.371,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.50757075775914e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}