{
  "best_metric": 11.916446685791016,
  "best_model_checkpoint": "miner_id_24/checkpoint-200",
  "epoch": 0.36413290851160673,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018206645425580337,
      "grad_norm": 0.03413726016879082,
      "learning_rate": 1e-05,
      "loss": 11.9323,
      "step": 1
    },
    {
      "epoch": 0.0018206645425580337,
      "eval_loss": 11.93479061126709,
      "eval_runtime": 5.058,
      "eval_samples_per_second": 182.88,
      "eval_steps_per_second": 45.868,
      "step": 1
    },
    {
      "epoch": 0.0036413290851160674,
      "grad_norm": 0.04938232898712158,
      "learning_rate": 2e-05,
      "loss": 11.9373,
      "step": 2
    },
    {
      "epoch": 0.005461993627674101,
      "grad_norm": 0.03765185922384262,
      "learning_rate": 3e-05,
      "loss": 11.9313,
      "step": 3
    },
    {
      "epoch": 0.007282658170232135,
      "grad_norm": 0.03305572271347046,
      "learning_rate": 4e-05,
      "loss": 11.9374,
      "step": 4
    },
    {
      "epoch": 0.009103322712790168,
      "grad_norm": 0.04273687303066254,
      "learning_rate": 5e-05,
      "loss": 11.9327,
      "step": 5
    },
    {
      "epoch": 0.010923987255348202,
      "grad_norm": 0.04251832887530327,
      "learning_rate": 6e-05,
      "loss": 11.9344,
      "step": 6
    },
    {
      "epoch": 0.012744651797906235,
      "grad_norm": 0.04986019805073738,
      "learning_rate": 7e-05,
      "loss": 11.9337,
      "step": 7
    },
    {
      "epoch": 0.01456531634046427,
      "grad_norm": 0.0382511280477047,
      "learning_rate": 8e-05,
      "loss": 11.9358,
      "step": 8
    },
    {
      "epoch": 0.016385980883022302,
      "grad_norm": 0.04335345700383186,
      "learning_rate": 9e-05,
      "loss": 11.9329,
      "step": 9
    },
    {
      "epoch": 0.018206645425580335,
      "grad_norm": 0.043804410845041275,
      "learning_rate": 0.0001,
      "loss": 11.9339,
      "step": 10
    },
    {
      "epoch": 0.02002730996813837,
      "grad_norm": 0.050628215074539185,
      "learning_rate": 9.999316524962345e-05,
      "loss": 11.9351,
      "step": 11
    },
    {
      "epoch": 0.021847974510696404,
      "grad_norm": 0.043292414397001266,
      "learning_rate": 9.997266286704631e-05,
      "loss": 11.9353,
      "step": 12
    },
    {
      "epoch": 0.023668639053254437,
      "grad_norm": 0.05560588464140892,
      "learning_rate": 9.993849845741524e-05,
      "loss": 11.9345,
      "step": 13
    },
    {
      "epoch": 0.02548930359581247,
      "grad_norm": 0.06867741793394089,
      "learning_rate": 9.989068136093873e-05,
      "loss": 11.9322,
      "step": 14
    },
    {
      "epoch": 0.027309968138370506,
      "grad_norm": 0.07315599173307419,
      "learning_rate": 9.98292246503335e-05,
      "loss": 11.9318,
      "step": 15
    },
    {
      "epoch": 0.02913063268092854,
      "grad_norm": 0.07529256492853165,
      "learning_rate": 9.975414512725057e-05,
      "loss": 11.9328,
      "step": 16
    },
    {
      "epoch": 0.030951297223486572,
      "grad_norm": 0.07898218929767609,
      "learning_rate": 9.966546331768191e-05,
      "loss": 11.9336,
      "step": 17
    },
    {
      "epoch": 0.032771961766044605,
      "grad_norm": 0.08530876040458679,
      "learning_rate": 9.956320346634876e-05,
      "loss": 11.9336,
      "step": 18
    },
    {
      "epoch": 0.03459262630860264,
      "grad_norm": 0.08568832278251648,
      "learning_rate": 9.944739353007344e-05,
      "loss": 11.932,
      "step": 19
    },
    {
      "epoch": 0.03641329085116067,
      "grad_norm": 0.10579682141542435,
      "learning_rate": 9.931806517013612e-05,
      "loss": 11.9316,
      "step": 20
    },
    {
      "epoch": 0.03823395539371871,
      "grad_norm": 0.11739271134138107,
      "learning_rate": 9.917525374361912e-05,
      "loss": 11.9312,
      "step": 21
    },
    {
      "epoch": 0.04005461993627674,
      "grad_norm": 0.11046414822340012,
      "learning_rate": 9.901899829374047e-05,
      "loss": 11.9301,
      "step": 22
    },
    {
      "epoch": 0.041875284478834776,
      "grad_norm": 0.1442708969116211,
      "learning_rate": 9.884934153917997e-05,
      "loss": 11.9328,
      "step": 23
    },
    {
      "epoch": 0.04369594902139281,
      "grad_norm": 0.14422491192817688,
      "learning_rate": 9.86663298624003e-05,
      "loss": 11.9277,
      "step": 24
    },
    {
      "epoch": 0.04551661356395084,
      "grad_norm": 0.16111890971660614,
      "learning_rate": 9.847001329696653e-05,
      "loss": 11.9291,
      "step": 25
    },
    {
      "epoch": 0.047337278106508875,
      "grad_norm": 0.13617178797721863,
      "learning_rate": 9.826044551386744e-05,
      "loss": 11.9292,
      "step": 26
    },
    {
      "epoch": 0.04915794264906691,
      "grad_norm": 0.10961776971817017,
      "learning_rate": 9.803768380684242e-05,
      "loss": 11.9289,
      "step": 27
    },
    {
      "epoch": 0.05097860719162494,
      "grad_norm": 0.13653425872325897,
      "learning_rate": 9.780178907671789e-05,
      "loss": 11.9262,
      "step": 28
    },
    {
      "epoch": 0.05279927173418298,
      "grad_norm": 0.1738469898700714,
      "learning_rate": 9.755282581475769e-05,
      "loss": 11.923,
      "step": 29
    },
    {
      "epoch": 0.05461993627674101,
      "grad_norm": 0.13671448826789856,
      "learning_rate": 9.729086208503174e-05,
      "loss": 11.924,
      "step": 30
    },
    {
      "epoch": 0.056440600819299046,
      "grad_norm": 0.16584277153015137,
      "learning_rate": 9.701596950580806e-05,
      "loss": 11.9274,
      "step": 31
    },
    {
      "epoch": 0.05826126536185708,
      "grad_norm": 0.1861305832862854,
      "learning_rate": 9.672822322997305e-05,
      "loss": 11.9206,
      "step": 32
    },
    {
      "epoch": 0.06008192990441511,
      "grad_norm": 0.23096109926700592,
      "learning_rate": 9.642770192448536e-05,
      "loss": 11.9235,
      "step": 33
    },
    {
      "epoch": 0.061902594446973144,
      "grad_norm": 0.1698852926492691,
      "learning_rate": 9.611448774886924e-05,
      "loss": 11.919,
      "step": 34
    },
    {
      "epoch": 0.06372325898953118,
      "grad_norm": 0.13847710192203522,
      "learning_rate": 9.578866633275288e-05,
      "loss": 11.9193,
      "step": 35
    },
    {
      "epoch": 0.06554392353208921,
      "grad_norm": 0.20390431582927704,
      "learning_rate": 9.545032675245813e-05,
      "loss": 11.9244,
      "step": 36
    },
    {
      "epoch": 0.06736458807464725,
      "grad_norm": 0.13347230851650238,
      "learning_rate": 9.509956150664796e-05,
      "loss": 11.9165,
      "step": 37
    },
    {
      "epoch": 0.06918525261720528,
      "grad_norm": 0.13313481211662292,
      "learning_rate": 9.473646649103818e-05,
      "loss": 11.9185,
      "step": 38
    },
    {
      "epoch": 0.07100591715976332,
      "grad_norm": 0.14947770535945892,
      "learning_rate": 9.43611409721806e-05,
      "loss": 11.9168,
      "step": 39
    },
    {
      "epoch": 0.07282658170232134,
      "grad_norm": 0.10729013383388519,
      "learning_rate": 9.397368756032445e-05,
      "loss": 11.9199,
      "step": 40
    },
    {
      "epoch": 0.07464724624487938,
      "grad_norm": 0.09050336480140686,
      "learning_rate": 9.357421218136386e-05,
      "loss": 11.9181,
      "step": 41
    },
    {
      "epoch": 0.07646791078743742,
      "grad_norm": 0.14425118267536163,
      "learning_rate": 9.316282404787871e-05,
      "loss": 11.9193,
      "step": 42
    },
    {
      "epoch": 0.07828857532999545,
      "grad_norm": 0.09219750761985779,
      "learning_rate": 9.273963562927695e-05,
      "loss": 11.915,
      "step": 43
    },
    {
      "epoch": 0.08010923987255349,
      "grad_norm": 0.18210937082767487,
      "learning_rate": 9.230476262104677e-05,
      "loss": 11.9149,
      "step": 44
    },
    {
      "epoch": 0.08192990441511151,
      "grad_norm": 0.10966989398002625,
      "learning_rate": 9.185832391312644e-05,
      "loss": 11.9171,
      "step": 45
    },
    {
      "epoch": 0.08375056895766955,
      "grad_norm": 0.14103111624717712,
      "learning_rate": 9.140044155740101e-05,
      "loss": 11.9131,
      "step": 46
    },
    {
      "epoch": 0.08557123350022758,
      "grad_norm": 0.1283736675977707,
      "learning_rate": 9.093124073433463e-05,
      "loss": 11.9146,
      "step": 47
    },
    {
      "epoch": 0.08739189804278562,
      "grad_norm": 0.16424375772476196,
      "learning_rate": 9.045084971874738e-05,
      "loss": 11.9164,
      "step": 48
    },
    {
      "epoch": 0.08921256258534364,
      "grad_norm": 0.2161012589931488,
      "learning_rate": 8.995939984474624e-05,
      "loss": 11.9115,
      "step": 49
    },
    {
      "epoch": 0.09103322712790168,
      "grad_norm": 0.2343643307685852,
      "learning_rate": 8.945702546981969e-05,
      "loss": 11.9071,
      "step": 50
    },
    {
      "epoch": 0.09103322712790168,
      "eval_loss": 11.919342994689941,
      "eval_runtime": 5.0387,
      "eval_samples_per_second": 183.579,
      "eval_steps_per_second": 46.044,
      "step": 50
    },
    {
      "epoch": 0.09285389167045972,
      "grad_norm": 0.06608626991510391,
      "learning_rate": 8.894386393810563e-05,
      "loss": 11.9245,
      "step": 51
    },
    {
      "epoch": 0.09467455621301775,
      "grad_norm": 0.07884740829467773,
      "learning_rate": 8.842005554284296e-05,
      "loss": 11.9238,
      "step": 52
    },
    {
      "epoch": 0.09649522075557579,
      "grad_norm": 0.06321306526660919,
      "learning_rate": 8.788574348801675e-05,
      "loss": 11.9202,
      "step": 53
    },
    {
      "epoch": 0.09831588529813381,
      "grad_norm": 0.08037225157022476,
      "learning_rate": 8.73410738492077e-05,
      "loss": 11.9244,
      "step": 54
    },
    {
      "epoch": 0.10013654984069185,
      "grad_norm": 0.06967957317829132,
      "learning_rate": 8.678619553365659e-05,
      "loss": 11.9206,
      "step": 55
    },
    {
      "epoch": 0.10195721438324988,
      "grad_norm": 0.06570564955472946,
      "learning_rate": 8.622126023955446e-05,
      "loss": 11.9239,
      "step": 56
    },
    {
      "epoch": 0.10377787892580792,
      "grad_norm": 0.066615529358387,
      "learning_rate": 8.564642241456986e-05,
      "loss": 11.9257,
      "step": 57
    },
    {
      "epoch": 0.10559854346836596,
      "grad_norm": 0.06762253493070602,
      "learning_rate": 8.506183921362443e-05,
      "loss": 11.9227,
      "step": 58
    },
    {
      "epoch": 0.10741920801092399,
      "grad_norm": 0.06794265657663345,
      "learning_rate": 8.44676704559283e-05,
      "loss": 11.92,
      "step": 59
    },
    {
      "epoch": 0.10923987255348203,
      "grad_norm": 0.054183103144168854,
      "learning_rate": 8.386407858128706e-05,
      "loss": 11.9229,
      "step": 60
    },
    {
      "epoch": 0.11106053709604005,
      "grad_norm": 0.047181401401758194,
      "learning_rate": 8.32512286056924e-05,
      "loss": 11.9231,
      "step": 61
    },
    {
      "epoch": 0.11288120163859809,
      "grad_norm": 0.0388273261487484,
      "learning_rate": 8.262928807620843e-05,
      "loss": 11.9231,
      "step": 62
    },
    {
      "epoch": 0.11470186618115612,
      "grad_norm": 0.07356078177690506,
      "learning_rate": 8.199842702516583e-05,
      "loss": 11.9207,
      "step": 63
    },
    {
      "epoch": 0.11652253072371416,
      "grad_norm": 0.060196008533239365,
      "learning_rate": 8.135881792367686e-05,
      "loss": 11.9203,
      "step": 64
    },
    {
      "epoch": 0.11834319526627218,
      "grad_norm": 0.03406398370862007,
      "learning_rate": 8.07106356344834e-05,
      "loss": 11.9185,
      "step": 65
    },
    {
      "epoch": 0.12016385980883022,
      "grad_norm": 0.04454463720321655,
      "learning_rate": 8.005405736415126e-05,
      "loss": 11.9204,
      "step": 66
    },
    {
      "epoch": 0.12198452435138826,
      "grad_norm": 0.06781654059886932,
      "learning_rate": 7.938926261462366e-05,
      "loss": 11.9255,
      "step": 67
    },
    {
      "epoch": 0.12380518889394629,
      "grad_norm": 0.03592757135629654,
      "learning_rate": 7.871643313414718e-05,
      "loss": 11.9213,
      "step": 68
    },
    {
      "epoch": 0.12562585343650431,
      "grad_norm": 0.053486570715904236,
      "learning_rate": 7.803575286758364e-05,
      "loss": 11.9207,
      "step": 69
    },
    {
      "epoch": 0.12744651797906237,
      "grad_norm": 0.045916493982076645,
      "learning_rate": 7.734740790612136e-05,
      "loss": 11.9186,
      "step": 70
    },
    {
      "epoch": 0.1292671825216204,
      "grad_norm": 0.029214372858405113,
      "learning_rate": 7.66515864363997e-05,
      "loss": 11.9198,
      "step": 71
    },
    {
      "epoch": 0.13108784706417842,
      "grad_norm": 0.04013194516301155,
      "learning_rate": 7.594847868906076e-05,
      "loss": 11.9189,
      "step": 72
    },
    {
      "epoch": 0.13290851160673645,
      "grad_norm": 0.05094152316451073,
      "learning_rate": 7.52382768867422e-05,
      "loss": 11.9216,
      "step": 73
    },
    {
      "epoch": 0.1347291761492945,
      "grad_norm": 0.05009616166353226,
      "learning_rate": 7.452117519152542e-05,
      "loss": 11.9178,
      "step": 74
    },
    {
      "epoch": 0.13654984069185253,
      "grad_norm": 0.030104748904705048,
      "learning_rate": 7.379736965185368e-05,
      "loss": 11.9162,
      "step": 75
    },
    {
      "epoch": 0.13837050523441055,
      "grad_norm": 0.07186371088027954,
      "learning_rate": 7.30670581489344e-05,
      "loss": 11.919,
      "step": 76
    },
    {
      "epoch": 0.1401911697769686,
      "grad_norm": 0.04096841439604759,
      "learning_rate": 7.233044034264034e-05,
      "loss": 11.9182,
      "step": 77
    },
    {
      "epoch": 0.14201183431952663,
      "grad_norm": 0.04554932937026024,
      "learning_rate": 7.158771761692464e-05,
      "loss": 11.9147,
      "step": 78
    },
    {
      "epoch": 0.14383249886208466,
      "grad_norm": 0.036361921578645706,
      "learning_rate": 7.083909302476453e-05,
      "loss": 11.9184,
      "step": 79
    },
    {
      "epoch": 0.14565316340464268,
      "grad_norm": 0.03751020133495331,
      "learning_rate": 7.008477123264848e-05,
      "loss": 11.9168,
      "step": 80
    },
    {
      "epoch": 0.14747382794720074,
      "grad_norm": 0.033168449997901917,
      "learning_rate": 6.932495846462261e-05,
      "loss": 11.9182,
      "step": 81
    },
    {
      "epoch": 0.14929449248975876,
      "grad_norm": 0.02722890116274357,
      "learning_rate": 6.855986244591104e-05,
      "loss": 11.9191,
      "step": 82
    },
    {
      "epoch": 0.1511151570323168,
      "grad_norm": 0.048928141593933105,
      "learning_rate": 6.778969234612584e-05,
      "loss": 11.9201,
      "step": 83
    },
    {
      "epoch": 0.15293582157487484,
      "grad_norm": 0.06329765170812607,
      "learning_rate": 6.701465872208216e-05,
      "loss": 11.9189,
      "step": 84
    },
    {
      "epoch": 0.15475648611743287,
      "grad_norm": 0.04730033501982689,
      "learning_rate": 6.623497346023418e-05,
      "loss": 11.9143,
      "step": 85
    },
    {
      "epoch": 0.1565771506599909,
      "grad_norm": 0.06353859603404999,
      "learning_rate": 6.545084971874738e-05,
      "loss": 11.9123,
      "step": 86
    },
    {
      "epoch": 0.15839781520254892,
      "grad_norm": 0.06514666229486465,
      "learning_rate": 6.466250186922325e-05,
      "loss": 11.9186,
      "step": 87
    },
    {
      "epoch": 0.16021847974510697,
      "grad_norm": 0.06427396088838577,
      "learning_rate": 6.387014543809223e-05,
      "loss": 11.9153,
      "step": 88
    },
    {
      "epoch": 0.162039144287665,
      "grad_norm": 0.04870816320180893,
      "learning_rate": 6.307399704769099e-05,
      "loss": 11.9147,
      "step": 89
    },
    {
      "epoch": 0.16385980883022302,
      "grad_norm": 0.058200716972351074,
      "learning_rate": 6.227427435703997e-05,
      "loss": 11.9163,
      "step": 90
    },
    {
      "epoch": 0.16568047337278108,
      "grad_norm": 0.05362579971551895,
      "learning_rate": 6.147119600233758e-05,
      "loss": 11.9166,
      "step": 91
    },
    {
      "epoch": 0.1675011379153391,
      "grad_norm": 0.06777684390544891,
      "learning_rate": 6.066498153718735e-05,
      "loss": 11.9144,
      "step": 92
    },
    {
      "epoch": 0.16932180245789713,
      "grad_norm": 0.06118669733405113,
      "learning_rate": 5.985585137257401e-05,
      "loss": 11.9148,
      "step": 93
    },
    {
      "epoch": 0.17114246700045516,
      "grad_norm": 0.06644754856824875,
      "learning_rate": 5.90440267166055e-05,
      "loss": 11.9175,
      "step": 94
    },
    {
      "epoch": 0.1729631315430132,
      "grad_norm": 0.04351932927966118,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 11.9143,
      "step": 95
    },
    {
      "epoch": 0.17478379608557124,
      "grad_norm": 0.09682610630989075,
      "learning_rate": 5.74131823855921e-05,
      "loss": 11.9128,
      "step": 96
    },
    {
      "epoch": 0.17660446062812926,
      "grad_norm": 0.11314849555492401,
      "learning_rate": 5.6594608567103456e-05,
      "loss": 11.9112,
      "step": 97
    },
    {
      "epoch": 0.1784251251706873,
      "grad_norm": 0.09338361769914627,
      "learning_rate": 5.577423184847932e-05,
      "loss": 11.9097,
      "step": 98
    },
    {
      "epoch": 0.18024578971324534,
      "grad_norm": 0.10834107547998428,
      "learning_rate": 5.495227651252315e-05,
      "loss": 11.9166,
      "step": 99
    },
    {
      "epoch": 0.18206645425580337,
      "grad_norm": 0.179707333445549,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 11.9038,
      "step": 100
    },
    {
      "epoch": 0.18206645425580337,
      "eval_loss": 11.917658805847168,
      "eval_runtime": 5.0402,
      "eval_samples_per_second": 183.523,
      "eval_steps_per_second": 46.03,
      "step": 100
    },
    {
      "epoch": 0.1838871187983614,
      "grad_norm": 0.043141648173332214,
      "learning_rate": 5.330452921628497e-05,
      "loss": 11.9247,
      "step": 101
    },
    {
      "epoch": 0.18570778334091945,
      "grad_norm": 0.05003609508275986,
      "learning_rate": 5.247918773366112e-05,
      "loss": 11.9214,
      "step": 102
    },
    {
      "epoch": 0.18752844788347747,
      "grad_norm": 0.04550706967711449,
      "learning_rate": 5.165316846586541e-05,
      "loss": 11.9198,
      "step": 103
    },
    {
      "epoch": 0.1893491124260355,
      "grad_norm": 0.047907501459121704,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 11.923,
      "step": 104
    },
    {
      "epoch": 0.19116977696859352,
      "grad_norm": 0.0394832044839859,
      "learning_rate": 5e-05,
      "loss": 11.9205,
      "step": 105
    },
    {
      "epoch": 0.19299044151115158,
      "grad_norm": 0.048778411000967026,
      "learning_rate": 4.917330276168208e-05,
      "loss": 11.9193,
      "step": 106
    },
    {
      "epoch": 0.1948111060537096,
      "grad_norm": 0.03681888058781624,
      "learning_rate": 4.834683153413459e-05,
      "loss": 11.9217,
      "step": 107
    },
    {
      "epoch": 0.19663177059626763,
      "grad_norm": 0.05845366790890694,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 11.9193,
      "step": 108
    },
    {
      "epoch": 0.19845243513882568,
      "grad_norm": 0.041342005133628845,
      "learning_rate": 4.669547078371504e-05,
      "loss": 11.9228,
      "step": 109
    },
    {
      "epoch": 0.2002730996813837,
      "grad_norm": 0.05567941814661026,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 11.9207,
      "step": 110
    },
    {
      "epoch": 0.20209376422394174,
      "grad_norm": 0.05213354527950287,
      "learning_rate": 4.504772348747687e-05,
      "loss": 11.9174,
      "step": 111
    },
    {
      "epoch": 0.20391442876649976,
      "grad_norm": 0.05773822218179703,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 11.9201,
      "step": 112
    },
    {
      "epoch": 0.20573509330905781,
      "grad_norm": 0.04012896120548248,
      "learning_rate": 4.3405391432896555e-05,
      "loss": 11.9171,
      "step": 113
    },
    {
      "epoch": 0.20755575785161584,
      "grad_norm": 0.03765571862459183,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 11.921,
      "step": 114
    },
    {
      "epoch": 0.20937642239417387,
      "grad_norm": 0.04916438087821007,
      "learning_rate": 4.17702704859633e-05,
      "loss": 11.9183,
      "step": 115
    },
    {
      "epoch": 0.21119708693673192,
      "grad_norm": 0.06308427453041077,
      "learning_rate": 4.095597328339452e-05,
      "loss": 11.9179,
      "step": 116
    },
    {
      "epoch": 0.21301775147928995,
      "grad_norm": 0.06440264731645584,
      "learning_rate": 4.0144148627425993e-05,
      "loss": 11.9203,
      "step": 117
    },
    {
      "epoch": 0.21483841602184797,
      "grad_norm": 0.04310610145330429,
      "learning_rate": 3.933501846281267e-05,
      "loss": 11.9183,
      "step": 118
    },
    {
      "epoch": 0.216659080564406,
      "grad_norm": 0.02499895729124546,
      "learning_rate": 3.852880399766243e-05,
      "loss": 11.9223,
      "step": 119
    },
    {
      "epoch": 0.21847974510696405,
      "grad_norm": 0.052417438477277756,
      "learning_rate": 3.772572564296005e-05,
      "loss": 11.9187,
      "step": 120
    },
    {
      "epoch": 0.22030040964952208,
      "grad_norm": 0.07833024114370346,
      "learning_rate": 3.6926002952309016e-05,
      "loss": 11.9176,
      "step": 121
    },
    {
      "epoch": 0.2221210741920801,
      "grad_norm": 0.040134355425834656,
      "learning_rate": 3.612985456190778e-05,
      "loss": 11.9186,
      "step": 122
    },
    {
      "epoch": 0.22394173873463813,
      "grad_norm": 0.05635281652212143,
      "learning_rate": 3.533749813077677e-05,
      "loss": 11.9167,
      "step": 123
    },
    {
      "epoch": 0.22576240327719618,
      "grad_norm": 0.04688984528183937,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 11.9218,
      "step": 124
    },
    {
      "epoch": 0.2275830678197542,
      "grad_norm": 0.05027342960238457,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 11.9168,
      "step": 125
    },
    {
      "epoch": 0.22940373236231223,
      "grad_norm": 0.044746220111846924,
      "learning_rate": 3.298534127791785e-05,
      "loss": 11.918,
      "step": 126
    },
    {
      "epoch": 0.2312243969048703,
      "grad_norm": 0.05197661742568016,
      "learning_rate": 3.221030765387417e-05,
      "loss": 11.918,
      "step": 127
    },
    {
      "epoch": 0.23304506144742831,
      "grad_norm": 0.04914049431681633,
      "learning_rate": 3.144013755408895e-05,
      "loss": 11.9166,
      "step": 128
    },
    {
      "epoch": 0.23486572598998634,
      "grad_norm": 0.04531894996762276,
      "learning_rate": 3.0675041535377405e-05,
      "loss": 11.9165,
      "step": 129
    },
    {
      "epoch": 0.23668639053254437,
      "grad_norm": 0.056826937943696976,
      "learning_rate": 2.991522876735154e-05,
      "loss": 11.9148,
      "step": 130
    },
    {
      "epoch": 0.23850705507510242,
      "grad_norm": 0.05140029639005661,
      "learning_rate": 2.916090697523549e-05,
      "loss": 11.9146,
      "step": 131
    },
    {
      "epoch": 0.24032771961766045,
      "grad_norm": 0.04223676770925522,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 11.9161,
      "step": 132
    },
    {
      "epoch": 0.24214838416021847,
      "grad_norm": 0.05060703679919243,
      "learning_rate": 2.766955965735968e-05,
      "loss": 11.9159,
      "step": 133
    },
    {
      "epoch": 0.24396904870277653,
      "grad_norm": 0.05956428498029709,
      "learning_rate": 2.693294185106562e-05,
      "loss": 11.9198,
      "step": 134
    },
    {
      "epoch": 0.24578971324533455,
      "grad_norm": 0.06842100620269775,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 11.9196,
      "step": 135
    },
    {
      "epoch": 0.24761037778789258,
      "grad_norm": 0.06286119669675827,
      "learning_rate": 2.547882480847461e-05,
      "loss": 11.9137,
      "step": 136
    },
    {
      "epoch": 0.2494310423304506,
      "grad_norm": 0.04629463702440262,
      "learning_rate": 2.476172311325783e-05,
      "loss": 11.9135,
      "step": 137
    },
    {
      "epoch": 0.25125170687300863,
      "grad_norm": 0.08288171142339706,
      "learning_rate": 2.405152131093926e-05,
      "loss": 11.915,
      "step": 138
    },
    {
      "epoch": 0.25307237141556665,
      "grad_norm": 0.05736103653907776,
      "learning_rate": 2.3348413563600325e-05,
      "loss": 11.9104,
      "step": 139
    },
    {
      "epoch": 0.25489303595812474,
      "grad_norm": 0.06756237149238586,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 11.9124,
      "step": 140
    },
    {
      "epoch": 0.25671370050068276,
      "grad_norm": 0.06638480722904205,
      "learning_rate": 2.196424713241637e-05,
      "loss": 11.9117,
      "step": 141
    },
    {
      "epoch": 0.2585343650432408,
      "grad_norm": 0.09103255718946457,
      "learning_rate": 2.128356686585282e-05,
      "loss": 11.913,
      "step": 142
    },
    {
      "epoch": 0.2603550295857988,
      "grad_norm": 0.0980590358376503,
      "learning_rate": 2.061073738537635e-05,
      "loss": 11.9097,
      "step": 143
    },
    {
      "epoch": 0.26217569412835684,
      "grad_norm": 0.06975159049034119,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 11.9124,
      "step": 144
    },
    {
      "epoch": 0.26399635867091487,
      "grad_norm": 0.06156918779015541,
      "learning_rate": 1.928936436551661e-05,
      "loss": 11.9129,
      "step": 145
    },
    {
      "epoch": 0.2658170232134729,
      "grad_norm": 0.09512455016374588,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 11.9078,
      "step": 146
    },
    {
      "epoch": 0.267637687756031,
      "grad_norm": 0.0983736664056778,
      "learning_rate": 1.800157297483417e-05,
      "loss": 11.9101,
      "step": 147
    },
    {
      "epoch": 0.269458352298589,
      "grad_norm": 0.15988147258758545,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 11.8997,
      "step": 148
    },
    {
      "epoch": 0.271279016841147,
      "grad_norm": 0.1824086308479309,
      "learning_rate": 1.6748771394307585e-05,
      "loss": 11.9061,
      "step": 149
    },
    {
      "epoch": 0.27309968138370505,
      "grad_norm": 0.17297427356243134,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 11.8977,
      "step": 150
    },
    {
      "epoch": 0.27309968138370505,
      "eval_loss": 11.91677474975586,
      "eval_runtime": 5.0403,
      "eval_samples_per_second": 183.522,
      "eval_steps_per_second": 46.029,
      "step": 150
    },
    {
      "epoch": 0.2749203459262631,
      "grad_norm": 0.054472945630550385,
      "learning_rate": 1.553232954407171e-05,
      "loss": 11.9221,
      "step": 151
    },
    {
      "epoch": 0.2767410104688211,
      "grad_norm": 0.06646860390901566,
      "learning_rate": 1.4938160786375572e-05,
      "loss": 11.9219,
      "step": 152
    },
    {
      "epoch": 0.27856167501137913,
      "grad_norm": 0.036392826586961746,
      "learning_rate": 1.435357758543015e-05,
      "loss": 11.9211,
      "step": 153
    },
    {
      "epoch": 0.2803823395539372,
      "grad_norm": 0.04787255451083183,
      "learning_rate": 1.3778739760445552e-05,
      "loss": 11.9228,
      "step": 154
    },
    {
      "epoch": 0.28220300409649524,
      "grad_norm": 0.058999769389629364,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 11.921,
      "step": 155
    },
    {
      "epoch": 0.28402366863905326,
      "grad_norm": 0.06873305886983871,
      "learning_rate": 1.2658926150792322e-05,
      "loss": 11.9196,
      "step": 156
    },
    {
      "epoch": 0.2858443331816113,
      "grad_norm": 0.07329442352056503,
      "learning_rate": 1.2114256511983274e-05,
      "loss": 11.9169,
      "step": 157
    },
    {
      "epoch": 0.2876649977241693,
      "grad_norm": 0.046734023839235306,
      "learning_rate": 1.157994445715706e-05,
      "loss": 11.9196,
      "step": 158
    },
    {
      "epoch": 0.28948566226672734,
      "grad_norm": 0.06411699950695038,
      "learning_rate": 1.1056136061894384e-05,
      "loss": 11.9197,
      "step": 159
    },
    {
      "epoch": 0.29130632680928537,
      "grad_norm": 0.062398821115493774,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 11.9219,
      "step": 160
    },
    {
      "epoch": 0.29312699135184345,
      "grad_norm": 0.04219827428460121,
      "learning_rate": 1.0040600155253765e-05,
      "loss": 11.9192,
      "step": 161
    },
    {
      "epoch": 0.2949476558944015,
      "grad_norm": 0.05368563532829285,
      "learning_rate": 9.549150281252633e-06,
      "loss": 11.9167,
      "step": 162
    },
    {
      "epoch": 0.2967683204369595,
      "grad_norm": 0.06402570754289627,
      "learning_rate": 9.068759265665384e-06,
      "loss": 11.9202,
      "step": 163
    },
    {
      "epoch": 0.2985889849795175,
      "grad_norm": 0.059248920530080795,
      "learning_rate": 8.599558442598998e-06,
      "loss": 11.9166,
      "step": 164
    },
    {
      "epoch": 0.30040964952207555,
      "grad_norm": 0.04016771912574768,
      "learning_rate": 8.141676086873572e-06,
      "loss": 11.9199,
      "step": 165
    },
    {
      "epoch": 0.3022303140646336,
      "grad_norm": 0.054599128663539886,
      "learning_rate": 7.695237378953223e-06,
      "loss": 11.9169,
      "step": 166
    },
    {
      "epoch": 0.3040509786071916,
      "grad_norm": 0.05375150218605995,
      "learning_rate": 7.260364370723044e-06,
      "loss": 11.9176,
      "step": 167
    },
    {
      "epoch": 0.3058716431497497,
      "grad_norm": 0.04632212594151497,
      "learning_rate": 6.837175952121306e-06,
      "loss": 11.9198,
      "step": 168
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 0.0518006756901741,
      "learning_rate": 6.425787818636131e-06,
      "loss": 11.9178,
      "step": 169
    },
    {
      "epoch": 0.30951297223486574,
      "grad_norm": 0.05097333341836929,
      "learning_rate": 6.026312439675552e-06,
      "loss": 11.9206,
      "step": 170
    },
    {
      "epoch": 0.31133363677742376,
      "grad_norm": 0.05397026240825653,
      "learning_rate": 5.6388590278194096e-06,
      "loss": 11.9175,
      "step": 171
    },
    {
      "epoch": 0.3131543013199818,
      "grad_norm": 0.06216050311923027,
      "learning_rate": 5.263533508961827e-06,
      "loss": 11.9177,
      "step": 172
    },
    {
      "epoch": 0.3149749658625398,
      "grad_norm": 0.052663519978523254,
      "learning_rate": 4.900438493352055e-06,
      "loss": 11.9203,
      "step": 173
    },
    {
      "epoch": 0.31679563040509784,
      "grad_norm": 0.06635887920856476,
      "learning_rate": 4.549673247541875e-06,
      "loss": 11.9143,
      "step": 174
    },
    {
      "epoch": 0.3186162949476559,
      "grad_norm": 0.07404106110334396,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 11.9174,
      "step": 175
    },
    {
      "epoch": 0.32043695949021395,
      "grad_norm": 0.04695393890142441,
      "learning_rate": 3.885512251130763e-06,
      "loss": 11.915,
      "step": 176
    },
    {
      "epoch": 0.32225762403277197,
      "grad_norm": 0.03491644933819771,
      "learning_rate": 3.5722980755146517e-06,
      "loss": 11.9182,
      "step": 177
    },
    {
      "epoch": 0.32407828857533,
      "grad_norm": 0.059455081820487976,
      "learning_rate": 3.271776770026963e-06,
      "loss": 11.9159,
      "step": 178
    },
    {
      "epoch": 0.325898953117888,
      "grad_norm": 0.0618712492287159,
      "learning_rate": 2.9840304941919415e-06,
      "loss": 11.9125,
      "step": 179
    },
    {
      "epoch": 0.32771961766044605,
      "grad_norm": 0.053199317306280136,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 11.9191,
      "step": 180
    },
    {
      "epoch": 0.3295402822030041,
      "grad_norm": 0.06174110621213913,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 11.9122,
      "step": 181
    },
    {
      "epoch": 0.33136094674556216,
      "grad_norm": 0.060066673904657364,
      "learning_rate": 2.1982109232821178e-06,
      "loss": 11.9147,
      "step": 182
    },
    {
      "epoch": 0.3331816112881202,
      "grad_norm": 0.09699099510908127,
      "learning_rate": 1.962316193157593e-06,
      "loss": 11.9165,
      "step": 183
    },
    {
      "epoch": 0.3350022758306782,
      "grad_norm": 0.0592498853802681,
      "learning_rate": 1.7395544861325718e-06,
      "loss": 11.9158,
      "step": 184
    },
    {
      "epoch": 0.33682294037323623,
      "grad_norm": 0.058085668832063675,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 11.9133,
      "step": 185
    },
    {
      "epoch": 0.33864360491579426,
      "grad_norm": 0.06665123999118805,
      "learning_rate": 1.333670137599713e-06,
      "loss": 11.9161,
      "step": 186
    },
    {
      "epoch": 0.3404642694583523,
      "grad_norm": 0.08766186237335205,
      "learning_rate": 1.1506584608200367e-06,
      "loss": 11.9147,
      "step": 187
    },
    {
      "epoch": 0.3422849340009103,
      "grad_norm": 0.09656717628240585,
      "learning_rate": 9.810017062595322e-07,
      "loss": 11.9125,
      "step": 188
    },
    {
      "epoch": 0.34410559854346834,
      "grad_norm": 0.05834021791815758,
      "learning_rate": 8.247462563808817e-07,
      "loss": 11.9139,
      "step": 189
    },
    {
      "epoch": 0.3459262630860264,
      "grad_norm": 0.0666501596570015,
      "learning_rate": 6.819348298638839e-07,
      "loss": 11.9167,
      "step": 190
    },
    {
      "epoch": 0.34774692762858445,
      "grad_norm": 0.06928762048482895,
      "learning_rate": 5.526064699265753e-07,
      "loss": 11.9133,
      "step": 191
    },
    {
      "epoch": 0.34956759217114247,
      "grad_norm": 0.09626271575689316,
      "learning_rate": 4.367965336512403e-07,
      "loss": 11.9158,
      "step": 192
    },
    {
      "epoch": 0.3513882567137005,
      "grad_norm": 0.07570219784975052,
      "learning_rate": 3.3453668231809286e-07,
      "loss": 11.9133,
      "step": 193
    },
    {
      "epoch": 0.3532089212562585,
      "grad_norm": 0.06003417819738388,
      "learning_rate": 2.458548727494292e-07,
      "loss": 11.9131,
      "step": 194
    },
    {
      "epoch": 0.35502958579881655,
      "grad_norm": 0.09867273271083832,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 11.9096,
      "step": 195
    },
    {
      "epoch": 0.3568502503413746,
      "grad_norm": 0.10562506318092346,
      "learning_rate": 1.0931863906127327e-07,
      "loss": 11.9041,
      "step": 196
    },
    {
      "epoch": 0.35867091488393266,
      "grad_norm": 0.11728859692811966,
      "learning_rate": 6.150154258476315e-08,
      "loss": 11.9091,
      "step": 197
    },
    {
      "epoch": 0.3604915794264907,
      "grad_norm": 0.10915789753198624,
      "learning_rate": 2.7337132953697554e-08,
      "loss": 11.9103,
      "step": 198
    },
    {
      "epoch": 0.3623122439690487,
      "grad_norm": 0.16178463399410248,
      "learning_rate": 6.834750376549792e-09,
      "loss": 11.9047,
      "step": 199
    },
    {
      "epoch": 0.36413290851160673,
      "grad_norm": 0.2064269334077835,
      "learning_rate": 0.0,
      "loss": 11.9007,
      "step": 200
    },
    {
      "epoch": 0.36413290851160673,
      "eval_loss": 11.916446685791016,
      "eval_runtime": 5.0395,
      "eval_samples_per_second": 183.551,
      "eval_steps_per_second": 46.037,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 964165632000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}