{
  "best_metric": 0.967810869216919,
  "best_model_checkpoint": "miner_id_24/checkpoint-150",
  "epoch": 0.09752925877763328,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006501950585175553,
      "grad_norm": 0.24665670096874237,
      "learning_rate": 1e-05,
      "loss": 0.9802,
      "step": 1
    },
    {
      "epoch": 0.0006501950585175553,
      "eval_loss": 1.2826741933822632,
      "eval_runtime": 285.8077,
      "eval_samples_per_second": 9.066,
      "eval_steps_per_second": 2.267,
      "step": 1
    },
    {
      "epoch": 0.0013003901170351106,
      "grad_norm": 0.2848244607448578,
      "learning_rate": 2e-05,
      "loss": 1.0425,
      "step": 2
    },
    {
      "epoch": 0.0019505851755526658,
      "grad_norm": 0.30411669611930847,
      "learning_rate": 3e-05,
      "loss": 0.9926,
      "step": 3
    },
    {
      "epoch": 0.002600780234070221,
      "grad_norm": 0.29294437170028687,
      "learning_rate": 4e-05,
      "loss": 1.0576,
      "step": 4
    },
    {
      "epoch": 0.003250975292587776,
      "grad_norm": 0.2660481631755829,
      "learning_rate": 5e-05,
      "loss": 1.0194,
      "step": 5
    },
    {
      "epoch": 0.0039011703511053317,
      "grad_norm": 0.22930045425891876,
      "learning_rate": 6e-05,
      "loss": 1.0524,
      "step": 6
    },
    {
      "epoch": 0.004551365409622887,
      "grad_norm": 0.24841156601905823,
      "learning_rate": 7e-05,
      "loss": 0.9826,
      "step": 7
    },
    {
      "epoch": 0.005201560468140442,
      "grad_norm": 0.29712122678756714,
      "learning_rate": 8e-05,
      "loss": 0.9425,
      "step": 8
    },
    {
      "epoch": 0.005851755526657998,
      "grad_norm": 0.3075281083583832,
      "learning_rate": 9e-05,
      "loss": 1.0433,
      "step": 9
    },
    {
      "epoch": 0.006501950585175552,
      "grad_norm": 0.29364797472953796,
      "learning_rate": 0.0001,
      "loss": 0.9954,
      "step": 10
    },
    {
      "epoch": 0.007152145643693108,
      "grad_norm": 0.26937392354011536,
      "learning_rate": 9.999316524962345e-05,
      "loss": 1.047,
      "step": 11
    },
    {
      "epoch": 0.007802340702210663,
      "grad_norm": 0.2536260485649109,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.0071,
      "step": 12
    },
    {
      "epoch": 0.008452535760728219,
      "grad_norm": 0.2693924307823181,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.9835,
      "step": 13
    },
    {
      "epoch": 0.009102730819245773,
      "grad_norm": 0.2723298966884613,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.9502,
      "step": 14
    },
    {
      "epoch": 0.00975292587776333,
      "grad_norm": 0.30060645937919617,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.1353,
      "step": 15
    },
    {
      "epoch": 0.010403120936280884,
      "grad_norm": 0.2966339588165283,
      "learning_rate": 9.975414512725057e-05,
      "loss": 1.0607,
      "step": 16
    },
    {
      "epoch": 0.011053315994798439,
      "grad_norm": 0.29817983508110046,
      "learning_rate": 9.966546331768191e-05,
      "loss": 1.0409,
      "step": 17
    },
    {
      "epoch": 0.011703511053315995,
      "grad_norm": 0.29946982860565186,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.9976,
      "step": 18
    },
    {
      "epoch": 0.01235370611183355,
      "grad_norm": 0.31542253494262695,
      "learning_rate": 9.944739353007344e-05,
      "loss": 1.0846,
      "step": 19
    },
    {
      "epoch": 0.013003901170351105,
      "grad_norm": 0.3208790719509125,
      "learning_rate": 9.931806517013612e-05,
      "loss": 1.0423,
      "step": 20
    },
    {
      "epoch": 0.013654096228868661,
      "grad_norm": 0.3385937809944153,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.9577,
      "step": 21
    },
    {
      "epoch": 0.014304291287386216,
      "grad_norm": 0.35345029830932617,
      "learning_rate": 9.901899829374047e-05,
      "loss": 1.1097,
      "step": 22
    },
    {
      "epoch": 0.01495448634590377,
      "grad_norm": 0.3454449474811554,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.9912,
      "step": 23
    },
    {
      "epoch": 0.015604681404421327,
      "grad_norm": 0.37376976013183594,
      "learning_rate": 9.86663298624003e-05,
      "loss": 1.0444,
      "step": 24
    },
    {
      "epoch": 0.016254876462938883,
      "grad_norm": 0.40058329701423645,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.0641,
      "step": 25
    },
    {
      "epoch": 0.016905071521456438,
      "grad_norm": 0.4044893682003021,
      "learning_rate": 9.826044551386744e-05,
      "loss": 1.0345,
      "step": 26
    },
    {
      "epoch": 0.017555266579973992,
      "grad_norm": 0.4948595464229584,
      "learning_rate": 9.803768380684242e-05,
      "loss": 1.1932,
      "step": 27
    },
    {
      "epoch": 0.018205461638491547,
      "grad_norm": 0.4825793206691742,
      "learning_rate": 9.780178907671789e-05,
      "loss": 1.0423,
      "step": 28
    },
    {
      "epoch": 0.0188556566970091,
      "grad_norm": 0.4661653935909271,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.9377,
      "step": 29
    },
    {
      "epoch": 0.01950585175552666,
      "grad_norm": 0.5018226504325867,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.9872,
      "step": 30
    },
    {
      "epoch": 0.020156046814044214,
      "grad_norm": 0.5678668022155762,
      "learning_rate": 9.701596950580806e-05,
      "loss": 1.2176,
      "step": 31
    },
    {
      "epoch": 0.02080624187256177,
      "grad_norm": 0.6238282918930054,
      "learning_rate": 9.672822322997305e-05,
      "loss": 1.0809,
      "step": 32
    },
    {
      "epoch": 0.021456436931079324,
      "grad_norm": 0.6968820691108704,
      "learning_rate": 9.642770192448536e-05,
      "loss": 1.0575,
      "step": 33
    },
    {
      "epoch": 0.022106631989596878,
      "grad_norm": 0.6839820742607117,
      "learning_rate": 9.611448774886924e-05,
      "loss": 1.0284,
      "step": 34
    },
    {
      "epoch": 0.022756827048114433,
      "grad_norm": 0.8115527629852295,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.9048,
      "step": 35
    },
    {
      "epoch": 0.02340702210663199,
      "grad_norm": 0.9514137506484985,
      "learning_rate": 9.545032675245813e-05,
      "loss": 1.2285,
      "step": 36
    },
    {
      "epoch": 0.024057217165149546,
      "grad_norm": 0.9252752065658569,
      "learning_rate": 9.509956150664796e-05,
      "loss": 1.0755,
      "step": 37
    },
    {
      "epoch": 0.0247074122236671,
      "grad_norm": 0.8530341386795044,
      "learning_rate": 9.473646649103818e-05,
      "loss": 1.0974,
      "step": 38
    },
    {
      "epoch": 0.025357607282184655,
      "grad_norm": 1.0147374868392944,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.9775,
      "step": 39
    },
    {
      "epoch": 0.02600780234070221,
      "grad_norm": 0.9851258397102356,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.1015,
      "step": 40
    },
    {
      "epoch": 0.026657997399219768,
      "grad_norm": 0.9220011830329895,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.9734,
      "step": 41
    },
    {
      "epoch": 0.027308192457737322,
      "grad_norm": 1.1411608457565308,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.895,
      "step": 42
    },
    {
      "epoch": 0.027958387516254877,
      "grad_norm": 1.1889772415161133,
      "learning_rate": 9.273963562927695e-05,
      "loss": 1.0677,
      "step": 43
    },
    {
      "epoch": 0.02860858257477243,
      "grad_norm": 1.5893051624298096,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.9913,
      "step": 44
    },
    {
      "epoch": 0.029258777633289986,
      "grad_norm": 1.3745660781860352,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.9134,
      "step": 45
    },
    {
      "epoch": 0.02990897269180754,
      "grad_norm": 1.3050737380981445,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.9877,
      "step": 46
    },
    {
      "epoch": 0.0305591677503251,
      "grad_norm": 1.6141232252120972,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.9998,
      "step": 47
    },
    {
      "epoch": 0.031209362808842653,
      "grad_norm": 1.4593380689620972,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.8818,
      "step": 48
    },
    {
      "epoch": 0.03185955786736021,
      "grad_norm": 2.6730148792266846,
      "learning_rate": 8.995939984474624e-05,
      "loss": 1.0781,
      "step": 49
    },
    {
      "epoch": 0.032509752925877766,
      "grad_norm": 1.7183367013931274,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.9071,
      "step": 50
    },
    {
      "epoch": 0.032509752925877766,
      "eval_loss": 1.054203987121582,
      "eval_runtime": 287.5454,
      "eval_samples_per_second": 9.011,
      "eval_steps_per_second": 2.254,
      "step": 50
    },
    {
      "epoch": 0.03315994798439532,
      "grad_norm": 0.3386886715888977,
      "learning_rate": 8.894386393810563e-05,
      "loss": 1.0257,
      "step": 51
    },
    {
      "epoch": 0.033810143042912875,
      "grad_norm": 0.3361884355545044,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.9614,
      "step": 52
    },
    {
      "epoch": 0.03446033810143043,
      "grad_norm": 0.3084128499031067,
      "learning_rate": 8.788574348801675e-05,
      "loss": 1.0583,
      "step": 53
    },
    {
      "epoch": 0.035110533159947985,
      "grad_norm": 0.2674861550331116,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.9485,
      "step": 54
    },
    {
      "epoch": 0.03576072821846554,
      "grad_norm": 0.24764272570610046,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.948,
      "step": 55
    },
    {
      "epoch": 0.036410923276983094,
      "grad_norm": 0.24586255848407745,
      "learning_rate": 8.622126023955446e-05,
      "loss": 1.0632,
      "step": 56
    },
    {
      "epoch": 0.03706111833550065,
      "grad_norm": 0.23815371096134186,
      "learning_rate": 8.564642241456986e-05,
      "loss": 1.0403,
      "step": 57
    },
    {
      "epoch": 0.0377113133940182,
      "grad_norm": 0.24708771705627441,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.9634,
      "step": 58
    },
    {
      "epoch": 0.03836150845253576,
      "grad_norm": 0.2594657242298126,
      "learning_rate": 8.44676704559283e-05,
      "loss": 0.9434,
      "step": 59
    },
    {
      "epoch": 0.03901170351105332,
      "grad_norm": 0.2581363916397095,
      "learning_rate": 8.386407858128706e-05,
      "loss": 1.0236,
      "step": 60
    },
    {
      "epoch": 0.03966189856957087,
      "grad_norm": 0.28572535514831543,
      "learning_rate": 8.32512286056924e-05,
      "loss": 1.0167,
      "step": 61
    },
    {
      "epoch": 0.04031209362808843,
      "grad_norm": 0.27552080154418945,
      "learning_rate": 8.262928807620843e-05,
      "loss": 1.0812,
      "step": 62
    },
    {
      "epoch": 0.04096228868660598,
      "grad_norm": 0.2638622522354126,
      "learning_rate": 8.199842702516583e-05,
      "loss": 1.0145,
      "step": 63
    },
    {
      "epoch": 0.04161248374512354,
      "grad_norm": 0.28032687306404114,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.9644,
      "step": 64
    },
    {
      "epoch": 0.04226267880364109,
      "grad_norm": 0.2831994891166687,
      "learning_rate": 8.07106356344834e-05,
      "loss": 1.0199,
      "step": 65
    },
    {
      "epoch": 0.04291287386215865,
      "grad_norm": 0.2876667380332947,
      "learning_rate": 8.005405736415126e-05,
      "loss": 1.0307,
      "step": 66
    },
    {
      "epoch": 0.043563068920676205,
      "grad_norm": 0.30388614535331726,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.0459,
      "step": 67
    },
    {
      "epoch": 0.044213263979193757,
      "grad_norm": 0.30665335059165955,
      "learning_rate": 7.871643313414718e-05,
      "loss": 1.005,
      "step": 68
    },
    {
      "epoch": 0.044863459037711315,
      "grad_norm": 0.3081737160682678,
      "learning_rate": 7.803575286758364e-05,
      "loss": 0.9645,
      "step": 69
    },
    {
      "epoch": 0.045513654096228866,
      "grad_norm": 0.34367161989212036,
      "learning_rate": 7.734740790612136e-05,
      "loss": 1.1134,
      "step": 70
    },
    {
      "epoch": 0.046163849154746424,
      "grad_norm": 0.3611394762992859,
      "learning_rate": 7.66515864363997e-05,
      "loss": 1.0236,
      "step": 71
    },
    {
      "epoch": 0.04681404421326398,
      "grad_norm": 0.3540930449962616,
      "learning_rate": 7.594847868906076e-05,
      "loss": 1.015,
      "step": 72
    },
    {
      "epoch": 0.04746423927178153,
      "grad_norm": 0.3631446361541748,
      "learning_rate": 7.52382768867422e-05,
      "loss": 1.0907,
      "step": 73
    },
    {
      "epoch": 0.04811443433029909,
      "grad_norm": 0.3827061057090759,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.9582,
      "step": 74
    },
    {
      "epoch": 0.04876462938881664,
      "grad_norm": 0.37133488059043884,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.9584,
      "step": 75
    },
    {
      "epoch": 0.0494148244473342,
      "grad_norm": 0.4201424717903137,
      "learning_rate": 7.30670581489344e-05,
      "loss": 1.1141,
      "step": 76
    },
    {
      "epoch": 0.05006501950585176,
      "grad_norm": 0.446561723947525,
      "learning_rate": 7.233044034264034e-05,
      "loss": 1.04,
      "step": 77
    },
    {
      "epoch": 0.05071521456436931,
      "grad_norm": 0.45327770709991455,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.911,
      "step": 78
    },
    {
      "epoch": 0.05136540962288687,
      "grad_norm": 0.488609254360199,
      "learning_rate": 7.083909302476453e-05,
      "loss": 0.9535,
      "step": 79
    },
    {
      "epoch": 0.05201560468140442,
      "grad_norm": 0.5082444548606873,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.9659,
      "step": 80
    },
    {
      "epoch": 0.05266579973992198,
      "grad_norm": 0.5167144536972046,
      "learning_rate": 6.932495846462261e-05,
      "loss": 0.8035,
      "step": 81
    },
    {
      "epoch": 0.053315994798439535,
      "grad_norm": 0.7916758060455322,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.9813,
      "step": 82
    },
    {
      "epoch": 0.053966189856957086,
      "grad_norm": 0.6592386364936829,
      "learning_rate": 6.778969234612584e-05,
      "loss": 0.9619,
      "step": 83
    },
    {
      "epoch": 0.054616384915474644,
      "grad_norm": 0.6843824982643127,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.9291,
      "step": 84
    },
    {
      "epoch": 0.055266579973992196,
      "grad_norm": 0.7502142190933228,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.9775,
      "step": 85
    },
    {
      "epoch": 0.055916775032509754,
      "grad_norm": 0.7345762252807617,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.9682,
      "step": 86
    },
    {
      "epoch": 0.056566970091027305,
      "grad_norm": 1.0729334354400635,
      "learning_rate": 6.466250186922325e-05,
      "loss": 1.1668,
      "step": 87
    },
    {
      "epoch": 0.05721716514954486,
      "grad_norm": 0.9465899467468262,
      "learning_rate": 6.387014543809223e-05,
      "loss": 1.066,
      "step": 88
    },
    {
      "epoch": 0.05786736020806242,
      "grad_norm": 0.8934149146080017,
      "learning_rate": 6.307399704769099e-05,
      "loss": 1.0943,
      "step": 89
    },
    {
      "epoch": 0.05851755526657997,
      "grad_norm": 0.8732466101646423,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.8848,
      "step": 90
    },
    {
      "epoch": 0.05916775032509753,
      "grad_norm": 1.4022270441055298,
      "learning_rate": 6.147119600233758e-05,
      "loss": 1.0353,
      "step": 91
    },
    {
      "epoch": 0.05981794538361508,
      "grad_norm": 1.1805094480514526,
      "learning_rate": 6.066498153718735e-05,
      "loss": 1.0968,
      "step": 92
    },
    {
      "epoch": 0.06046814044213264,
      "grad_norm": 1.171386957168579,
      "learning_rate": 5.985585137257401e-05,
      "loss": 0.967,
      "step": 93
    },
    {
      "epoch": 0.0611183355006502,
      "grad_norm": 1.0938076972961426,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.843,
      "step": 94
    },
    {
      "epoch": 0.06176853055916775,
      "grad_norm": 1.217340111732483,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 1.0096,
      "step": 95
    },
    {
      "epoch": 0.06241872561768531,
      "grad_norm": 1.255969762802124,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.9087,
      "step": 96
    },
    {
      "epoch": 0.06306892067620286,
      "grad_norm": 1.2926132678985596,
      "learning_rate": 5.6594608567103456e-05,
      "loss": 0.8561,
      "step": 97
    },
    {
      "epoch": 0.06371911573472042,
      "grad_norm": 1.38904869556427,
      "learning_rate": 5.577423184847932e-05,
      "loss": 0.8413,
      "step": 98
    },
    {
      "epoch": 0.06436931079323797,
      "grad_norm": 1.2619047164916992,
      "learning_rate": 5.495227651252315e-05,
      "loss": 0.7125,
      "step": 99
    },
    {
      "epoch": 0.06501950585175553,
      "grad_norm": 1.9958139657974243,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.9771,
      "step": 100
    },
    {
      "epoch": 0.06501950585175553,
      "eval_loss": 1.0209071636199951,
      "eval_runtime": 287.4426,
      "eval_samples_per_second": 9.014,
      "eval_steps_per_second": 2.254,
      "step": 100
    },
    {
      "epoch": 0.06566970091027308,
      "grad_norm": 0.28578314185142517,
      "learning_rate": 5.330452921628497e-05,
      "loss": 0.9529,
      "step": 101
    },
    {
      "epoch": 0.06631989596879063,
      "grad_norm": 0.2959822416305542,
      "learning_rate": 5.247918773366112e-05,
      "loss": 0.9612,
      "step": 102
    },
    {
      "epoch": 0.06697009102730819,
      "grad_norm": 0.3045414984226227,
      "learning_rate": 5.165316846586541e-05,
      "loss": 0.964,
      "step": 103
    },
    {
      "epoch": 0.06762028608582575,
      "grad_norm": 0.30599191784858704,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 1.0214,
      "step": 104
    },
    {
      "epoch": 0.06827048114434331,
      "grad_norm": 0.2823450565338135,
      "learning_rate": 5e-05,
      "loss": 0.9976,
      "step": 105
    },
    {
      "epoch": 0.06892067620286085,
      "grad_norm": 0.2601260840892792,
      "learning_rate": 4.917330276168208e-05,
      "loss": 0.9564,
      "step": 106
    },
    {
      "epoch": 0.06957087126137841,
      "grad_norm": 0.25662875175476074,
      "learning_rate": 4.834683153413459e-05,
      "loss": 1.0077,
      "step": 107
    },
    {
      "epoch": 0.07022106631989597,
      "grad_norm": 0.262192964553833,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 1.0157,
      "step": 108
    },
    {
      "epoch": 0.07087126137841353,
      "grad_norm": 0.24229338765144348,
      "learning_rate": 4.669547078371504e-05,
      "loss": 0.9839,
      "step": 109
    },
    {
      "epoch": 0.07152145643693109,
      "grad_norm": 0.2540320158004761,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 1.0461,
      "step": 110
    },
    {
      "epoch": 0.07217165149544863,
      "grad_norm": 0.24910147488117218,
      "learning_rate": 4.504772348747687e-05,
      "loss": 0.9477,
      "step": 111
    },
    {
      "epoch": 0.07282184655396619,
      "grad_norm": 0.25390809774398804,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 1.0516,
      "step": 112
    },
    {
      "epoch": 0.07347204161248375,
      "grad_norm": 0.2431602030992508,
      "learning_rate": 4.3405391432896555e-05,
      "loss": 0.951,
      "step": 113
    },
    {
      "epoch": 0.0741222366710013,
      "grad_norm": 0.2595900893211365,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 0.9707,
      "step": 114
    },
    {
      "epoch": 0.07477243172951886,
      "grad_norm": 0.2788005471229553,
      "learning_rate": 4.17702704859633e-05,
      "loss": 1.0803,
      "step": 115
    },
    {
      "epoch": 0.0754226267880364,
      "grad_norm": 0.2638980746269226,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.9675,
      "step": 116
    },
    {
      "epoch": 0.07607282184655396,
      "grad_norm": 0.28652435541152954,
      "learning_rate": 4.0144148627425993e-05,
      "loss": 1.0481,
      "step": 117
    },
    {
      "epoch": 0.07672301690507152,
      "grad_norm": 0.30259060859680176,
      "learning_rate": 3.933501846281267e-05,
      "loss": 1.0613,
      "step": 118
    },
    {
      "epoch": 0.07737321196358908,
      "grad_norm": 0.30677303671836853,
      "learning_rate": 3.852880399766243e-05,
      "loss": 1.0147,
      "step": 119
    },
    {
      "epoch": 0.07802340702210664,
      "grad_norm": 0.3172067701816559,
      "learning_rate": 3.772572564296005e-05,
      "loss": 1.0589,
      "step": 120
    },
    {
      "epoch": 0.07867360208062418,
      "grad_norm": 0.34852272272109985,
      "learning_rate": 3.6926002952309016e-05,
      "loss": 0.9957,
      "step": 121
    },
    {
      "epoch": 0.07932379713914174,
      "grad_norm": 0.3528114855289459,
      "learning_rate": 3.612985456190778e-05,
      "loss": 1.0282,
      "step": 122
    },
    {
      "epoch": 0.0799739921976593,
      "grad_norm": 0.36736923456192017,
      "learning_rate": 3.533749813077677e-05,
      "loss": 1.0874,
      "step": 123
    },
    {
      "epoch": 0.08062418725617686,
      "grad_norm": 0.36461472511291504,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.9643,
      "step": 124
    },
    {
      "epoch": 0.0812743823146944,
      "grad_norm": 0.3944953382015228,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 1.1273,
      "step": 125
    },
    {
      "epoch": 0.08192457737321196,
      "grad_norm": 0.4044293165206909,
      "learning_rate": 3.298534127791785e-05,
      "loss": 0.9938,
      "step": 126
    },
    {
      "epoch": 0.08257477243172952,
      "grad_norm": 0.4210696220397949,
      "learning_rate": 3.221030765387417e-05,
      "loss": 0.9924,
      "step": 127
    },
    {
      "epoch": 0.08322496749024708,
      "grad_norm": 0.39872685074806213,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.8994,
      "step": 128
    },
    {
      "epoch": 0.08387516254876463,
      "grad_norm": 0.45271793007850647,
      "learning_rate": 3.0675041535377405e-05,
      "loss": 0.9762,
      "step": 129
    },
    {
      "epoch": 0.08452535760728218,
      "grad_norm": 0.47246426343917847,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.8418,
      "step": 130
    },
    {
      "epoch": 0.08517555266579974,
      "grad_norm": 0.5041539669036865,
      "learning_rate": 2.916090697523549e-05,
      "loss": 1.0256,
      "step": 131
    },
    {
      "epoch": 0.0858257477243173,
      "grad_norm": 0.5535305142402649,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 1.005,
      "step": 132
    },
    {
      "epoch": 0.08647594278283485,
      "grad_norm": 0.5784163475036621,
      "learning_rate": 2.766955965735968e-05,
      "loss": 0.9101,
      "step": 133
    },
    {
      "epoch": 0.08712613784135241,
      "grad_norm": 0.6768215894699097,
      "learning_rate": 2.693294185106562e-05,
      "loss": 1.0046,
      "step": 134
    },
    {
      "epoch": 0.08777633289986995,
      "grad_norm": 0.6812559366226196,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 0.9294,
      "step": 135
    },
    {
      "epoch": 0.08842652795838751,
      "grad_norm": 0.6950148344039917,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.976,
      "step": 136
    },
    {
      "epoch": 0.08907672301690507,
      "grad_norm": 0.6829755902290344,
      "learning_rate": 2.476172311325783e-05,
      "loss": 0.8025,
      "step": 137
    },
    {
      "epoch": 0.08972691807542263,
      "grad_norm": 0.8887941837310791,
      "learning_rate": 2.405152131093926e-05,
      "loss": 1.0147,
      "step": 138
    },
    {
      "epoch": 0.09037711313394019,
      "grad_norm": 0.853938102722168,
      "learning_rate": 2.3348413563600325e-05,
      "loss": 0.9148,
      "step": 139
    },
    {
      "epoch": 0.09102730819245773,
      "grad_norm": 0.9662453532218933,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 1.0474,
      "step": 140
    },
    {
      "epoch": 0.09167750325097529,
      "grad_norm": 0.9367502927780151,
      "learning_rate": 2.196424713241637e-05,
      "loss": 0.8101,
      "step": 141
    },
    {
      "epoch": 0.09232769830949285,
      "grad_norm": 0.881011426448822,
      "learning_rate": 2.128356686585282e-05,
      "loss": 0.781,
      "step": 142
    },
    {
      "epoch": 0.0929778933680104,
      "grad_norm": 0.9794203639030457,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.9776,
      "step": 143
    },
    {
      "epoch": 0.09362808842652796,
      "grad_norm": 1.0850703716278076,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.9974,
      "step": 144
    },
    {
      "epoch": 0.09427828348504551,
      "grad_norm": 0.9639971256256104,
      "learning_rate": 1.928936436551661e-05,
      "loss": 0.7316,
      "step": 145
    },
    {
      "epoch": 0.09492847854356307,
      "grad_norm": 1.1075059175491333,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 0.8347,
      "step": 146
    },
    {
      "epoch": 0.09557867360208062,
      "grad_norm": 1.1604466438293457,
      "learning_rate": 1.800157297483417e-05,
      "loss": 0.6836,
      "step": 147
    },
    {
      "epoch": 0.09622886866059818,
      "grad_norm": 1.4200111627578735,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.9519,
      "step": 148
    },
    {
      "epoch": 0.09687906371911574,
      "grad_norm": 1.2869455814361572,
      "learning_rate": 1.6748771394307585e-05,
      "loss": 0.823,
      "step": 149
    },
    {
      "epoch": 0.09752925877763328,
      "grad_norm": 1.7294621467590332,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.7738,
      "step": 150
    },
    {
      "epoch": 0.09752925877763328,
      "eval_loss": 0.967810869216919,
      "eval_runtime": 287.4762,
      "eval_samples_per_second": 9.013,
      "eval_steps_per_second": 2.254,
      "step": 150
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1363877497274368e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}