{
  "best_metric": 0.6300157308578491,
  "best_model_checkpoint": "miner_id_24/checkpoint-150",
  "epoch": 0.21398002853067047,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014265335235378032,
      "grad_norm": 0.5226402282714844,
      "learning_rate": 1e-05,
      "loss": 1.3512,
      "step": 1
    },
    {
      "epoch": 0.0014265335235378032,
      "eval_loss": 1.5449800491333008,
      "eval_runtime": 50.3239,
      "eval_samples_per_second": 23.468,
      "eval_steps_per_second": 5.882,
      "step": 1
    },
    {
      "epoch": 0.0028530670470756064,
      "grad_norm": 0.5333264470100403,
      "learning_rate": 2e-05,
      "loss": 1.4012,
      "step": 2
    },
    {
      "epoch": 0.0042796005706134095,
      "grad_norm": 0.5963748097419739,
      "learning_rate": 3e-05,
      "loss": 1.4577,
      "step": 3
    },
    {
      "epoch": 0.005706134094151213,
      "grad_norm": 0.5716367363929749,
      "learning_rate": 4e-05,
      "loss": 1.3906,
      "step": 4
    },
    {
      "epoch": 0.007132667617689016,
      "grad_norm": 0.5622562766075134,
      "learning_rate": 5e-05,
      "loss": 1.3567,
      "step": 5
    },
    {
      "epoch": 0.008559201141226819,
      "grad_norm": 0.5278612971305847,
      "learning_rate": 6e-05,
      "loss": 1.3034,
      "step": 6
    },
    {
      "epoch": 0.009985734664764621,
      "grad_norm": 0.4572104215621948,
      "learning_rate": 7e-05,
      "loss": 1.3154,
      "step": 7
    },
    {
      "epoch": 0.011412268188302425,
      "grad_norm": 0.4730938673019409,
      "learning_rate": 8e-05,
      "loss": 1.2504,
      "step": 8
    },
    {
      "epoch": 0.012838801711840228,
      "grad_norm": 0.4776618182659149,
      "learning_rate": 9e-05,
      "loss": 1.1406,
      "step": 9
    },
    {
      "epoch": 0.014265335235378032,
      "grad_norm": 0.4638929069042206,
      "learning_rate": 0.0001,
      "loss": 1.0836,
      "step": 10
    },
    {
      "epoch": 0.015691868758915834,
      "grad_norm": 0.51563560962677,
      "learning_rate": 9.999316524962345e-05,
      "loss": 1.0827,
      "step": 11
    },
    {
      "epoch": 0.017118402282453638,
      "grad_norm": 0.4722335636615753,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.0619,
      "step": 12
    },
    {
      "epoch": 0.018544935805991442,
      "grad_norm": 0.41705870628356934,
      "learning_rate": 9.993849845741524e-05,
      "loss": 1.0141,
      "step": 13
    },
    {
      "epoch": 0.019971469329529243,
      "grad_norm": 0.4042567014694214,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.9458,
      "step": 14
    },
    {
      "epoch": 0.021398002853067047,
      "grad_norm": 0.4035939872264862,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.8679,
      "step": 15
    },
    {
      "epoch": 0.02282453637660485,
      "grad_norm": 0.39228925108909607,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.9118,
      "step": 16
    },
    {
      "epoch": 0.024251069900142655,
      "grad_norm": 0.3878542184829712,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.8138,
      "step": 17
    },
    {
      "epoch": 0.025677603423680456,
      "grad_norm": 0.36501285433769226,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.8381,
      "step": 18
    },
    {
      "epoch": 0.02710413694721826,
      "grad_norm": 0.34077632427215576,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.8206,
      "step": 19
    },
    {
      "epoch": 0.028530670470756064,
      "grad_norm": 0.3434579074382782,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.8369,
      "step": 20
    },
    {
      "epoch": 0.029957203994293864,
      "grad_norm": 0.3274579346179962,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.7728,
      "step": 21
    },
    {
      "epoch": 0.03138373751783167,
      "grad_norm": 0.35434773564338684,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.8038,
      "step": 22
    },
    {
      "epoch": 0.03281027104136947,
      "grad_norm": 0.3356912136077881,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.6974,
      "step": 23
    },
    {
      "epoch": 0.034236804564907276,
      "grad_norm": 0.32218512892723083,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.7521,
      "step": 24
    },
    {
      "epoch": 0.03566333808844508,
      "grad_norm": 0.33405324816703796,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.6765,
      "step": 25
    },
    {
      "epoch": 0.037089871611982884,
      "grad_norm": 0.34837788343429565,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.7438,
      "step": 26
    },
    {
      "epoch": 0.03851640513552068,
      "grad_norm": 0.35486212372779846,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.7171,
      "step": 27
    },
    {
      "epoch": 0.039942938659058486,
      "grad_norm": 0.35810762643814087,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.7572,
      "step": 28
    },
    {
      "epoch": 0.04136947218259629,
      "grad_norm": 0.3587457239627838,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.6637,
      "step": 29
    },
    {
      "epoch": 0.042796005706134094,
      "grad_norm": 0.3609348237514496,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.6759,
      "step": 30
    },
    {
      "epoch": 0.0442225392296719,
      "grad_norm": 0.35466521978378296,
      "learning_rate": 9.701596950580806e-05,
      "loss": 0.7105,
      "step": 31
    },
    {
      "epoch": 0.0456490727532097,
      "grad_norm": 0.3644118010997772,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.703,
      "step": 32
    },
    {
      "epoch": 0.047075606276747506,
      "grad_norm": 0.3695703148841858,
      "learning_rate": 9.642770192448536e-05,
      "loss": 0.6622,
      "step": 33
    },
    {
      "epoch": 0.04850213980028531,
      "grad_norm": 0.3753614127635956,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.623,
      "step": 34
    },
    {
      "epoch": 0.04992867332382311,
      "grad_norm": 0.4202166497707367,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.6669,
      "step": 35
    },
    {
      "epoch": 0.05135520684736091,
      "grad_norm": 0.3885855972766876,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.6423,
      "step": 36
    },
    {
      "epoch": 0.052781740370898715,
      "grad_norm": 0.4230300784111023,
      "learning_rate": 9.509956150664796e-05,
      "loss": 0.6436,
      "step": 37
    },
    {
      "epoch": 0.05420827389443652,
      "grad_norm": 0.4238419532775879,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.6399,
      "step": 38
    },
    {
      "epoch": 0.05563480741797432,
      "grad_norm": 0.38183897733688354,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.6405,
      "step": 39
    },
    {
      "epoch": 0.05706134094151213,
      "grad_norm": 0.42965343594551086,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.667,
      "step": 40
    },
    {
      "epoch": 0.05848787446504993,
      "grad_norm": 0.41244184970855713,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.6728,
      "step": 41
    },
    {
      "epoch": 0.05991440798858773,
      "grad_norm": 0.46274593472480774,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.6393,
      "step": 42
    },
    {
      "epoch": 0.06134094151212553,
      "grad_norm": 0.4266503155231476,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.6643,
      "step": 43
    },
    {
      "epoch": 0.06276747503566334,
      "grad_norm": 0.43672090768814087,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.6692,
      "step": 44
    },
    {
      "epoch": 0.06419400855920114,
      "grad_norm": 0.5269065499305725,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.7854,
      "step": 45
    },
    {
      "epoch": 0.06562054208273894,
      "grad_norm": 0.532936692237854,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.8273,
      "step": 46
    },
    {
      "epoch": 0.06704707560627675,
      "grad_norm": 0.5537221431732178,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.8122,
      "step": 47
    },
    {
      "epoch": 0.06847360912981455,
      "grad_norm": 0.5947166681289673,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.8511,
      "step": 48
    },
    {
      "epoch": 0.06990014265335236,
      "grad_norm": 0.7053452134132385,
      "learning_rate": 8.995939984474624e-05,
      "loss": 0.8737,
      "step": 49
    },
    {
      "epoch": 0.07132667617689016,
      "grad_norm": 0.7121965885162354,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.8775,
      "step": 50
    },
    {
      "epoch": 0.07132667617689016,
      "eval_loss": 0.8270567655563354,
      "eval_runtime": 51.0262,
      "eval_samples_per_second": 23.145,
      "eval_steps_per_second": 5.801,
      "step": 50
    },
    {
      "epoch": 0.07275320970042796,
      "grad_norm": 1.1756590604782104,
      "learning_rate": 8.894386393810563e-05,
      "loss": 1.1036,
      "step": 51
    },
    {
      "epoch": 0.07417974322396577,
      "grad_norm": 0.6972058415412903,
      "learning_rate": 8.842005554284296e-05,
      "loss": 1.0116,
      "step": 52
    },
    {
      "epoch": 0.07560627674750357,
      "grad_norm": 0.4651179313659668,
      "learning_rate": 8.788574348801675e-05,
      "loss": 1.0018,
      "step": 53
    },
    {
      "epoch": 0.07703281027104136,
      "grad_norm": 0.4065392017364502,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.8989,
      "step": 54
    },
    {
      "epoch": 0.07845934379457917,
      "grad_norm": 0.36776724457740784,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.8447,
      "step": 55
    },
    {
      "epoch": 0.07988587731811697,
      "grad_norm": 0.3628358542919159,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.8829,
      "step": 56
    },
    {
      "epoch": 0.08131241084165478,
      "grad_norm": 0.374190092086792,
      "learning_rate": 8.564642241456986e-05,
      "loss": 0.8638,
      "step": 57
    },
    {
      "epoch": 0.08273894436519258,
      "grad_norm": 0.3565217852592468,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.8325,
      "step": 58
    },
    {
      "epoch": 0.08416547788873038,
      "grad_norm": 0.3390045762062073,
      "learning_rate": 8.44676704559283e-05,
      "loss": 0.8291,
      "step": 59
    },
    {
      "epoch": 0.08559201141226819,
      "grad_norm": 0.3200775682926178,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.8356,
      "step": 60
    },
    {
      "epoch": 0.08701854493580599,
      "grad_norm": 0.2936284840106964,
      "learning_rate": 8.32512286056924e-05,
      "loss": 0.7451,
      "step": 61
    },
    {
      "epoch": 0.0884450784593438,
      "grad_norm": 0.2914464473724365,
      "learning_rate": 8.262928807620843e-05,
      "loss": 0.7528,
      "step": 62
    },
    {
      "epoch": 0.0898716119828816,
      "grad_norm": 0.3063439130783081,
      "learning_rate": 8.199842702516583e-05,
      "loss": 0.7651,
      "step": 63
    },
    {
      "epoch": 0.0912981455064194,
      "grad_norm": 0.2865007221698761,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.7366,
      "step": 64
    },
    {
      "epoch": 0.09272467902995721,
      "grad_norm": 0.2742938697338104,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.6642,
      "step": 65
    },
    {
      "epoch": 0.09415121255349501,
      "grad_norm": 0.28679823875427246,
      "learning_rate": 8.005405736415126e-05,
      "loss": 0.7357,
      "step": 66
    },
    {
      "epoch": 0.09557774607703282,
      "grad_norm": 0.26286858320236206,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.6506,
      "step": 67
    },
    {
      "epoch": 0.09700427960057062,
      "grad_norm": 0.2730686664581299,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.6333,
      "step": 68
    },
    {
      "epoch": 0.09843081312410841,
      "grad_norm": 0.26903438568115234,
      "learning_rate": 7.803575286758364e-05,
      "loss": 0.6549,
      "step": 69
    },
    {
      "epoch": 0.09985734664764621,
      "grad_norm": 0.27704933285713196,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.5992,
      "step": 70
    },
    {
      "epoch": 0.10128388017118402,
      "grad_norm": 0.29738301038742065,
      "learning_rate": 7.66515864363997e-05,
      "loss": 0.6667,
      "step": 71
    },
    {
      "epoch": 0.10271041369472182,
      "grad_norm": 0.29280057549476624,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.6263,
      "step": 72
    },
    {
      "epoch": 0.10413694721825963,
      "grad_norm": 0.30204644799232483,
      "learning_rate": 7.52382768867422e-05,
      "loss": 0.6407,
      "step": 73
    },
    {
      "epoch": 0.10556348074179743,
      "grad_norm": 0.3014342188835144,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.6398,
      "step": 74
    },
    {
      "epoch": 0.10699001426533523,
      "grad_norm": 0.29651400446891785,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.5796,
      "step": 75
    },
    {
      "epoch": 0.10841654778887304,
      "grad_norm": 0.3081927001476288,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.5883,
      "step": 76
    },
    {
      "epoch": 0.10984308131241084,
      "grad_norm": 0.30435124039649963,
      "learning_rate": 7.233044034264034e-05,
      "loss": 0.5776,
      "step": 77
    },
    {
      "epoch": 0.11126961483594865,
      "grad_norm": 0.3181886374950409,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.6095,
      "step": 78
    },
    {
      "epoch": 0.11269614835948645,
      "grad_norm": 0.32639774680137634,
      "learning_rate": 7.083909302476453e-05,
      "loss": 0.5686,
      "step": 79
    },
    {
      "epoch": 0.11412268188302425,
      "grad_norm": 0.34764426946640015,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.6012,
      "step": 80
    },
    {
      "epoch": 0.11554921540656206,
      "grad_norm": 0.29759591817855835,
      "learning_rate": 6.932495846462261e-05,
      "loss": 0.5706,
      "step": 81
    },
    {
      "epoch": 0.11697574893009986,
      "grad_norm": 0.2997024953365326,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.5715,
      "step": 82
    },
    {
      "epoch": 0.11840228245363767,
      "grad_norm": 0.3310617208480835,
      "learning_rate": 6.778969234612584e-05,
      "loss": 0.5828,
      "step": 83
    },
    {
      "epoch": 0.11982881597717546,
      "grad_norm": 0.3212164044380188,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.6053,
      "step": 84
    },
    {
      "epoch": 0.12125534950071326,
      "grad_norm": 0.3197632431983948,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.5949,
      "step": 85
    },
    {
      "epoch": 0.12268188302425106,
      "grad_norm": 0.3209846615791321,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.5298,
      "step": 86
    },
    {
      "epoch": 0.12410841654778887,
      "grad_norm": 0.3380773961544037,
      "learning_rate": 6.466250186922325e-05,
      "loss": 0.5865,
      "step": 87
    },
    {
      "epoch": 0.12553495007132667,
      "grad_norm": 0.35354191064834595,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.5579,
      "step": 88
    },
    {
      "epoch": 0.12696148359486448,
      "grad_norm": 0.34404274821281433,
      "learning_rate": 6.307399704769099e-05,
      "loss": 0.5323,
      "step": 89
    },
    {
      "epoch": 0.12838801711840228,
      "grad_norm": 0.35105860233306885,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.5327,
      "step": 90
    },
    {
      "epoch": 0.12981455064194009,
      "grad_norm": 0.37425288558006287,
      "learning_rate": 6.147119600233758e-05,
      "loss": 0.494,
      "step": 91
    },
    {
      "epoch": 0.1312410841654779,
      "grad_norm": 0.3759922683238983,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.5267,
      "step": 92
    },
    {
      "epoch": 0.1326676176890157,
      "grad_norm": 0.3858267664909363,
      "learning_rate": 5.985585137257401e-05,
      "loss": 0.53,
      "step": 93
    },
    {
      "epoch": 0.1340941512125535,
      "grad_norm": 0.41338178515434265,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.5788,
      "step": 94
    },
    {
      "epoch": 0.1355206847360913,
      "grad_norm": 0.4560675024986267,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 0.6647,
      "step": 95
    },
    {
      "epoch": 0.1369472182596291,
      "grad_norm": 0.5184798240661621,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.7676,
      "step": 96
    },
    {
      "epoch": 0.1383737517831669,
      "grad_norm": 0.5071970224380493,
      "learning_rate": 5.6594608567103456e-05,
      "loss": 0.7084,
      "step": 97
    },
    {
      "epoch": 0.1398002853067047,
      "grad_norm": 0.5310004353523254,
      "learning_rate": 5.577423184847932e-05,
      "loss": 0.7345,
      "step": 98
    },
    {
      "epoch": 0.14122681883024252,
      "grad_norm": 0.5896197557449341,
      "learning_rate": 5.495227651252315e-05,
      "loss": 0.6743,
      "step": 99
    },
    {
      "epoch": 0.14265335235378032,
      "grad_norm": 0.7096996903419495,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.8055,
      "step": 100
    },
    {
      "epoch": 0.14265335235378032,
      "eval_loss": 0.7116295099258423,
      "eval_runtime": 51.0281,
      "eval_samples_per_second": 23.144,
      "eval_steps_per_second": 5.801,
      "step": 100
    },
    {
      "epoch": 0.14407988587731813,
      "grad_norm": 0.6752876043319702,
      "learning_rate": 5.330452921628497e-05,
      "loss": 1.004,
      "step": 101
    },
    {
      "epoch": 0.14550641940085593,
      "grad_norm": 0.5631501078605652,
      "learning_rate": 5.247918773366112e-05,
      "loss": 0.9638,
      "step": 102
    },
    {
      "epoch": 0.14693295292439373,
      "grad_norm": 0.4354359805583954,
      "learning_rate": 5.165316846586541e-05,
      "loss": 0.839,
      "step": 103
    },
    {
      "epoch": 0.14835948644793154,
      "grad_norm": 0.3501318693161011,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 0.8769,
      "step": 104
    },
    {
      "epoch": 0.14978601997146934,
      "grad_norm": 0.3088143765926361,
      "learning_rate": 5e-05,
      "loss": 0.7795,
      "step": 105
    },
    {
      "epoch": 0.15121255349500715,
      "grad_norm": 0.2911892235279083,
      "learning_rate": 4.917330276168208e-05,
      "loss": 0.8036,
      "step": 106
    },
    {
      "epoch": 0.15263908701854492,
      "grad_norm": 0.31689971685409546,
      "learning_rate": 4.834683153413459e-05,
      "loss": 0.7908,
      "step": 107
    },
    {
      "epoch": 0.15406562054208273,
      "grad_norm": 0.301882266998291,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 0.7665,
      "step": 108
    },
    {
      "epoch": 0.15549215406562053,
      "grad_norm": 0.3027181029319763,
      "learning_rate": 4.669547078371504e-05,
      "loss": 0.7183,
      "step": 109
    },
    {
      "epoch": 0.15691868758915833,
      "grad_norm": 0.2946275770664215,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.726,
      "step": 110
    },
    {
      "epoch": 0.15834522111269614,
      "grad_norm": 0.2899071276187897,
      "learning_rate": 4.504772348747687e-05,
      "loss": 0.6889,
      "step": 111
    },
    {
      "epoch": 0.15977175463623394,
      "grad_norm": 0.3114294707775116,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 0.7008,
      "step": 112
    },
    {
      "epoch": 0.16119828815977175,
      "grad_norm": 0.34090089797973633,
      "learning_rate": 4.3405391432896555e-05,
      "loss": 0.747,
      "step": 113
    },
    {
      "epoch": 0.16262482168330955,
      "grad_norm": 0.3299725353717804,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 0.6958,
      "step": 114
    },
    {
      "epoch": 0.16405135520684735,
      "grad_norm": 0.33349403738975525,
      "learning_rate": 4.17702704859633e-05,
      "loss": 0.6789,
      "step": 115
    },
    {
      "epoch": 0.16547788873038516,
      "grad_norm": 0.3047965168952942,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.565,
      "step": 116
    },
    {
      "epoch": 0.16690442225392296,
      "grad_norm": 0.302217572927475,
      "learning_rate": 4.0144148627425993e-05,
      "loss": 0.6514,
      "step": 117
    },
    {
      "epoch": 0.16833095577746077,
      "grad_norm": 0.29827603697776794,
      "learning_rate": 3.933501846281267e-05,
      "loss": 0.6366,
      "step": 118
    },
    {
      "epoch": 0.16975748930099857,
      "grad_norm": 0.28933754563331604,
      "learning_rate": 3.852880399766243e-05,
      "loss": 0.5795,
      "step": 119
    },
    {
      "epoch": 0.17118402282453637,
      "grad_norm": 0.2992074489593506,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.6064,
      "step": 120
    },
    {
      "epoch": 0.17261055634807418,
      "grad_norm": 0.3025222420692444,
      "learning_rate": 3.6926002952309016e-05,
      "loss": 0.6027,
      "step": 121
    },
    {
      "epoch": 0.17403708987161198,
      "grad_norm": 0.29351988434791565,
      "learning_rate": 3.612985456190778e-05,
      "loss": 0.5959,
      "step": 122
    },
    {
      "epoch": 0.1754636233951498,
      "grad_norm": 0.309945672750473,
      "learning_rate": 3.533749813077677e-05,
      "loss": 0.6132,
      "step": 123
    },
    {
      "epoch": 0.1768901569186876,
      "grad_norm": 0.3039770722389221,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.6146,
      "step": 124
    },
    {
      "epoch": 0.1783166904422254,
      "grad_norm": 0.2933726906776428,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 0.4949,
      "step": 125
    },
    {
      "epoch": 0.1797432239657632,
      "grad_norm": 0.30167555809020996,
      "learning_rate": 3.298534127791785e-05,
      "loss": 0.586,
      "step": 126
    },
    {
      "epoch": 0.181169757489301,
      "grad_norm": 0.3015027940273285,
      "learning_rate": 3.221030765387417e-05,
      "loss": 0.5899,
      "step": 127
    },
    {
      "epoch": 0.1825962910128388,
      "grad_norm": 0.3031955361366272,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.5593,
      "step": 128
    },
    {
      "epoch": 0.1840228245363766,
      "grad_norm": 0.31584692001342773,
      "learning_rate": 3.0675041535377405e-05,
      "loss": 0.584,
      "step": 129
    },
    {
      "epoch": 0.18544935805991442,
      "grad_norm": 0.30260542035102844,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.5989,
      "step": 130
    },
    {
      "epoch": 0.18687589158345222,
      "grad_norm": 0.3065981864929199,
      "learning_rate": 2.916090697523549e-05,
      "loss": 0.5288,
      "step": 131
    },
    {
      "epoch": 0.18830242510699002,
      "grad_norm": 0.3048754930496216,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 0.5405,
      "step": 132
    },
    {
      "epoch": 0.18972895863052783,
      "grad_norm": 0.3277784585952759,
      "learning_rate": 2.766955965735968e-05,
      "loss": 0.5799,
      "step": 133
    },
    {
      "epoch": 0.19115549215406563,
      "grad_norm": 0.3249419629573822,
      "learning_rate": 2.693294185106562e-05,
      "loss": 0.5956,
      "step": 134
    },
    {
      "epoch": 0.19258202567760344,
      "grad_norm": 0.3119509518146515,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 0.4933,
      "step": 135
    },
    {
      "epoch": 0.19400855920114124,
      "grad_norm": 0.32727769017219543,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.5665,
      "step": 136
    },
    {
      "epoch": 0.19543509272467904,
      "grad_norm": 0.32874032855033875,
      "learning_rate": 2.476172311325783e-05,
      "loss": 0.5535,
      "step": 137
    },
    {
      "epoch": 0.19686162624821682,
      "grad_norm": 0.3373146057128906,
      "learning_rate": 2.405152131093926e-05,
      "loss": 0.5447,
      "step": 138
    },
    {
      "epoch": 0.19828815977175462,
      "grad_norm": 0.338911771774292,
      "learning_rate": 2.3348413563600325e-05,
      "loss": 0.5341,
      "step": 139
    },
    {
      "epoch": 0.19971469329529243,
      "grad_norm": 0.3686864972114563,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.501,
      "step": 140
    },
    {
      "epoch": 0.20114122681883023,
      "grad_norm": 0.3591432273387909,
      "learning_rate": 2.196424713241637e-05,
      "loss": 0.5341,
      "step": 141
    },
    {
      "epoch": 0.20256776034236804,
      "grad_norm": 0.359300822019577,
      "learning_rate": 2.128356686585282e-05,
      "loss": 0.5424,
      "step": 142
    },
    {
      "epoch": 0.20399429386590584,
      "grad_norm": 0.3716094493865967,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.5702,
      "step": 143
    },
    {
      "epoch": 0.20542082738944364,
      "grad_norm": 0.41241884231567383,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.5296,
      "step": 144
    },
    {
      "epoch": 0.20684736091298145,
      "grad_norm": 0.4353717565536499,
      "learning_rate": 1.928936436551661e-05,
      "loss": 0.6293,
      "step": 145
    },
    {
      "epoch": 0.20827389443651925,
      "grad_norm": 0.5091322660446167,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 0.7188,
      "step": 146
    },
    {
      "epoch": 0.20970042796005706,
      "grad_norm": 0.542036235332489,
      "learning_rate": 1.800157297483417e-05,
      "loss": 0.7599,
      "step": 147
    },
    {
      "epoch": 0.21112696148359486,
      "grad_norm": 0.552924394607544,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.7146,
      "step": 148
    },
    {
      "epoch": 0.21255349500713266,
      "grad_norm": 0.5632199048995972,
      "learning_rate": 1.6748771394307585e-05,
      "loss": 0.7036,
      "step": 149
    },
    {
      "epoch": 0.21398002853067047,
      "grad_norm": 0.6995756030082703,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.7907,
      "step": 150
    },
    {
      "epoch": 0.21398002853067047,
      "eval_loss": 0.6300157308578491,
      "eval_runtime": 50.9757,
      "eval_samples_per_second": 23.168,
      "eval_steps_per_second": 5.807,
      "step": 150
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.01030282919936e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}