{
"best_metric": 0.5023409128189087,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.49504950495049505,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0033003300330033004,
"grad_norm": 0.13191445171833038,
"learning_rate": 1e-05,
"loss": 1.0502,
"step": 1
},
{
"epoch": 0.0033003300330033004,
"eval_loss": 1.2239747047424316,
"eval_runtime": 19.2605,
"eval_samples_per_second": 26.531,
"eval_steps_per_second": 6.646,
"step": 1
},
{
"epoch": 0.006600660066006601,
"grad_norm": 0.14231789112091064,
"learning_rate": 2e-05,
"loss": 1.1435,
"step": 2
},
{
"epoch": 0.009900990099009901,
"grad_norm": 0.14200475811958313,
"learning_rate": 3e-05,
"loss": 1.1685,
"step": 3
},
{
"epoch": 0.013201320132013201,
"grad_norm": 0.14261862635612488,
"learning_rate": 4e-05,
"loss": 1.1378,
"step": 4
},
{
"epoch": 0.0165016501650165,
"grad_norm": 0.14832687377929688,
"learning_rate": 5e-05,
"loss": 1.1728,
"step": 5
},
{
"epoch": 0.019801980198019802,
"grad_norm": 0.14184178411960602,
"learning_rate": 6e-05,
"loss": 1.1733,
"step": 6
},
{
"epoch": 0.0231023102310231,
"grad_norm": 0.147707998752594,
"learning_rate": 7e-05,
"loss": 1.1587,
"step": 7
},
{
"epoch": 0.026402640264026403,
"grad_norm": 0.13911840319633484,
"learning_rate": 8e-05,
"loss": 1.1527,
"step": 8
},
{
"epoch": 0.0297029702970297,
"grad_norm": 0.12455406785011292,
"learning_rate": 9e-05,
"loss": 1.1458,
"step": 9
},
{
"epoch": 0.033003300330033,
"grad_norm": 0.11690079420804977,
"learning_rate": 0.0001,
"loss": 1.1124,
"step": 10
},
{
"epoch": 0.036303630363036306,
"grad_norm": 0.11457813531160355,
"learning_rate": 9.999316524962345e-05,
"loss": 1.0481,
"step": 11
},
{
"epoch": 0.039603960396039604,
"grad_norm": 0.12541747093200684,
"learning_rate": 9.997266286704631e-05,
"loss": 1.0753,
"step": 12
},
{
"epoch": 0.0429042904290429,
"grad_norm": 0.1269989311695099,
"learning_rate": 9.993849845741524e-05,
"loss": 1.0036,
"step": 13
},
{
"epoch": 0.0462046204620462,
"grad_norm": 0.12732452154159546,
"learning_rate": 9.989068136093873e-05,
"loss": 1.0053,
"step": 14
},
{
"epoch": 0.04950495049504951,
"grad_norm": 0.12474920600652695,
"learning_rate": 9.98292246503335e-05,
"loss": 0.9768,
"step": 15
},
{
"epoch": 0.052805280528052806,
"grad_norm": 0.1276063323020935,
"learning_rate": 9.975414512725057e-05,
"loss": 0.9214,
"step": 16
},
{
"epoch": 0.056105610561056105,
"grad_norm": 0.13297142088413239,
"learning_rate": 9.966546331768191e-05,
"loss": 0.8668,
"step": 17
},
{
"epoch": 0.0594059405940594,
"grad_norm": 0.12752574682235718,
"learning_rate": 9.956320346634876e-05,
"loss": 0.8715,
"step": 18
},
{
"epoch": 0.0627062706270627,
"grad_norm": 0.11911755800247192,
"learning_rate": 9.944739353007344e-05,
"loss": 0.8293,
"step": 19
},
{
"epoch": 0.066006600660066,
"grad_norm": 0.12269636243581772,
"learning_rate": 9.931806517013612e-05,
"loss": 0.8354,
"step": 20
},
{
"epoch": 0.06930693069306931,
"grad_norm": 0.11714192479848862,
"learning_rate": 9.917525374361912e-05,
"loss": 0.789,
"step": 21
},
{
"epoch": 0.07260726072607261,
"grad_norm": 0.1211303249001503,
"learning_rate": 9.901899829374047e-05,
"loss": 0.7905,
"step": 22
},
{
"epoch": 0.07590759075907591,
"grad_norm": 0.12314417213201523,
"learning_rate": 9.884934153917997e-05,
"loss": 0.7648,
"step": 23
},
{
"epoch": 0.07920792079207921,
"grad_norm": 0.1339118629693985,
"learning_rate": 9.86663298624003e-05,
"loss": 0.7754,
"step": 24
},
{
"epoch": 0.08250825082508251,
"grad_norm": 0.13359405100345612,
"learning_rate": 9.847001329696653e-05,
"loss": 0.7686,
"step": 25
},
{
"epoch": 0.0858085808580858,
"grad_norm": 0.11699782311916351,
"learning_rate": 9.826044551386744e-05,
"loss": 0.7366,
"step": 26
},
{
"epoch": 0.0891089108910891,
"grad_norm": 0.11440645903348923,
"learning_rate": 9.803768380684242e-05,
"loss": 0.6898,
"step": 27
},
{
"epoch": 0.0924092409240924,
"grad_norm": 0.11616159230470657,
"learning_rate": 9.780178907671789e-05,
"loss": 0.6708,
"step": 28
},
{
"epoch": 0.09570957095709572,
"grad_norm": 0.11759506165981293,
"learning_rate": 9.755282581475769e-05,
"loss": 0.7076,
"step": 29
},
{
"epoch": 0.09900990099009901,
"grad_norm": 0.09457705169916153,
"learning_rate": 9.729086208503174e-05,
"loss": 0.6778,
"step": 30
},
{
"epoch": 0.10231023102310231,
"grad_norm": 0.10516991466283798,
"learning_rate": 9.701596950580806e-05,
"loss": 0.6753,
"step": 31
},
{
"epoch": 0.10561056105610561,
"grad_norm": 0.10476139932870865,
"learning_rate": 9.672822322997305e-05,
"loss": 0.668,
"step": 32
},
{
"epoch": 0.10891089108910891,
"grad_norm": 0.09963483363389969,
"learning_rate": 9.642770192448536e-05,
"loss": 0.6619,
"step": 33
},
{
"epoch": 0.11221122112211221,
"grad_norm": 0.10152124613523483,
"learning_rate": 9.611448774886924e-05,
"loss": 0.6418,
"step": 34
},
{
"epoch": 0.11551155115511551,
"grad_norm": 0.09857919067144394,
"learning_rate": 9.578866633275288e-05,
"loss": 0.63,
"step": 35
},
{
"epoch": 0.1188118811881188,
"grad_norm": 0.09648553282022476,
"learning_rate": 9.545032675245813e-05,
"loss": 0.618,
"step": 36
},
{
"epoch": 0.12211221122112212,
"grad_norm": 0.09807959198951721,
"learning_rate": 9.509956150664796e-05,
"loss": 0.6127,
"step": 37
},
{
"epoch": 0.1254125412541254,
"grad_norm": 0.09354656934738159,
"learning_rate": 9.473646649103818e-05,
"loss": 0.6147,
"step": 38
},
{
"epoch": 0.12871287128712872,
"grad_norm": 0.0952218547463417,
"learning_rate": 9.43611409721806e-05,
"loss": 0.6261,
"step": 39
},
{
"epoch": 0.132013201320132,
"grad_norm": 0.09872277826070786,
"learning_rate": 9.397368756032445e-05,
"loss": 0.6259,
"step": 40
},
{
"epoch": 0.1353135313531353,
"grad_norm": 0.09944599121809006,
"learning_rate": 9.357421218136386e-05,
"loss": 0.5878,
"step": 41
},
{
"epoch": 0.13861386138613863,
"grad_norm": 0.10035327076911926,
"learning_rate": 9.316282404787871e-05,
"loss": 0.594,
"step": 42
},
{
"epoch": 0.1419141914191419,
"grad_norm": 0.09558921307325363,
"learning_rate": 9.273963562927695e-05,
"loss": 0.5838,
"step": 43
},
{
"epoch": 0.14521452145214522,
"grad_norm": 0.10590150207281113,
"learning_rate": 9.230476262104677e-05,
"loss": 0.6392,
"step": 44
},
{
"epoch": 0.1485148514851485,
"grad_norm": 0.1094168871641159,
"learning_rate": 9.185832391312644e-05,
"loss": 0.6628,
"step": 45
},
{
"epoch": 0.15181518151815182,
"grad_norm": 0.09918393194675446,
"learning_rate": 9.140044155740101e-05,
"loss": 0.6344,
"step": 46
},
{
"epoch": 0.1551155115511551,
"grad_norm": 0.09946653246879578,
"learning_rate": 9.093124073433463e-05,
"loss": 0.5984,
"step": 47
},
{
"epoch": 0.15841584158415842,
"grad_norm": 0.0981588140130043,
"learning_rate": 9.045084971874738e-05,
"loss": 0.5803,
"step": 48
},
{
"epoch": 0.1617161716171617,
"grad_norm": 0.10726366937160492,
"learning_rate": 8.995939984474624e-05,
"loss": 0.6035,
"step": 49
},
{
"epoch": 0.16501650165016502,
"grad_norm": 0.1146295815706253,
"learning_rate": 8.945702546981969e-05,
"loss": 0.6874,
"step": 50
},
{
"epoch": 0.16501650165016502,
"eval_loss": 0.589276134967804,
"eval_runtime": 19.6322,
"eval_samples_per_second": 26.029,
"eval_steps_per_second": 6.52,
"step": 50
},
{
"epoch": 0.16831683168316833,
"grad_norm": 0.12205219268798828,
"learning_rate": 8.894386393810563e-05,
"loss": 0.6211,
"step": 51
},
{
"epoch": 0.1716171617161716,
"grad_norm": 0.10567279905080795,
"learning_rate": 8.842005554284296e-05,
"loss": 0.6298,
"step": 52
},
{
"epoch": 0.17491749174917492,
"grad_norm": 0.08974870294332504,
"learning_rate": 8.788574348801675e-05,
"loss": 0.5842,
"step": 53
},
{
"epoch": 0.1782178217821782,
"grad_norm": 0.08729631453752518,
"learning_rate": 8.73410738492077e-05,
"loss": 0.6124,
"step": 54
},
{
"epoch": 0.18151815181518152,
"grad_norm": 0.08566755801439285,
"learning_rate": 8.678619553365659e-05,
"loss": 0.6069,
"step": 55
},
{
"epoch": 0.1848184818481848,
"grad_norm": 0.09104456752538681,
"learning_rate": 8.622126023955446e-05,
"loss": 0.6047,
"step": 56
},
{
"epoch": 0.18811881188118812,
"grad_norm": 0.08168845623731613,
"learning_rate": 8.564642241456986e-05,
"loss": 0.5828,
"step": 57
},
{
"epoch": 0.19141914191419143,
"grad_norm": 0.08753436803817749,
"learning_rate": 8.506183921362443e-05,
"loss": 0.5769,
"step": 58
},
{
"epoch": 0.19471947194719472,
"grad_norm": 0.09024737030267715,
"learning_rate": 8.44676704559283e-05,
"loss": 0.5895,
"step": 59
},
{
"epoch": 0.19801980198019803,
"grad_norm": 0.08733727037906647,
"learning_rate": 8.386407858128706e-05,
"loss": 0.585,
"step": 60
},
{
"epoch": 0.20132013201320131,
"grad_norm": 0.09227323532104492,
"learning_rate": 8.32512286056924e-05,
"loss": 0.6028,
"step": 61
},
{
"epoch": 0.20462046204620463,
"grad_norm": 0.09184302389621735,
"learning_rate": 8.262928807620843e-05,
"loss": 0.5811,
"step": 62
},
{
"epoch": 0.2079207920792079,
"grad_norm": 0.0868532657623291,
"learning_rate": 8.199842702516583e-05,
"loss": 0.55,
"step": 63
},
{
"epoch": 0.21122112211221122,
"grad_norm": 0.08939136564731598,
"learning_rate": 8.135881792367686e-05,
"loss": 0.5979,
"step": 64
},
{
"epoch": 0.2145214521452145,
"grad_norm": 0.07770807296037674,
"learning_rate": 8.07106356344834e-05,
"loss": 0.5475,
"step": 65
},
{
"epoch": 0.21782178217821782,
"grad_norm": 0.08185362070798874,
"learning_rate": 8.005405736415126e-05,
"loss": 0.5551,
"step": 66
},
{
"epoch": 0.22112211221122113,
"grad_norm": 0.07696875929832458,
"learning_rate": 7.938926261462366e-05,
"loss": 0.5407,
"step": 67
},
{
"epoch": 0.22442244224422442,
"grad_norm": 0.08806393295526505,
"learning_rate": 7.871643313414718e-05,
"loss": 0.5661,
"step": 68
},
{
"epoch": 0.22772277227722773,
"grad_norm": 0.08568396419286728,
"learning_rate": 7.803575286758364e-05,
"loss": 0.5782,
"step": 69
},
{
"epoch": 0.23102310231023102,
"grad_norm": 0.0884920135140419,
"learning_rate": 7.734740790612136e-05,
"loss": 0.5451,
"step": 70
},
{
"epoch": 0.23432343234323433,
"grad_norm": 0.08179255574941635,
"learning_rate": 7.66515864363997e-05,
"loss": 0.5143,
"step": 71
},
{
"epoch": 0.2376237623762376,
"grad_norm": 0.09769521653652191,
"learning_rate": 7.594847868906076e-05,
"loss": 0.6026,
"step": 72
},
{
"epoch": 0.24092409240924093,
"grad_norm": 0.08831746876239777,
"learning_rate": 7.52382768867422e-05,
"loss": 0.556,
"step": 73
},
{
"epoch": 0.24422442244224424,
"grad_norm": 0.09301641583442688,
"learning_rate": 7.452117519152542e-05,
"loss": 0.5409,
"step": 74
},
{
"epoch": 0.24752475247524752,
"grad_norm": 0.09096867591142654,
"learning_rate": 7.379736965185368e-05,
"loss": 0.5713,
"step": 75
},
{
"epoch": 0.2508250825082508,
"grad_norm": 0.09333696216344833,
"learning_rate": 7.30670581489344e-05,
"loss": 0.553,
"step": 76
},
{
"epoch": 0.25412541254125415,
"grad_norm": 0.08953896909952164,
"learning_rate": 7.233044034264034e-05,
"loss": 0.517,
"step": 77
},
{
"epoch": 0.25742574257425743,
"grad_norm": 0.09030700474977493,
"learning_rate": 7.158771761692464e-05,
"loss": 0.5627,
"step": 78
},
{
"epoch": 0.2607260726072607,
"grad_norm": 0.08921173214912415,
"learning_rate": 7.083909302476453e-05,
"loss": 0.5142,
"step": 79
},
{
"epoch": 0.264026402640264,
"grad_norm": 0.09332939237356186,
"learning_rate": 7.008477123264848e-05,
"loss": 0.5529,
"step": 80
},
{
"epoch": 0.26732673267326734,
"grad_norm": 0.08547434210777283,
"learning_rate": 6.932495846462261e-05,
"loss": 0.5029,
"step": 81
},
{
"epoch": 0.2706270627062706,
"grad_norm": 0.09413249045610428,
"learning_rate": 6.855986244591104e-05,
"loss": 0.5349,
"step": 82
},
{
"epoch": 0.2739273927392739,
"grad_norm": 0.09226549416780472,
"learning_rate": 6.778969234612584e-05,
"loss": 0.5033,
"step": 83
},
{
"epoch": 0.27722772277227725,
"grad_norm": 0.09563800692558289,
"learning_rate": 6.701465872208216e-05,
"loss": 0.496,
"step": 84
},
{
"epoch": 0.28052805280528054,
"grad_norm": 0.09730054438114166,
"learning_rate": 6.623497346023418e-05,
"loss": 0.5311,
"step": 85
},
{
"epoch": 0.2838283828382838,
"grad_norm": 0.12224038690328598,
"learning_rate": 6.545084971874738e-05,
"loss": 0.6106,
"step": 86
},
{
"epoch": 0.2871287128712871,
"grad_norm": 0.09914378076791763,
"learning_rate": 6.466250186922325e-05,
"loss": 0.5486,
"step": 87
},
{
"epoch": 0.29042904290429045,
"grad_norm": 0.09854406118392944,
"learning_rate": 6.387014543809223e-05,
"loss": 0.5229,
"step": 88
},
{
"epoch": 0.29372937293729373,
"grad_norm": 0.10298023372888565,
"learning_rate": 6.307399704769099e-05,
"loss": 0.5448,
"step": 89
},
{
"epoch": 0.297029702970297,
"grad_norm": 0.101314015686512,
"learning_rate": 6.227427435703997e-05,
"loss": 0.5167,
"step": 90
},
{
"epoch": 0.30033003300330036,
"grad_norm": 0.10212904214859009,
"learning_rate": 6.147119600233758e-05,
"loss": 0.5365,
"step": 91
},
{
"epoch": 0.30363036303630364,
"grad_norm": 0.10842400789260864,
"learning_rate": 6.066498153718735e-05,
"loss": 0.5437,
"step": 92
},
{
"epoch": 0.3069306930693069,
"grad_norm": 0.1008092612028122,
"learning_rate": 5.985585137257401e-05,
"loss": 0.5071,
"step": 93
},
{
"epoch": 0.3102310231023102,
"grad_norm": 0.11197441816329956,
"learning_rate": 5.90440267166055e-05,
"loss": 0.5209,
"step": 94
},
{
"epoch": 0.31353135313531355,
"grad_norm": 0.10850328952074051,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.5138,
"step": 95
},
{
"epoch": 0.31683168316831684,
"grad_norm": 0.12687678635120392,
"learning_rate": 5.74131823855921e-05,
"loss": 0.5197,
"step": 96
},
{
"epoch": 0.3201320132013201,
"grad_norm": 0.10580695420503616,
"learning_rate": 5.6594608567103456e-05,
"loss": 0.5218,
"step": 97
},
{
"epoch": 0.3234323432343234,
"grad_norm": 0.12822967767715454,
"learning_rate": 5.577423184847932e-05,
"loss": 0.5665,
"step": 98
},
{
"epoch": 0.32673267326732675,
"grad_norm": 0.11881717294454575,
"learning_rate": 5.495227651252315e-05,
"loss": 0.5514,
"step": 99
},
{
"epoch": 0.33003300330033003,
"grad_norm": 0.12876836955547333,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.582,
"step": 100
},
{
"epoch": 0.33003300330033003,
"eval_loss": 0.523641049861908,
"eval_runtime": 19.5384,
"eval_samples_per_second": 26.154,
"eval_steps_per_second": 6.551,
"step": 100
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.12627676129341125,
"learning_rate": 5.330452921628497e-05,
"loss": 0.5321,
"step": 101
},
{
"epoch": 0.33663366336633666,
"grad_norm": 0.10751184076070786,
"learning_rate": 5.247918773366112e-05,
"loss": 0.5947,
"step": 102
},
{
"epoch": 0.33993399339933994,
"grad_norm": 0.10532073676586151,
"learning_rate": 5.165316846586541e-05,
"loss": 0.5408,
"step": 103
},
{
"epoch": 0.3432343234323432,
"grad_norm": 0.10113536566495895,
"learning_rate": 5.0826697238317935e-05,
"loss": 0.5425,
"step": 104
},
{
"epoch": 0.3465346534653465,
"grad_norm": 0.10064956545829773,
"learning_rate": 5e-05,
"loss": 0.5199,
"step": 105
},
{
"epoch": 0.34983498349834985,
"grad_norm": 0.09943439066410065,
"learning_rate": 4.917330276168208e-05,
"loss": 0.5369,
"step": 106
},
{
"epoch": 0.35313531353135313,
"grad_norm": 0.0966525748372078,
"learning_rate": 4.834683153413459e-05,
"loss": 0.536,
"step": 107
},
{
"epoch": 0.3564356435643564,
"grad_norm": 0.09476858377456665,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.5125,
"step": 108
},
{
"epoch": 0.35973597359735976,
"grad_norm": 0.10993063449859619,
"learning_rate": 4.669547078371504e-05,
"loss": 0.5671,
"step": 109
},
{
"epoch": 0.36303630363036304,
"grad_norm": 0.10246788710355759,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.542,
"step": 110
},
{
"epoch": 0.36633663366336633,
"grad_norm": 0.09605566412210464,
"learning_rate": 4.504772348747687e-05,
"loss": 0.5367,
"step": 111
},
{
"epoch": 0.3696369636963696,
"grad_norm": 0.09681887924671173,
"learning_rate": 4.4225768151520694e-05,
"loss": 0.5293,
"step": 112
},
{
"epoch": 0.37293729372937295,
"grad_norm": 0.10017819702625275,
"learning_rate": 4.3405391432896555e-05,
"loss": 0.5246,
"step": 113
},
{
"epoch": 0.37623762376237624,
"grad_norm": 0.10355076193809509,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.5268,
"step": 114
},
{
"epoch": 0.3795379537953795,
"grad_norm": 0.09535422176122665,
"learning_rate": 4.17702704859633e-05,
"loss": 0.5188,
"step": 115
},
{
"epoch": 0.38283828382838286,
"grad_norm": 0.10046651214361191,
"learning_rate": 4.095597328339452e-05,
"loss": 0.5344,
"step": 116
},
{
"epoch": 0.38613861386138615,
"grad_norm": 0.09468002617359161,
"learning_rate": 4.0144148627425993e-05,
"loss": 0.5073,
"step": 117
},
{
"epoch": 0.38943894389438943,
"grad_norm": 0.09843214601278305,
"learning_rate": 3.933501846281267e-05,
"loss": 0.4791,
"step": 118
},
{
"epoch": 0.3927392739273927,
"grad_norm": 0.09225810319185257,
"learning_rate": 3.852880399766243e-05,
"loss": 0.5174,
"step": 119
},
{
"epoch": 0.39603960396039606,
"grad_norm": 0.0961516946554184,
"learning_rate": 3.772572564296005e-05,
"loss": 0.5041,
"step": 120
},
{
"epoch": 0.39933993399339934,
"grad_norm": 0.09838926047086716,
"learning_rate": 3.6926002952309016e-05,
"loss": 0.5282,
"step": 121
},
{
"epoch": 0.40264026402640263,
"grad_norm": 0.09036105871200562,
"learning_rate": 3.612985456190778e-05,
"loss": 0.5101,
"step": 122
},
{
"epoch": 0.40594059405940597,
"grad_norm": 0.09711641073226929,
"learning_rate": 3.533749813077677e-05,
"loss": 0.4795,
"step": 123
},
{
"epoch": 0.40924092409240925,
"grad_norm": 0.09840227663516998,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.5046,
"step": 124
},
{
"epoch": 0.41254125412541254,
"grad_norm": 0.10839550197124481,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.5074,
"step": 125
},
{
"epoch": 0.4158415841584158,
"grad_norm": 0.10646429657936096,
"learning_rate": 3.298534127791785e-05,
"loss": 0.5099,
"step": 126
},
{
"epoch": 0.41914191419141916,
"grad_norm": 0.10080739855766296,
"learning_rate": 3.221030765387417e-05,
"loss": 0.5168,
"step": 127
},
{
"epoch": 0.42244224422442245,
"grad_norm": 0.10391907393932343,
"learning_rate": 3.144013755408895e-05,
"loss": 0.4888,
"step": 128
},
{
"epoch": 0.42574257425742573,
"grad_norm": 0.095652274787426,
"learning_rate": 3.0675041535377405e-05,
"loss": 0.5209,
"step": 129
},
{
"epoch": 0.429042904290429,
"grad_norm": 0.09887038916349411,
"learning_rate": 2.991522876735154e-05,
"loss": 0.4931,
"step": 130
},
{
"epoch": 0.43234323432343236,
"grad_norm": 0.10538124293088913,
"learning_rate": 2.916090697523549e-05,
"loss": 0.4637,
"step": 131
},
{
"epoch": 0.43564356435643564,
"grad_norm": 0.09892656654119492,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.4692,
"step": 132
},
{
"epoch": 0.4389438943894389,
"grad_norm": 0.10736441612243652,
"learning_rate": 2.766955965735968e-05,
"loss": 0.4835,
"step": 133
},
{
"epoch": 0.44224422442244227,
"grad_norm": 0.10107211023569107,
"learning_rate": 2.693294185106562e-05,
"loss": 0.4844,
"step": 134
},
{
"epoch": 0.44554455445544555,
"grad_norm": 0.10143737494945526,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.5066,
"step": 135
},
{
"epoch": 0.44884488448844884,
"grad_norm": 0.09721381962299347,
"learning_rate": 2.547882480847461e-05,
"loss": 0.449,
"step": 136
},
{
"epoch": 0.4521452145214521,
"grad_norm": 0.10460088402032852,
"learning_rate": 2.476172311325783e-05,
"loss": 0.4861,
"step": 137
},
{
"epoch": 0.45544554455445546,
"grad_norm": 0.10618559271097183,
"learning_rate": 2.405152131093926e-05,
"loss": 0.4786,
"step": 138
},
{
"epoch": 0.45874587458745875,
"grad_norm": 0.11044533550739288,
"learning_rate": 2.3348413563600325e-05,
"loss": 0.4919,
"step": 139
},
{
"epoch": 0.46204620462046203,
"grad_norm": 0.11068132519721985,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.4912,
"step": 140
},
{
"epoch": 0.46534653465346537,
"grad_norm": 0.09919344633817673,
"learning_rate": 2.196424713241637e-05,
"loss": 0.4706,
"step": 141
},
{
"epoch": 0.46864686468646866,
"grad_norm": 0.11161225289106369,
"learning_rate": 2.128356686585282e-05,
"loss": 0.537,
"step": 142
},
{
"epoch": 0.47194719471947194,
"grad_norm": 0.11140424013137817,
"learning_rate": 2.061073738537635e-05,
"loss": 0.5141,
"step": 143
},
{
"epoch": 0.4752475247524752,
"grad_norm": 0.10639618337154388,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.5129,
"step": 144
},
{
"epoch": 0.47854785478547857,
"grad_norm": 0.11415501683950424,
"learning_rate": 1.928936436551661e-05,
"loss": 0.4886,
"step": 145
},
{
"epoch": 0.48184818481848185,
"grad_norm": 0.11192227900028229,
"learning_rate": 1.8641182076323148e-05,
"loss": 0.5154,
"step": 146
},
{
"epoch": 0.48514851485148514,
"grad_norm": 0.10550237447023392,
"learning_rate": 1.800157297483417e-05,
"loss": 0.5173,
"step": 147
},
{
"epoch": 0.4884488448844885,
"grad_norm": 0.11088621616363525,
"learning_rate": 1.7370711923791567e-05,
"loss": 0.5344,
"step": 148
},
{
"epoch": 0.49174917491749176,
"grad_norm": 0.11859019845724106,
"learning_rate": 1.6748771394307585e-05,
"loss": 0.4985,
"step": 149
},
{
"epoch": 0.49504950495049505,
"grad_norm": 0.1405971348285675,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.5969,
"step": 150
},
{
"epoch": 0.49504950495049505,
"eval_loss": 0.5023409128189087,
"eval_runtime": 19.5543,
"eval_samples_per_second": 26.132,
"eval_steps_per_second": 6.546,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.609807609534874e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}