{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1009633587143999,
"eval_steps": 500,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00033654452904799967,
"grad_norm": 4.331892967224121,
"learning_rate": 0.0,
"loss": 5.5158,
"step": 1
},
{
"epoch": 0.0006730890580959993,
"grad_norm": 4.539519786834717,
"learning_rate": 6.711409395973154e-07,
"loss": 5.5718,
"step": 2
},
{
"epoch": 0.0013461781161919987,
"grad_norm": 4.208465576171875,
"learning_rate": 2.013422818791946e-06,
"loss": 5.4905,
"step": 4
},
{
"epoch": 0.002019267174287998,
"grad_norm": 3.891338586807251,
"learning_rate": 3.3557046979865773e-06,
"loss": 5.4511,
"step": 6
},
{
"epoch": 0.0026923562323839974,
"grad_norm": 2.979590654373169,
"learning_rate": 4.697986577181209e-06,
"loss": 5.3311,
"step": 8
},
{
"epoch": 0.0033654452904799967,
"grad_norm": 3.1792373657226562,
"learning_rate": 6.04026845637584e-06,
"loss": 5.3782,
"step": 10
},
{
"epoch": 0.004038534348575996,
"grad_norm": 3.9909653663635254,
"learning_rate": 7.382550335570471e-06,
"loss": 5.2583,
"step": 12
},
{
"epoch": 0.004711623406671995,
"grad_norm": 3.31044340133667,
"learning_rate": 8.724832214765101e-06,
"loss": 5.3198,
"step": 14
},
{
"epoch": 0.005384712464767995,
"grad_norm": 3.3201687335968018,
"learning_rate": 1.006711409395973e-05,
"loss": 5.1138,
"step": 16
},
{
"epoch": 0.006057801522863994,
"grad_norm": 2.8034451007843018,
"learning_rate": 1.1409395973154363e-05,
"loss": 5.0022,
"step": 18
},
{
"epoch": 0.0067308905809599934,
"grad_norm": 2.044506072998047,
"learning_rate": 1.2751677852348994e-05,
"loss": 4.8879,
"step": 20
},
{
"epoch": 0.007403979639055993,
"grad_norm": 1.6194826364517212,
"learning_rate": 1.4093959731543624e-05,
"loss": 4.7436,
"step": 22
},
{
"epoch": 0.008077068697151992,
"grad_norm": 1.4383995532989502,
"learning_rate": 1.5436241610738255e-05,
"loss": 4.5798,
"step": 24
},
{
"epoch": 0.00875015775524799,
"grad_norm": 1.174633264541626,
"learning_rate": 1.6778523489932888e-05,
"loss": 4.4188,
"step": 26
},
{
"epoch": 0.00942324681334399,
"grad_norm": 1.1277130842208862,
"learning_rate": 1.8120805369127517e-05,
"loss": 4.4374,
"step": 28
},
{
"epoch": 0.01009633587143999,
"grad_norm": 1.0426617860794067,
"learning_rate": 1.946308724832215e-05,
"loss": 4.2195,
"step": 30
},
{
"epoch": 0.01076942492953599,
"grad_norm": 0.9180749654769897,
"learning_rate": 2.080536912751678e-05,
"loss": 4.1755,
"step": 32
},
{
"epoch": 0.011442513987631988,
"grad_norm": 1.0104376077651978,
"learning_rate": 2.2147651006711412e-05,
"loss": 4.0892,
"step": 34
},
{
"epoch": 0.012115603045727988,
"grad_norm": 0.9427777528762817,
"learning_rate": 2.348993288590604e-05,
"loss": 4.0498,
"step": 36
},
{
"epoch": 0.012788692103823987,
"grad_norm": 1.0120079517364502,
"learning_rate": 2.4832214765100674e-05,
"loss": 3.9471,
"step": 38
},
{
"epoch": 0.013461781161919987,
"grad_norm": 1.0842680931091309,
"learning_rate": 2.6174496644295304e-05,
"loss": 3.8882,
"step": 40
},
{
"epoch": 0.014134870220015985,
"grad_norm": 1.3515102863311768,
"learning_rate": 2.7516778523489933e-05,
"loss": 3.8274,
"step": 42
},
{
"epoch": 0.014807959278111986,
"grad_norm": 1.1003209352493286,
"learning_rate": 2.885906040268457e-05,
"loss": 3.7735,
"step": 44
},
{
"epoch": 0.015481048336207984,
"grad_norm": 0.9302487373352051,
"learning_rate": 3.02013422818792e-05,
"loss": 3.731,
"step": 46
},
{
"epoch": 0.016154137394303984,
"grad_norm": 1.0311543941497803,
"learning_rate": 3.1543624161073825e-05,
"loss": 3.6608,
"step": 48
},
{
"epoch": 0.016827226452399983,
"grad_norm": 1.0927435159683228,
"learning_rate": 3.288590604026846e-05,
"loss": 3.5962,
"step": 50
},
{
"epoch": 0.01750031551049598,
"grad_norm": 1.3255321979522705,
"learning_rate": 3.422818791946309e-05,
"loss": 3.5669,
"step": 52
},
{
"epoch": 0.018173404568591983,
"grad_norm": 0.9304305911064148,
"learning_rate": 3.557046979865772e-05,
"loss": 3.5456,
"step": 54
},
{
"epoch": 0.01884649362668798,
"grad_norm": 1.159280776977539,
"learning_rate": 3.6912751677852356e-05,
"loss": 3.5189,
"step": 56
},
{
"epoch": 0.01951958268478398,
"grad_norm": 0.9376134276390076,
"learning_rate": 3.8255033557046985e-05,
"loss": 3.432,
"step": 58
},
{
"epoch": 0.02019267174287998,
"grad_norm": 1.292802095413208,
"learning_rate": 3.959731543624161e-05,
"loss": 3.4564,
"step": 60
},
{
"epoch": 0.02086576080097598,
"grad_norm": 1.2383852005004883,
"learning_rate": 4.0939597315436244e-05,
"loss": 3.4194,
"step": 62
},
{
"epoch": 0.02153884985907198,
"grad_norm": 0.8546445369720459,
"learning_rate": 4.228187919463087e-05,
"loss": 3.3873,
"step": 64
},
{
"epoch": 0.022211938917167977,
"grad_norm": 1.4104743003845215,
"learning_rate": 4.36241610738255e-05,
"loss": 3.3647,
"step": 66
},
{
"epoch": 0.022885027975263976,
"grad_norm": 1.3548426628112793,
"learning_rate": 4.496644295302014e-05,
"loss": 3.3213,
"step": 68
},
{
"epoch": 0.023558117033359978,
"grad_norm": 1.1530455350875854,
"learning_rate": 4.630872483221477e-05,
"loss": 3.2789,
"step": 70
},
{
"epoch": 0.024231206091455976,
"grad_norm": 1.3092457056045532,
"learning_rate": 4.76510067114094e-05,
"loss": 3.2922,
"step": 72
},
{
"epoch": 0.024904295149551975,
"grad_norm": 1.577699065208435,
"learning_rate": 4.8993288590604034e-05,
"loss": 3.2308,
"step": 74
},
{
"epoch": 0.025577384207647973,
"grad_norm": 1.3348486423492432,
"learning_rate": 5.033557046979866e-05,
"loss": 3.2656,
"step": 76
},
{
"epoch": 0.026250473265743975,
"grad_norm": 1.1966625452041626,
"learning_rate": 5.167785234899329e-05,
"loss": 3.2703,
"step": 78
},
{
"epoch": 0.026923562323839974,
"grad_norm": 1.3125278949737549,
"learning_rate": 5.302013422818792e-05,
"loss": 3.2034,
"step": 80
},
{
"epoch": 0.027596651381935972,
"grad_norm": 1.1957862377166748,
"learning_rate": 5.436241610738255e-05,
"loss": 3.2476,
"step": 82
},
{
"epoch": 0.02826974044003197,
"grad_norm": 1.2177337408065796,
"learning_rate": 5.570469798657718e-05,
"loss": 3.2166,
"step": 84
},
{
"epoch": 0.028942829498127973,
"grad_norm": 0.8806389570236206,
"learning_rate": 5.704697986577181e-05,
"loss": 3.1722,
"step": 86
},
{
"epoch": 0.02961591855622397,
"grad_norm": 1.8180561065673828,
"learning_rate": 5.838926174496645e-05,
"loss": 3.1597,
"step": 88
},
{
"epoch": 0.03028900761431997,
"grad_norm": 1.1676297187805176,
"learning_rate": 5.973154362416108e-05,
"loss": 3.1829,
"step": 90
},
{
"epoch": 0.030962096672415968,
"grad_norm": 1.0163198709487915,
"learning_rate": 6.107382550335571e-05,
"loss": 3.1643,
"step": 92
},
{
"epoch": 0.03163518573051197,
"grad_norm": 1.0734015703201294,
"learning_rate": 6.241610738255034e-05,
"loss": 3.1692,
"step": 94
},
{
"epoch": 0.03230827478860797,
"grad_norm": 1.4144916534423828,
"learning_rate": 6.375838926174497e-05,
"loss": 3.1217,
"step": 96
},
{
"epoch": 0.03298136384670397,
"grad_norm": 1.5647915601730347,
"learning_rate": 6.51006711409396e-05,
"loss": 3.1324,
"step": 98
},
{
"epoch": 0.033654452904799965,
"grad_norm": 1.1999105215072632,
"learning_rate": 6.644295302013423e-05,
"loss": 3.1103,
"step": 100
},
{
"epoch": 0.03432754196289597,
"grad_norm": 1.512838363647461,
"learning_rate": 6.778523489932886e-05,
"loss": 3.1036,
"step": 102
},
{
"epoch": 0.03500063102099196,
"grad_norm": 1.1092990636825562,
"learning_rate": 6.912751677852349e-05,
"loss": 3.1244,
"step": 104
},
{
"epoch": 0.035673720079087964,
"grad_norm": 1.2763620615005493,
"learning_rate": 7.046979865771812e-05,
"loss": 3.0989,
"step": 106
},
{
"epoch": 0.036346809137183966,
"grad_norm": 1.2328648567199707,
"learning_rate": 7.181208053691275e-05,
"loss": 3.0506,
"step": 108
},
{
"epoch": 0.03701989819527996,
"grad_norm": 1.6039047241210938,
"learning_rate": 7.315436241610739e-05,
"loss": 3.0589,
"step": 110
},
{
"epoch": 0.03769298725337596,
"grad_norm": 1.7161307334899902,
"learning_rate": 7.449664429530202e-05,
"loss": 3.0296,
"step": 112
},
{
"epoch": 0.03836607631147196,
"grad_norm": 1.2628991603851318,
"learning_rate": 7.583892617449665e-05,
"loss": 3.063,
"step": 114
},
{
"epoch": 0.03903916536956796,
"grad_norm": 1.1414180994033813,
"learning_rate": 7.718120805369128e-05,
"loss": 3.0366,
"step": 116
},
{
"epoch": 0.03971225442766396,
"grad_norm": 1.5152932405471802,
"learning_rate": 7.852348993288591e-05,
"loss": 3.0241,
"step": 118
},
{
"epoch": 0.04038534348575996,
"grad_norm": 1.6119567155838013,
"learning_rate": 7.986577181208054e-05,
"loss": 3.0139,
"step": 120
},
{
"epoch": 0.04105843254385596,
"grad_norm": 1.6078091859817505,
"learning_rate": 8.120805369127518e-05,
"loss": 3.0288,
"step": 122
},
{
"epoch": 0.04173152160195196,
"grad_norm": 1.1092705726623535,
"learning_rate": 8.255033557046981e-05,
"loss": 2.9996,
"step": 124
},
{
"epoch": 0.042404610660047956,
"grad_norm": 1.2352242469787598,
"learning_rate": 8.389261744966444e-05,
"loss": 2.9872,
"step": 126
},
{
"epoch": 0.04307769971814396,
"grad_norm": 1.636400580406189,
"learning_rate": 8.523489932885907e-05,
"loss": 2.9814,
"step": 128
},
{
"epoch": 0.04375078877623995,
"grad_norm": 1.4877128601074219,
"learning_rate": 8.65771812080537e-05,
"loss": 2.9756,
"step": 130
},
{
"epoch": 0.044423877834335955,
"grad_norm": 1.2983709573745728,
"learning_rate": 8.791946308724833e-05,
"loss": 2.9756,
"step": 132
},
{
"epoch": 0.04509696689243196,
"grad_norm": 1.7350983619689941,
"learning_rate": 8.926174496644296e-05,
"loss": 2.9579,
"step": 134
},
{
"epoch": 0.04577005595052795,
"grad_norm": 0.978854775428772,
"learning_rate": 9.060402684563759e-05,
"loss": 2.9269,
"step": 136
},
{
"epoch": 0.046443145008623954,
"grad_norm": 1.362163782119751,
"learning_rate": 9.194630872483221e-05,
"loss": 2.99,
"step": 138
},
{
"epoch": 0.047116234066719956,
"grad_norm": 1.328202247619629,
"learning_rate": 9.328859060402684e-05,
"loss": 2.966,
"step": 140
},
{
"epoch": 0.04778932312481595,
"grad_norm": 1.2497445344924927,
"learning_rate": 9.463087248322147e-05,
"loss": 2.9254,
"step": 142
},
{
"epoch": 0.04846241218291195,
"grad_norm": 1.071092128753662,
"learning_rate": 9.59731543624161e-05,
"loss": 2.9597,
"step": 144
},
{
"epoch": 0.04913550124100795,
"grad_norm": 1.451729416847229,
"learning_rate": 9.731543624161075e-05,
"loss": 2.9605,
"step": 146
},
{
"epoch": 0.04980859029910395,
"grad_norm": 1.328731656074524,
"learning_rate": 9.865771812080538e-05,
"loss": 2.9493,
"step": 148
},
{
"epoch": 0.05048167935719995,
"grad_norm": 1.4675222635269165,
"learning_rate": 0.0001,
"loss": 2.9298,
"step": 150
},
{
"epoch": 0.05115476841529595,
"grad_norm": 1.208961009979248,
"learning_rate": 9.99998761551904e-05,
"loss": 2.9382,
"step": 152
},
{
"epoch": 0.05182785747339195,
"grad_norm": 1.0392056703567505,
"learning_rate": 9.999950462137508e-05,
"loss": 2.8829,
"step": 154
},
{
"epoch": 0.05250094653148795,
"grad_norm": 1.378711462020874,
"learning_rate": 9.999888540039458e-05,
"loss": 2.9041,
"step": 156
},
{
"epoch": 0.053174035589583946,
"grad_norm": 1.2687252759933472,
"learning_rate": 9.999801849531635e-05,
"loss": 2.9148,
"step": 158
},
{
"epoch": 0.05384712464767995,
"grad_norm": 1.2382102012634277,
"learning_rate": 9.999690391043487e-05,
"loss": 2.9107,
"step": 160
},
{
"epoch": 0.05452021370577594,
"grad_norm": 1.0215928554534912,
"learning_rate": 9.999554165127159e-05,
"loss": 2.9187,
"step": 162
},
{
"epoch": 0.055193302763871945,
"grad_norm": 1.2638540267944336,
"learning_rate": 9.99939317245748e-05,
"loss": 2.9261,
"step": 164
},
{
"epoch": 0.055866391821967946,
"grad_norm": 1.3826959133148193,
"learning_rate": 9.999207413831982e-05,
"loss": 2.8944,
"step": 166
},
{
"epoch": 0.05653948088006394,
"grad_norm": 1.0764875411987305,
"learning_rate": 9.998996890170867e-05,
"loss": 2.9031,
"step": 168
},
{
"epoch": 0.05721256993815994,
"grad_norm": 1.5163936614990234,
"learning_rate": 9.99876160251703e-05,
"loss": 2.8687,
"step": 170
},
{
"epoch": 0.057885658996255945,
"grad_norm": 1.5821291208267212,
"learning_rate": 9.998501552036037e-05,
"loss": 2.8828,
"step": 172
},
{
"epoch": 0.05855874805435194,
"grad_norm": 1.4572120904922485,
"learning_rate": 9.998216740016124e-05,
"loss": 2.8898,
"step": 174
},
{
"epoch": 0.05923183711244794,
"grad_norm": 1.2185218334197998,
"learning_rate": 9.99790716786819e-05,
"loss": 2.8456,
"step": 176
},
{
"epoch": 0.05990492617054394,
"grad_norm": 1.106446385383606,
"learning_rate": 9.99757283712579e-05,
"loss": 2.8793,
"step": 178
},
{
"epoch": 0.06057801522863994,
"grad_norm": 1.1448893547058105,
"learning_rate": 9.997213749445129e-05,
"loss": 2.8579,
"step": 180
},
{
"epoch": 0.06125110428673594,
"grad_norm": 1.1587834358215332,
"learning_rate": 9.996829906605056e-05,
"loss": 2.8839,
"step": 182
},
{
"epoch": 0.061924193344831936,
"grad_norm": 1.0969592332839966,
"learning_rate": 9.996421310507046e-05,
"loss": 2.8638,
"step": 184
},
{
"epoch": 0.06259728240292793,
"grad_norm": 0.9740116000175476,
"learning_rate": 9.9959879631752e-05,
"loss": 2.8455,
"step": 186
},
{
"epoch": 0.06327037146102393,
"grad_norm": 1.2307910919189453,
"learning_rate": 9.995529866756231e-05,
"loss": 2.8534,
"step": 188
},
{
"epoch": 0.06394346051911994,
"grad_norm": 1.5021939277648926,
"learning_rate": 9.995047023519452e-05,
"loss": 2.8469,
"step": 190
},
{
"epoch": 0.06461654957721594,
"grad_norm": 1.1044224500656128,
"learning_rate": 9.994539435856771e-05,
"loss": 2.8429,
"step": 192
},
{
"epoch": 0.06528963863531194,
"grad_norm": 1.4586883783340454,
"learning_rate": 9.99400710628267e-05,
"loss": 2.836,
"step": 194
},
{
"epoch": 0.06596272769340794,
"grad_norm": 1.2613426446914673,
"learning_rate": 9.993450037434199e-05,
"loss": 2.8243,
"step": 196
},
{
"epoch": 0.06663581675150393,
"grad_norm": 1.0347422361373901,
"learning_rate": 9.992868232070963e-05,
"loss": 2.7965,
"step": 198
},
{
"epoch": 0.06730890580959993,
"grad_norm": 2.1357574462890625,
"learning_rate": 9.992261693075103e-05,
"loss": 2.8486,
"step": 200
},
{
"epoch": 0.06798199486769593,
"grad_norm": 1.0357908010482788,
"learning_rate": 9.991630423451286e-05,
"loss": 2.8386,
"step": 202
},
{
"epoch": 0.06865508392579193,
"grad_norm": 1.1383159160614014,
"learning_rate": 9.990974426326696e-05,
"loss": 2.7874,
"step": 204
},
{
"epoch": 0.06932817298388794,
"grad_norm": 0.8452678322792053,
"learning_rate": 9.990293704951001e-05,
"loss": 2.786,
"step": 206
},
{
"epoch": 0.07000126204198392,
"grad_norm": 0.9482727646827698,
"learning_rate": 9.989588262696357e-05,
"loss": 2.8156,
"step": 208
},
{
"epoch": 0.07067435110007993,
"grad_norm": 0.8251766562461853,
"learning_rate": 9.988858103057378e-05,
"loss": 2.7588,
"step": 210
},
{
"epoch": 0.07134744015817593,
"grad_norm": 1.211065649986267,
"learning_rate": 9.988103229651121e-05,
"loss": 2.7623,
"step": 212
},
{
"epoch": 0.07202052921627193,
"grad_norm": 0.8990377187728882,
"learning_rate": 9.987323646217075e-05,
"loss": 2.8164,
"step": 214
},
{
"epoch": 0.07269361827436793,
"grad_norm": 0.9878025054931641,
"learning_rate": 9.986519356617132e-05,
"loss": 2.7847,
"step": 216
},
{
"epoch": 0.07336670733246392,
"grad_norm": 0.7551445364952087,
"learning_rate": 9.985690364835576e-05,
"loss": 2.8111,
"step": 218
},
{
"epoch": 0.07403979639055992,
"grad_norm": 0.9582260251045227,
"learning_rate": 9.984836674979062e-05,
"loss": 2.793,
"step": 220
},
{
"epoch": 0.07471288544865592,
"grad_norm": 0.8087739944458008,
"learning_rate": 9.983958291276591e-05,
"loss": 2.7464,
"step": 222
},
{
"epoch": 0.07538597450675193,
"grad_norm": 1.2373522520065308,
"learning_rate": 9.983055218079493e-05,
"loss": 2.7656,
"step": 224
},
{
"epoch": 0.07605906356484793,
"grad_norm": 0.9746289849281311,
"learning_rate": 9.982127459861408e-05,
"loss": 2.7765,
"step": 226
},
{
"epoch": 0.07673215262294392,
"grad_norm": 0.6946307420730591,
"learning_rate": 9.981175021218255e-05,
"loss": 2.7491,
"step": 228
},
{
"epoch": 0.07740524168103992,
"grad_norm": 0.8959107398986816,
"learning_rate": 9.980197906868215e-05,
"loss": 2.7565,
"step": 230
},
{
"epoch": 0.07807833073913592,
"grad_norm": 0.9889335036277771,
"learning_rate": 9.979196121651716e-05,
"loss": 2.7974,
"step": 232
},
{
"epoch": 0.07875141979723192,
"grad_norm": 0.9802746176719666,
"learning_rate": 9.978169670531388e-05,
"loss": 2.7772,
"step": 234
},
{
"epoch": 0.07942450885532792,
"grad_norm": 0.6934760808944702,
"learning_rate": 9.977118558592059e-05,
"loss": 2.7602,
"step": 236
},
{
"epoch": 0.08009759791342393,
"grad_norm": 0.8996357917785645,
"learning_rate": 9.97604279104072e-05,
"loss": 2.7669,
"step": 238
},
{
"epoch": 0.08077068697151991,
"grad_norm": 0.8844061493873596,
"learning_rate": 9.974942373206499e-05,
"loss": 2.7458,
"step": 240
},
{
"epoch": 0.08144377602961592,
"grad_norm": 1.023626685142517,
"learning_rate": 9.973817310540638e-05,
"loss": 2.7639,
"step": 242
},
{
"epoch": 0.08211686508771192,
"grad_norm": 0.8241132497787476,
"learning_rate": 9.972667608616466e-05,
"loss": 2.7457,
"step": 244
},
{
"epoch": 0.08278995414580792,
"grad_norm": 0.7864794135093689,
"learning_rate": 9.971493273129364e-05,
"loss": 2.763,
"step": 246
},
{
"epoch": 0.08346304320390392,
"grad_norm": 1.071751356124878,
"learning_rate": 9.970294309896747e-05,
"loss": 2.7347,
"step": 248
},
{
"epoch": 0.08413613226199991,
"grad_norm": 0.8978875279426575,
"learning_rate": 9.969070724858031e-05,
"loss": 2.7807,
"step": 250
},
{
"epoch": 0.08480922132009591,
"grad_norm": 0.9984204769134521,
"learning_rate": 9.967822524074602e-05,
"loss": 2.7399,
"step": 252
},
{
"epoch": 0.08548231037819191,
"grad_norm": 0.8611739873886108,
"learning_rate": 9.966549713729787e-05,
"loss": 2.753,
"step": 254
},
{
"epoch": 0.08615539943628792,
"grad_norm": 0.8647720217704773,
"learning_rate": 9.965252300128826e-05,
"loss": 2.7224,
"step": 256
},
{
"epoch": 0.08682848849438392,
"grad_norm": 0.8688477873802185,
"learning_rate": 9.963930289698833e-05,
"loss": 2.6879,
"step": 258
},
{
"epoch": 0.0875015775524799,
"grad_norm": 1.1445469856262207,
"learning_rate": 9.962583688988778e-05,
"loss": 2.739,
"step": 260
},
{
"epoch": 0.08817466661057591,
"grad_norm": 0.8668599128723145,
"learning_rate": 9.961212504669437e-05,
"loss": 2.6962,
"step": 262
},
{
"epoch": 0.08884775566867191,
"grad_norm": 0.905125617980957,
"learning_rate": 9.959816743533375e-05,
"loss": 2.7239,
"step": 264
},
{
"epoch": 0.08952084472676791,
"grad_norm": 0.8252028822898865,
"learning_rate": 9.958396412494901e-05,
"loss": 2.7381,
"step": 266
},
{
"epoch": 0.09019393378486391,
"grad_norm": 0.7380514740943909,
"learning_rate": 9.956951518590043e-05,
"loss": 2.7135,
"step": 268
},
{
"epoch": 0.09086702284295992,
"grad_norm": 0.7395239472389221,
"learning_rate": 9.955482068976502e-05,
"loss": 2.6954,
"step": 270
},
{
"epoch": 0.0915401119010559,
"grad_norm": 0.6564229726791382,
"learning_rate": 9.953988070933628e-05,
"loss": 2.7145,
"step": 272
},
{
"epoch": 0.0922132009591519,
"grad_norm": 0.7306910157203674,
"learning_rate": 9.952469531862378e-05,
"loss": 2.6951,
"step": 274
},
{
"epoch": 0.09288629001724791,
"grad_norm": 0.6810031533241272,
"learning_rate": 9.950926459285277e-05,
"loss": 2.7201,
"step": 276
},
{
"epoch": 0.09355937907534391,
"grad_norm": 0.6724168658256531,
"learning_rate": 9.949358860846388e-05,
"loss": 2.7112,
"step": 278
},
{
"epoch": 0.09423246813343991,
"grad_norm": 0.7065703272819519,
"learning_rate": 9.947766744311268e-05,
"loss": 2.6884,
"step": 280
},
{
"epoch": 0.0949055571915359,
"grad_norm": 0.8231908679008484,
"learning_rate": 9.946150117566931e-05,
"loss": 2.7286,
"step": 282
},
{
"epoch": 0.0955786462496319,
"grad_norm": 0.9570270776748657,
"learning_rate": 9.944508988621812e-05,
"loss": 2.7166,
"step": 284
},
{
"epoch": 0.0962517353077279,
"grad_norm": 0.9357023239135742,
"learning_rate": 9.94284336560572e-05,
"loss": 2.6768,
"step": 286
},
{
"epoch": 0.0969248243658239,
"grad_norm": 0.6350796222686768,
"learning_rate": 9.941153256769809e-05,
"loss": 2.6921,
"step": 288
},
{
"epoch": 0.09759791342391991,
"grad_norm": 0.6700872778892517,
"learning_rate": 9.939438670486525e-05,
"loss": 2.6847,
"step": 290
},
{
"epoch": 0.0982710024820159,
"grad_norm": 0.6851752400398254,
"learning_rate": 9.937699615249572e-05,
"loss": 2.6586,
"step": 292
},
{
"epoch": 0.0989440915401119,
"grad_norm": 0.7098946571350098,
"learning_rate": 9.935936099673871e-05,
"loss": 2.6793,
"step": 294
},
{
"epoch": 0.0996171805982079,
"grad_norm": 0.680543839931488,
"learning_rate": 9.934148132495511e-05,
"loss": 2.6763,
"step": 296
},
{
"epoch": 0.1002902696563039,
"grad_norm": 0.6832155585289001,
"learning_rate": 9.932335722571709e-05,
"loss": 2.6768,
"step": 298
},
{
"epoch": 0.1009633587143999,
"grad_norm": 0.8236553072929382,
"learning_rate": 9.930498878880768e-05,
"loss": 2.6738,
"step": 300
}
],
"logging_steps": 2,
"max_steps": 2972,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 300,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.377550336196608e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}