{
"best_metric": 0.6153644323348999,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.1970928800197093,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009854644000985464,
"grad_norm": 0.21368320286273956,
"learning_rate": 5e-06,
"loss": 0.0811,
"step": 1
},
{
"epoch": 0.0009854644000985464,
"eval_loss": 0.8373544812202454,
"eval_runtime": 66.3618,
"eval_samples_per_second": 25.753,
"eval_steps_per_second": 12.884,
"step": 1
},
{
"epoch": 0.001970928800197093,
"grad_norm": 0.260768860578537,
"learning_rate": 1e-05,
"loss": 0.2399,
"step": 2
},
{
"epoch": 0.0029563932002956393,
"grad_norm": 0.44264376163482666,
"learning_rate": 1.5e-05,
"loss": 0.364,
"step": 3
},
{
"epoch": 0.003941857600394186,
"grad_norm": 0.5036729574203491,
"learning_rate": 2e-05,
"loss": 0.3749,
"step": 4
},
{
"epoch": 0.004927322000492732,
"grad_norm": 0.45520466566085815,
"learning_rate": 2.5e-05,
"loss": 0.4327,
"step": 5
},
{
"epoch": 0.005912786400591279,
"grad_norm": 0.43378737568855286,
"learning_rate": 3e-05,
"loss": 0.4906,
"step": 6
},
{
"epoch": 0.006898250800689825,
"grad_norm": 0.29966574907302856,
"learning_rate": 3.5e-05,
"loss": 0.6722,
"step": 7
},
{
"epoch": 0.007883715200788372,
"grad_norm": 0.288310706615448,
"learning_rate": 4e-05,
"loss": 0.7376,
"step": 8
},
{
"epoch": 0.008869179600886918,
"grad_norm": 0.26541921496391296,
"learning_rate": 4.5e-05,
"loss": 0.7256,
"step": 9
},
{
"epoch": 0.009854644000985464,
"grad_norm": 0.23990140855312347,
"learning_rate": 5e-05,
"loss": 0.8011,
"step": 10
},
{
"epoch": 0.01084010840108401,
"grad_norm": 0.2725456655025482,
"learning_rate": 5.500000000000001e-05,
"loss": 0.7134,
"step": 11
},
{
"epoch": 0.011825572801182557,
"grad_norm": 0.26453617215156555,
"learning_rate": 6e-05,
"loss": 0.817,
"step": 12
},
{
"epoch": 0.012811037201281104,
"grad_norm": 0.25874823331832886,
"learning_rate": 6.500000000000001e-05,
"loss": 0.6065,
"step": 13
},
{
"epoch": 0.01379650160137965,
"grad_norm": 0.2898238003253937,
"learning_rate": 7e-05,
"loss": 0.648,
"step": 14
},
{
"epoch": 0.014781966001478197,
"grad_norm": 0.29036572575569153,
"learning_rate": 7.500000000000001e-05,
"loss": 0.8054,
"step": 15
},
{
"epoch": 0.015767430401576743,
"grad_norm": 0.2954308092594147,
"learning_rate": 8e-05,
"loss": 0.724,
"step": 16
},
{
"epoch": 0.01675289480167529,
"grad_norm": 0.27134811878204346,
"learning_rate": 8.5e-05,
"loss": 0.6222,
"step": 17
},
{
"epoch": 0.017738359201773836,
"grad_norm": 0.27076807618141174,
"learning_rate": 9e-05,
"loss": 0.6876,
"step": 18
},
{
"epoch": 0.018723823601872382,
"grad_norm": 0.2982608675956726,
"learning_rate": 9.5e-05,
"loss": 0.7342,
"step": 19
},
{
"epoch": 0.01970928800197093,
"grad_norm": 0.2779473066329956,
"learning_rate": 0.0001,
"loss": 0.7138,
"step": 20
},
{
"epoch": 0.020694752402069475,
"grad_norm": 0.2604323625564575,
"learning_rate": 9.999238475781957e-05,
"loss": 0.6335,
"step": 21
},
{
"epoch": 0.02168021680216802,
"grad_norm": 0.2886645495891571,
"learning_rate": 9.99695413509548e-05,
"loss": 0.6257,
"step": 22
},
{
"epoch": 0.022665681202266568,
"grad_norm": 0.2738495469093323,
"learning_rate": 9.99314767377287e-05,
"loss": 0.6123,
"step": 23
},
{
"epoch": 0.023651145602365115,
"grad_norm": 0.29884693026542664,
"learning_rate": 9.987820251299122e-05,
"loss": 0.7732,
"step": 24
},
{
"epoch": 0.02463661000246366,
"grad_norm": 0.28413522243499756,
"learning_rate": 9.980973490458728e-05,
"loss": 0.737,
"step": 25
},
{
"epoch": 0.025622074402562207,
"grad_norm": 0.284929484128952,
"learning_rate": 9.972609476841367e-05,
"loss": 0.6115,
"step": 26
},
{
"epoch": 0.026607538802660754,
"grad_norm": 0.3191145360469818,
"learning_rate": 9.962730758206611e-05,
"loss": 0.8331,
"step": 27
},
{
"epoch": 0.0275930032027593,
"grad_norm": 0.288494348526001,
"learning_rate": 9.951340343707852e-05,
"loss": 0.6987,
"step": 28
},
{
"epoch": 0.028578467602857847,
"grad_norm": 0.33966076374053955,
"learning_rate": 9.938441702975689e-05,
"loss": 0.787,
"step": 29
},
{
"epoch": 0.029563932002956393,
"grad_norm": 0.29486000537872314,
"learning_rate": 9.924038765061042e-05,
"loss": 0.7766,
"step": 30
},
{
"epoch": 0.03054939640305494,
"grad_norm": 0.3164560794830322,
"learning_rate": 9.908135917238321e-05,
"loss": 0.7605,
"step": 31
},
{
"epoch": 0.031534860803153486,
"grad_norm": 0.3024621605873108,
"learning_rate": 9.890738003669029e-05,
"loss": 0.6868,
"step": 32
},
{
"epoch": 0.032520325203252036,
"grad_norm": 0.31722673773765564,
"learning_rate": 9.871850323926177e-05,
"loss": 0.711,
"step": 33
},
{
"epoch": 0.03350578960335058,
"grad_norm": 0.34064680337905884,
"learning_rate": 9.851478631379982e-05,
"loss": 0.7589,
"step": 34
},
{
"epoch": 0.03449125400344913,
"grad_norm": 0.3236991763114929,
"learning_rate": 9.829629131445342e-05,
"loss": 0.6888,
"step": 35
},
{
"epoch": 0.03547671840354767,
"grad_norm": 0.3362804055213928,
"learning_rate": 9.806308479691595e-05,
"loss": 0.7985,
"step": 36
},
{
"epoch": 0.03646218280364622,
"grad_norm": 0.36856207251548767,
"learning_rate": 9.781523779815179e-05,
"loss": 0.8506,
"step": 37
},
{
"epoch": 0.037447647203744765,
"grad_norm": 0.38532865047454834,
"learning_rate": 9.755282581475769e-05,
"loss": 0.6945,
"step": 38
},
{
"epoch": 0.038433111603843315,
"grad_norm": 0.3765904903411865,
"learning_rate": 9.727592877996585e-05,
"loss": 0.8341,
"step": 39
},
{
"epoch": 0.03941857600394186,
"grad_norm": 0.3601222336292267,
"learning_rate": 9.698463103929542e-05,
"loss": 0.6398,
"step": 40
},
{
"epoch": 0.04040404040404041,
"grad_norm": 0.40156570076942444,
"learning_rate": 9.667902132486009e-05,
"loss": 0.6301,
"step": 41
},
{
"epoch": 0.04138950480413895,
"grad_norm": 0.4138280153274536,
"learning_rate": 9.635919272833938e-05,
"loss": 0.7974,
"step": 42
},
{
"epoch": 0.0423749692042375,
"grad_norm": 0.4361515939235687,
"learning_rate": 9.602524267262203e-05,
"loss": 0.7225,
"step": 43
},
{
"epoch": 0.04336043360433604,
"grad_norm": 0.4487519860267639,
"learning_rate": 9.567727288213005e-05,
"loss": 0.7484,
"step": 44
},
{
"epoch": 0.04434589800443459,
"grad_norm": 0.43802666664123535,
"learning_rate": 9.53153893518325e-05,
"loss": 0.7125,
"step": 45
},
{
"epoch": 0.045331362404533136,
"grad_norm": 0.4856754541397095,
"learning_rate": 9.493970231495835e-05,
"loss": 0.9,
"step": 46
},
{
"epoch": 0.046316826804631686,
"grad_norm": 0.5210389494895935,
"learning_rate": 9.45503262094184e-05,
"loss": 0.9333,
"step": 47
},
{
"epoch": 0.04730229120473023,
"grad_norm": 0.5823840498924255,
"learning_rate": 9.414737964294636e-05,
"loss": 0.7554,
"step": 48
},
{
"epoch": 0.04828775560482878,
"grad_norm": 0.7275289297103882,
"learning_rate": 9.373098535696979e-05,
"loss": 0.9166,
"step": 49
},
{
"epoch": 0.04927322000492732,
"grad_norm": 0.8048695921897888,
"learning_rate": 9.330127018922194e-05,
"loss": 0.9283,
"step": 50
},
{
"epoch": 0.04927322000492732,
"eval_loss": 0.6464605927467346,
"eval_runtime": 66.2868,
"eval_samples_per_second": 25.782,
"eval_steps_per_second": 12.899,
"step": 50
},
{
"epoch": 0.05025868440502587,
"grad_norm": 0.12230159342288971,
"learning_rate": 9.285836503510562e-05,
"loss": 0.0571,
"step": 51
},
{
"epoch": 0.051244148805124415,
"grad_norm": 0.11277984827756882,
"learning_rate": 9.24024048078213e-05,
"loss": 0.1799,
"step": 52
},
{
"epoch": 0.052229613205222965,
"grad_norm": 0.1283257156610489,
"learning_rate": 9.193352839727121e-05,
"loss": 0.2379,
"step": 53
},
{
"epoch": 0.05321507760532151,
"grad_norm": 0.1701822280883789,
"learning_rate": 9.145187862775209e-05,
"loss": 0.3356,
"step": 54
},
{
"epoch": 0.05420054200542006,
"grad_norm": 0.20211534202098846,
"learning_rate": 9.09576022144496e-05,
"loss": 0.4511,
"step": 55
},
{
"epoch": 0.0551860064055186,
"grad_norm": 0.20168742537498474,
"learning_rate": 9.045084971874738e-05,
"loss": 0.558,
"step": 56
},
{
"epoch": 0.05617147080561715,
"grad_norm": 0.1796450912952423,
"learning_rate": 8.993177550236464e-05,
"loss": 0.5004,
"step": 57
},
{
"epoch": 0.057156935205715693,
"grad_norm": 0.21156959235668182,
"learning_rate": 8.940053768033609e-05,
"loss": 0.6535,
"step": 58
},
{
"epoch": 0.05814239960581424,
"grad_norm": 0.19509942829608917,
"learning_rate": 8.885729807284856e-05,
"loss": 0.5512,
"step": 59
},
{
"epoch": 0.059127864005912786,
"grad_norm": 0.22828419506549835,
"learning_rate": 8.83022221559489e-05,
"loss": 0.8388,
"step": 60
},
{
"epoch": 0.060113328406011336,
"grad_norm": 0.20433418452739716,
"learning_rate": 8.773547901113862e-05,
"loss": 0.5995,
"step": 61
},
{
"epoch": 0.06109879280610988,
"grad_norm": 0.20512636005878448,
"learning_rate": 8.715724127386972e-05,
"loss": 0.6224,
"step": 62
},
{
"epoch": 0.06208425720620843,
"grad_norm": 0.21725022792816162,
"learning_rate": 8.656768508095853e-05,
"loss": 0.6128,
"step": 63
},
{
"epoch": 0.06306972160630697,
"grad_norm": 0.2174377739429474,
"learning_rate": 8.596699001693255e-05,
"loss": 0.6843,
"step": 64
},
{
"epoch": 0.06405518600640552,
"grad_norm": 0.2236977070569992,
"learning_rate": 8.535533905932738e-05,
"loss": 0.6858,
"step": 65
},
{
"epoch": 0.06504065040650407,
"grad_norm": 0.27690693736076355,
"learning_rate": 8.473291852294987e-05,
"loss": 0.9392,
"step": 66
},
{
"epoch": 0.06602611480660261,
"grad_norm": 0.22860707342624664,
"learning_rate": 8.409991800312493e-05,
"loss": 0.7385,
"step": 67
},
{
"epoch": 0.06701157920670116,
"grad_norm": 0.2298586070537567,
"learning_rate": 8.345653031794292e-05,
"loss": 0.7546,
"step": 68
},
{
"epoch": 0.0679970436067997,
"grad_norm": 0.2377099245786667,
"learning_rate": 8.280295144952536e-05,
"loss": 0.6743,
"step": 69
},
{
"epoch": 0.06898250800689826,
"grad_norm": 0.21426402032375336,
"learning_rate": 8.213938048432697e-05,
"loss": 0.5414,
"step": 70
},
{
"epoch": 0.0699679724069968,
"grad_norm": 0.23989328742027283,
"learning_rate": 8.146601955249188e-05,
"loss": 0.606,
"step": 71
},
{
"epoch": 0.07095343680709534,
"grad_norm": 0.2526426613330841,
"learning_rate": 8.07830737662829e-05,
"loss": 0.6877,
"step": 72
},
{
"epoch": 0.07193890120719389,
"grad_norm": 0.25667741894721985,
"learning_rate": 8.009075115760243e-05,
"loss": 0.8371,
"step": 73
},
{
"epoch": 0.07292436560729244,
"grad_norm": 0.2683584690093994,
"learning_rate": 7.938926261462366e-05,
"loss": 0.9071,
"step": 74
},
{
"epoch": 0.07390983000739099,
"grad_norm": 0.2747768461704254,
"learning_rate": 7.86788218175523e-05,
"loss": 0.7603,
"step": 75
},
{
"epoch": 0.07489529440748953,
"grad_norm": 0.2705404758453369,
"learning_rate": 7.795964517353735e-05,
"loss": 0.7255,
"step": 76
},
{
"epoch": 0.07588075880758807,
"grad_norm": 0.2925010323524475,
"learning_rate": 7.723195175075136e-05,
"loss": 0.7684,
"step": 77
},
{
"epoch": 0.07686622320768663,
"grad_norm": 0.2544151544570923,
"learning_rate": 7.649596321166024e-05,
"loss": 0.6322,
"step": 78
},
{
"epoch": 0.07785168760778517,
"grad_norm": 0.27934426069259644,
"learning_rate": 7.575190374550272e-05,
"loss": 0.7391,
"step": 79
},
{
"epoch": 0.07883715200788372,
"grad_norm": 0.3051762878894806,
"learning_rate": 7.500000000000001e-05,
"loss": 0.7494,
"step": 80
},
{
"epoch": 0.07982261640798226,
"grad_norm": 0.28656357526779175,
"learning_rate": 7.424048101231686e-05,
"loss": 0.7278,
"step": 81
},
{
"epoch": 0.08080808080808081,
"grad_norm": 0.3144802153110504,
"learning_rate": 7.347357813929454e-05,
"loss": 0.719,
"step": 82
},
{
"epoch": 0.08179354520817936,
"grad_norm": 0.3164990246295929,
"learning_rate": 7.269952498697734e-05,
"loss": 0.7447,
"step": 83
},
{
"epoch": 0.0827790096082779,
"grad_norm": 0.28684890270233154,
"learning_rate": 7.191855733945387e-05,
"loss": 0.7701,
"step": 84
},
{
"epoch": 0.08376447400837644,
"grad_norm": 0.328673779964447,
"learning_rate": 7.113091308703498e-05,
"loss": 0.6504,
"step": 85
},
{
"epoch": 0.084749938408475,
"grad_norm": 0.3479191064834595,
"learning_rate": 7.033683215379002e-05,
"loss": 0.816,
"step": 86
},
{
"epoch": 0.08573540280857354,
"grad_norm": 0.30887213349342346,
"learning_rate": 6.953655642446368e-05,
"loss": 0.6048,
"step": 87
},
{
"epoch": 0.08672086720867209,
"grad_norm": 0.3754432797431946,
"learning_rate": 6.873032967079561e-05,
"loss": 0.728,
"step": 88
},
{
"epoch": 0.08770633160877063,
"grad_norm": 0.3510769009590149,
"learning_rate": 6.7918397477265e-05,
"loss": 0.7624,
"step": 89
},
{
"epoch": 0.08869179600886919,
"grad_norm": 0.37762296199798584,
"learning_rate": 6.710100716628344e-05,
"loss": 0.6676,
"step": 90
},
{
"epoch": 0.08967726040896773,
"grad_norm": 0.34411317110061646,
"learning_rate": 6.627840772285784e-05,
"loss": 0.6249,
"step": 91
},
{
"epoch": 0.09066272480906627,
"grad_norm": 0.3781382739543915,
"learning_rate": 6.545084971874738e-05,
"loss": 0.5981,
"step": 92
},
{
"epoch": 0.09164818920916482,
"grad_norm": 0.4274924099445343,
"learning_rate": 6.461858523613684e-05,
"loss": 0.8801,
"step": 93
},
{
"epoch": 0.09263365360926337,
"grad_norm": 0.4174174964427948,
"learning_rate": 6.378186779084995e-05,
"loss": 0.6756,
"step": 94
},
{
"epoch": 0.09361911800936192,
"grad_norm": 0.47355586290359497,
"learning_rate": 6.294095225512603e-05,
"loss": 0.8179,
"step": 95
},
{
"epoch": 0.09460458240946046,
"grad_norm": 0.4370453953742981,
"learning_rate": 6.209609477998338e-05,
"loss": 0.7869,
"step": 96
},
{
"epoch": 0.095590046809559,
"grad_norm": 0.4838176667690277,
"learning_rate": 6.124755271719325e-05,
"loss": 0.8112,
"step": 97
},
{
"epoch": 0.09657551120965756,
"grad_norm": 0.5100703835487366,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.7746,
"step": 98
},
{
"epoch": 0.0975609756097561,
"grad_norm": 0.6169785261154175,
"learning_rate": 5.9540449768827246e-05,
"loss": 0.8988,
"step": 99
},
{
"epoch": 0.09854644000985464,
"grad_norm": 0.8071909546852112,
"learning_rate": 5.868240888334653e-05,
"loss": 0.6597,
"step": 100
},
{
"epoch": 0.09854644000985464,
"eval_loss": 0.6289290189743042,
"eval_runtime": 66.4133,
"eval_samples_per_second": 25.733,
"eval_steps_per_second": 12.874,
"step": 100
},
{
"epoch": 0.09953190440995319,
"grad_norm": 0.06508608162403107,
"learning_rate": 5.782172325201155e-05,
"loss": 0.0293,
"step": 101
},
{
"epoch": 0.10051736881005174,
"grad_norm": 0.10537426918745041,
"learning_rate": 5.695865504800327e-05,
"loss": 0.1911,
"step": 102
},
{
"epoch": 0.10150283321015029,
"grad_norm": 0.10470440238714218,
"learning_rate": 5.6093467170257374e-05,
"loss": 0.2062,
"step": 103
},
{
"epoch": 0.10248829761024883,
"grad_norm": 0.1329120248556137,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2705,
"step": 104
},
{
"epoch": 0.10347376201034737,
"grad_norm": 0.1280706822872162,
"learning_rate": 5.435778713738292e-05,
"loss": 0.2274,
"step": 105
},
{
"epoch": 0.10445922641044593,
"grad_norm": 0.16757312417030334,
"learning_rate": 5.348782368720626e-05,
"loss": 0.4932,
"step": 106
},
{
"epoch": 0.10544469081054447,
"grad_norm": 0.1751958429813385,
"learning_rate": 5.26167978121472e-05,
"loss": 0.5401,
"step": 107
},
{
"epoch": 0.10643015521064302,
"grad_norm": 0.1717756688594818,
"learning_rate": 5.174497483512506e-05,
"loss": 0.6055,
"step": 108
},
{
"epoch": 0.10741561961074156,
"grad_norm": 0.19734135270118713,
"learning_rate": 5.0872620321864185e-05,
"loss": 0.5538,
"step": 109
},
{
"epoch": 0.10840108401084012,
"grad_norm": 0.18675287067890167,
"learning_rate": 5e-05,
"loss": 0.5751,
"step": 110
},
{
"epoch": 0.10938654841093866,
"grad_norm": 0.19595959782600403,
"learning_rate": 4.912737967813583e-05,
"loss": 0.5783,
"step": 111
},
{
"epoch": 0.1103720128110372,
"grad_norm": 0.20387478172779083,
"learning_rate": 4.825502516487497e-05,
"loss": 0.6778,
"step": 112
},
{
"epoch": 0.11135747721113574,
"grad_norm": 0.19632427394390106,
"learning_rate": 4.738320218785281e-05,
"loss": 0.7262,
"step": 113
},
{
"epoch": 0.1123429416112343,
"grad_norm": 0.22888943552970886,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.6576,
"step": 114
},
{
"epoch": 0.11332840601133284,
"grad_norm": 0.21430076658725739,
"learning_rate": 4.564221286261709e-05,
"loss": 0.6493,
"step": 115
},
{
"epoch": 0.11431387041143139,
"grad_norm": 0.20526853203773499,
"learning_rate": 4.477357683661734e-05,
"loss": 0.66,
"step": 116
},
{
"epoch": 0.11529933481152993,
"grad_norm": 0.2236650139093399,
"learning_rate": 4.390653282974264e-05,
"loss": 0.6334,
"step": 117
},
{
"epoch": 0.11628479921162849,
"grad_norm": 0.22159314155578613,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.7894,
"step": 118
},
{
"epoch": 0.11727026361172703,
"grad_norm": 0.22929464280605316,
"learning_rate": 4.2178276747988446e-05,
"loss": 0.6049,
"step": 119
},
{
"epoch": 0.11825572801182557,
"grad_norm": 0.2338849902153015,
"learning_rate": 4.131759111665349e-05,
"loss": 0.685,
"step": 120
},
{
"epoch": 0.11924119241192412,
"grad_norm": 0.2361065298318863,
"learning_rate": 4.045955023117276e-05,
"loss": 0.7811,
"step": 121
},
{
"epoch": 0.12022665681202267,
"grad_norm": 0.23754455149173737,
"learning_rate": 3.960441545911204e-05,
"loss": 0.7872,
"step": 122
},
{
"epoch": 0.12121212121212122,
"grad_norm": 0.2533068358898163,
"learning_rate": 3.875244728280676e-05,
"loss": 0.7897,
"step": 123
},
{
"epoch": 0.12219758561221976,
"grad_norm": 0.25761741399765015,
"learning_rate": 3.790390522001662e-05,
"loss": 0.7821,
"step": 124
},
{
"epoch": 0.1231830500123183,
"grad_norm": 0.262465238571167,
"learning_rate": 3.705904774487396e-05,
"loss": 0.8736,
"step": 125
},
{
"epoch": 0.12416851441241686,
"grad_norm": 0.28506287932395935,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.7855,
"step": 126
},
{
"epoch": 0.1251539788125154,
"grad_norm": 0.2701311707496643,
"learning_rate": 3.5381414763863166e-05,
"loss": 0.6851,
"step": 127
},
{
"epoch": 0.12613944321261394,
"grad_norm": 0.2602234184741974,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.6349,
"step": 128
},
{
"epoch": 0.1271249076127125,
"grad_norm": 0.26517555117607117,
"learning_rate": 3.372159227714218e-05,
"loss": 0.6741,
"step": 129
},
{
"epoch": 0.12811037201281103,
"grad_norm": 0.2652198076248169,
"learning_rate": 3.289899283371657e-05,
"loss": 0.569,
"step": 130
},
{
"epoch": 0.1290958364129096,
"grad_norm": 0.27912837266921997,
"learning_rate": 3.2081602522734986e-05,
"loss": 0.7768,
"step": 131
},
{
"epoch": 0.13008130081300814,
"grad_norm": 0.28829455375671387,
"learning_rate": 3.12696703292044e-05,
"loss": 0.6819,
"step": 132
},
{
"epoch": 0.13106676521310667,
"grad_norm": 0.2873208522796631,
"learning_rate": 3.046344357553632e-05,
"loss": 0.8448,
"step": 133
},
{
"epoch": 0.13205222961320523,
"grad_norm": 0.3321005702018738,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.8613,
"step": 134
},
{
"epoch": 0.13303769401330376,
"grad_norm": 0.3113601505756378,
"learning_rate": 2.886908691296504e-05,
"loss": 0.757,
"step": 135
},
{
"epoch": 0.13402315841340232,
"grad_norm": 0.3224503993988037,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.8078,
"step": 136
},
{
"epoch": 0.13500862281350087,
"grad_norm": 0.3039480745792389,
"learning_rate": 2.7300475013022663e-05,
"loss": 0.5903,
"step": 137
},
{
"epoch": 0.1359940872135994,
"grad_norm": 0.3399895429611206,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.681,
"step": 138
},
{
"epoch": 0.13697955161369796,
"grad_norm": 0.32148513197898865,
"learning_rate": 2.575951898768315e-05,
"loss": 0.6537,
"step": 139
},
{
"epoch": 0.13796501601379652,
"grad_norm": 0.3466744124889374,
"learning_rate": 2.500000000000001e-05,
"loss": 0.7301,
"step": 140
},
{
"epoch": 0.13895048041389504,
"grad_norm": 0.35640135407447815,
"learning_rate": 2.4248096254497288e-05,
"loss": 0.7172,
"step": 141
},
{
"epoch": 0.1399359448139936,
"grad_norm": 0.38685786724090576,
"learning_rate": 2.350403678833976e-05,
"loss": 0.7447,
"step": 142
},
{
"epoch": 0.14092140921409213,
"grad_norm": 0.3715401291847229,
"learning_rate": 2.2768048249248648e-05,
"loss": 0.6587,
"step": 143
},
{
"epoch": 0.1419068736141907,
"grad_norm": 0.41979506611824036,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.7954,
"step": 144
},
{
"epoch": 0.14289233801428924,
"grad_norm": 0.4046609103679657,
"learning_rate": 2.132117818244771e-05,
"loss": 0.7503,
"step": 145
},
{
"epoch": 0.14387780241438777,
"grad_norm": 0.4440525472164154,
"learning_rate": 2.061073738537635e-05,
"loss": 0.8386,
"step": 146
},
{
"epoch": 0.14486326681448633,
"grad_norm": 0.4576321542263031,
"learning_rate": 1.9909248842397584e-05,
"loss": 0.6455,
"step": 147
},
{
"epoch": 0.1458487312145849,
"grad_norm": 0.5070066452026367,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.8399,
"step": 148
},
{
"epoch": 0.14683419561468342,
"grad_norm": 0.5966192483901978,
"learning_rate": 1.8533980447508137e-05,
"loss": 0.7003,
"step": 149
},
{
"epoch": 0.14781966001478197,
"grad_norm": 0.7911564707756042,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.6801,
"step": 150
},
{
"epoch": 0.14781966001478197,
"eval_loss": 0.6193792223930359,
"eval_runtime": 66.3669,
"eval_samples_per_second": 25.751,
"eval_steps_per_second": 12.883,
"step": 150
},
{
"epoch": 0.1488051244148805,
"grad_norm": 0.05842919647693634,
"learning_rate": 1.7197048550474643e-05,
"loss": 0.0392,
"step": 151
},
{
"epoch": 0.14979058881497906,
"grad_norm": 0.10516805201768875,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.1793,
"step": 152
},
{
"epoch": 0.15077605321507762,
"grad_norm": 0.11005867272615433,
"learning_rate": 1.5900081996875083e-05,
"loss": 0.2707,
"step": 153
},
{
"epoch": 0.15176151761517614,
"grad_norm": 0.11866780370473862,
"learning_rate": 1.526708147705013e-05,
"loss": 0.3616,
"step": 154
},
{
"epoch": 0.1527469820152747,
"grad_norm": 0.15852022171020508,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.5022,
"step": 155
},
{
"epoch": 0.15373244641537326,
"grad_norm": 0.1865692287683487,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.6785,
"step": 156
},
{
"epoch": 0.1547179108154718,
"grad_norm": 0.13659554719924927,
"learning_rate": 1.3432314919041478e-05,
"loss": 0.3663,
"step": 157
},
{
"epoch": 0.15570337521557034,
"grad_norm": 0.17033393681049347,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.5578,
"step": 158
},
{
"epoch": 0.15668883961566887,
"grad_norm": 0.17408299446105957,
"learning_rate": 1.22645209888614e-05,
"loss": 0.5313,
"step": 159
},
{
"epoch": 0.15767430401576743,
"grad_norm": 0.18068967759609222,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.6492,
"step": 160
},
{
"epoch": 0.158659768415866,
"grad_norm": 0.18489249050617218,
"learning_rate": 1.1142701927151456e-05,
"loss": 0.6622,
"step": 161
},
{
"epoch": 0.15964523281596452,
"grad_norm": 0.19225475192070007,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.6331,
"step": 162
},
{
"epoch": 0.16063069721606307,
"grad_norm": 0.17949078977108002,
"learning_rate": 1.006822449763537e-05,
"loss": 0.5526,
"step": 163
},
{
"epoch": 0.16161616161616163,
"grad_norm": 0.20066401362419128,
"learning_rate": 9.549150281252633e-06,
"loss": 0.6809,
"step": 164
},
{
"epoch": 0.16260162601626016,
"grad_norm": 0.19776208698749542,
"learning_rate": 9.042397785550405e-06,
"loss": 0.626,
"step": 165
},
{
"epoch": 0.16358709041635872,
"grad_norm": 0.20371097326278687,
"learning_rate": 8.548121372247918e-06,
"loss": 0.6395,
"step": 166
},
{
"epoch": 0.16457255481645724,
"grad_norm": 0.20566946268081665,
"learning_rate": 8.066471602728803e-06,
"loss": 0.6099,
"step": 167
},
{
"epoch": 0.1655580192165558,
"grad_norm": 0.2198767364025116,
"learning_rate": 7.597595192178702e-06,
"loss": 0.7695,
"step": 168
},
{
"epoch": 0.16654348361665436,
"grad_norm": 0.21261905133724213,
"learning_rate": 7.1416349648943894e-06,
"loss": 0.7628,
"step": 169
},
{
"epoch": 0.1675289480167529,
"grad_norm": 0.2710641920566559,
"learning_rate": 6.698729810778065e-06,
"loss": 0.814,
"step": 170
},
{
"epoch": 0.16851441241685144,
"grad_norm": 0.2244243621826172,
"learning_rate": 6.269014643030213e-06,
"loss": 0.6744,
"step": 171
},
{
"epoch": 0.16949987681695,
"grad_norm": 0.2463361620903015,
"learning_rate": 5.852620357053651e-06,
"loss": 0.7145,
"step": 172
},
{
"epoch": 0.17048534121704853,
"grad_norm": 0.25145378708839417,
"learning_rate": 5.449673790581611e-06,
"loss": 0.6732,
"step": 173
},
{
"epoch": 0.1714708056171471,
"grad_norm": 0.24809202551841736,
"learning_rate": 5.060297685041659e-06,
"loss": 0.7062,
"step": 174
},
{
"epoch": 0.17245627001724562,
"grad_norm": 0.26336124539375305,
"learning_rate": 4.684610648167503e-06,
"loss": 0.8605,
"step": 175
},
{
"epoch": 0.17344173441734417,
"grad_norm": 0.2400476485490799,
"learning_rate": 4.322727117869951e-06,
"loss": 0.6193,
"step": 176
},
{
"epoch": 0.17442719881744273,
"grad_norm": 0.2559741735458374,
"learning_rate": 3.974757327377981e-06,
"loss": 0.7645,
"step": 177
},
{
"epoch": 0.17541266321754126,
"grad_norm": 0.2447197139263153,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.7348,
"step": 178
},
{
"epoch": 0.17639812761763982,
"grad_norm": 0.259756475687027,
"learning_rate": 3.3209786751399187e-06,
"loss": 0.7928,
"step": 179
},
{
"epoch": 0.17738359201773837,
"grad_norm": 0.2761157751083374,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.7365,
"step": 180
},
{
"epoch": 0.1783690564178369,
"grad_norm": 0.2338065654039383,
"learning_rate": 2.724071220034158e-06,
"loss": 0.4513,
"step": 181
},
{
"epoch": 0.17935452081793546,
"grad_norm": 0.29236283898353577,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.7673,
"step": 182
},
{
"epoch": 0.180339985218034,
"grad_norm": 0.2648184299468994,
"learning_rate": 2.1847622018482283e-06,
"loss": 0.5189,
"step": 183
},
{
"epoch": 0.18132544961813254,
"grad_norm": 0.3015969693660736,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.7121,
"step": 184
},
{
"epoch": 0.1823109140182311,
"grad_norm": 0.280955970287323,
"learning_rate": 1.70370868554659e-06,
"loss": 0.63,
"step": 185
},
{
"epoch": 0.18329637841832963,
"grad_norm": 0.2971841096878052,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.6638,
"step": 186
},
{
"epoch": 0.1842818428184282,
"grad_norm": 0.33108440041542053,
"learning_rate": 1.2814967607382432e-06,
"loss": 0.69,
"step": 187
},
{
"epoch": 0.18526730721852674,
"grad_norm": 0.3237815201282501,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.6172,
"step": 188
},
{
"epoch": 0.18625277161862527,
"grad_norm": 0.33323925733566284,
"learning_rate": 9.186408276168013e-07,
"loss": 0.7341,
"step": 189
},
{
"epoch": 0.18723823601872383,
"grad_norm": 0.37207913398742676,
"learning_rate": 7.596123493895991e-07,
"loss": 0.6773,
"step": 190
},
{
"epoch": 0.18822370041882236,
"grad_norm": 0.3713732957839966,
"learning_rate": 6.15582970243117e-07,
"loss": 0.7245,
"step": 191
},
{
"epoch": 0.18920916481892092,
"grad_norm": 0.40831276774406433,
"learning_rate": 4.865965629214819e-07,
"loss": 0.7629,
"step": 192
},
{
"epoch": 0.19019462921901947,
"grad_norm": 0.3399887979030609,
"learning_rate": 3.7269241793390085e-07,
"loss": 0.5719,
"step": 193
},
{
"epoch": 0.191180093619118,
"grad_norm": 0.3936917185783386,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.8063,
"step": 194
},
{
"epoch": 0.19216555801921656,
"grad_norm": 0.42481428384780884,
"learning_rate": 1.9026509541272275e-07,
"loss": 0.8021,
"step": 195
},
{
"epoch": 0.19315102241931512,
"grad_norm": 0.4348626136779785,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.8341,
"step": 196
},
{
"epoch": 0.19413648681941365,
"grad_norm": 0.564532995223999,
"learning_rate": 6.852326227130834e-08,
"loss": 0.962,
"step": 197
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.4802018404006958,
"learning_rate": 3.04586490452119e-08,
"loss": 0.7501,
"step": 198
},
{
"epoch": 0.19610741561961073,
"grad_norm": 0.5178065299987793,
"learning_rate": 7.615242180436522e-09,
"loss": 0.726,
"step": 199
},
{
"epoch": 0.1970928800197093,
"grad_norm": 0.6739293336868286,
"learning_rate": 0.0,
"loss": 0.7251,
"step": 200
},
{
"epoch": 0.1970928800197093,
"eval_loss": 0.6153644323348999,
"eval_runtime": 66.3312,
"eval_samples_per_second": 25.765,
"eval_steps_per_second": 12.89,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.177685386559488e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}