{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6537473733364464,
"eval_steps": 500,
"global_step": 700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009339248190520663,
"grad_norm": 6.638877692627699,
"learning_rate": 9.345794392523364e-07,
"loss": 9.2917,
"step": 1
},
{
"epoch": 0.009339248190520663,
"grad_norm": 1.1560921335705272,
"learning_rate": 9.345794392523365e-06,
"loss": 9.0876,
"step": 10
},
{
"epoch": 0.018678496381041326,
"grad_norm": 0.8415132296956432,
"learning_rate": 1.869158878504673e-05,
"loss": 8.2164,
"step": 20
},
{
"epoch": 0.02801774457156199,
"grad_norm": 0.45381630992958155,
"learning_rate": 2.8037383177570094e-05,
"loss": 7.5184,
"step": 30
},
{
"epoch": 0.03735699276208265,
"grad_norm": 0.8400636107958425,
"learning_rate": 3.738317757009346e-05,
"loss": 6.6507,
"step": 40
},
{
"epoch": 0.046696240952603316,
"grad_norm": 0.557696240829066,
"learning_rate": 4.672897196261683e-05,
"loss": 5.8909,
"step": 50
},
{
"epoch": 0.05603548914312398,
"grad_norm": 0.3971996057467842,
"learning_rate": 5.607476635514019e-05,
"loss": 5.4127,
"step": 60
},
{
"epoch": 0.06537473733364464,
"grad_norm": 0.2932710540265688,
"learning_rate": 6.542056074766355e-05,
"loss": 5.0106,
"step": 70
},
{
"epoch": 0.0747139855241653,
"grad_norm": 0.3682690443551033,
"learning_rate": 7.476635514018692e-05,
"loss": 4.6042,
"step": 80
},
{
"epoch": 0.08405323371468597,
"grad_norm": 0.3132971920011515,
"learning_rate": 8.411214953271028e-05,
"loss": 4.2031,
"step": 90
},
{
"epoch": 0.09339248190520663,
"grad_norm": 0.6731868159213446,
"learning_rate": 9.345794392523365e-05,
"loss": 3.9423,
"step": 100
},
{
"epoch": 0.1027317300957273,
"grad_norm": 0.27848867836763197,
"learning_rate": 0.000102803738317757,
"loss": 3.7157,
"step": 110
},
{
"epoch": 0.11207097828624796,
"grad_norm": 0.24642109032991807,
"learning_rate": 0.00011214953271028037,
"loss": 3.4516,
"step": 120
},
{
"epoch": 0.12141022647676862,
"grad_norm": 0.25717384664029797,
"learning_rate": 0.00012149532710280373,
"loss": 3.2167,
"step": 130
},
{
"epoch": 0.13074947466728928,
"grad_norm": 0.20912922668565637,
"learning_rate": 0.0001308411214953271,
"loss": 3.0237,
"step": 140
},
{
"epoch": 0.14008872285780993,
"grad_norm": 0.15805888388706113,
"learning_rate": 0.00014018691588785047,
"loss": 2.8529,
"step": 150
},
{
"epoch": 0.1494279710483306,
"grad_norm": 0.23370349497479534,
"learning_rate": 0.00014953271028037384,
"loss": 2.7078,
"step": 160
},
{
"epoch": 0.15876721923885126,
"grad_norm": 0.1802138633012483,
"learning_rate": 0.0001588785046728972,
"loss": 2.6115,
"step": 170
},
{
"epoch": 0.16810646742937194,
"grad_norm": 0.13354347610039718,
"learning_rate": 0.00016822429906542056,
"loss": 2.5309,
"step": 180
},
{
"epoch": 0.17744571561989259,
"grad_norm": 0.09414865188086892,
"learning_rate": 0.00017757009345794393,
"loss": 2.4452,
"step": 190
},
{
"epoch": 0.18678496381041326,
"grad_norm": 0.08333601554768896,
"learning_rate": 0.0001869158878504673,
"loss": 2.3832,
"step": 200
},
{
"epoch": 0.1961242120009339,
"grad_norm": 0.15926414699806835,
"learning_rate": 0.00019626168224299065,
"loss": 2.3492,
"step": 210
},
{
"epoch": 0.2054634601914546,
"grad_norm": 0.09492820761057012,
"learning_rate": 0.0001999989254250208,
"loss": 2.323,
"step": 220
},
{
"epoch": 0.21480270838197524,
"grad_norm": 0.0801349259356147,
"learning_rate": 0.00019999235866155886,
"loss": 2.2731,
"step": 230
},
{
"epoch": 0.22414195657249592,
"grad_norm": 0.12210960524693895,
"learning_rate": 0.00019997982251228469,
"loss": 2.2433,
"step": 240
},
{
"epoch": 0.23348120476301656,
"grad_norm": 3.14289498732125,
"learning_rate": 0.00019996131772558666,
"loss": 3.2769,
"step": 250
},
{
"epoch": 0.24282045295353724,
"grad_norm": 1.632940983166179,
"learning_rate": 0.00019993684540617132,
"loss": 4.9343,
"step": 260
},
{
"epoch": 0.2521597011440579,
"grad_norm": 3.4831252230225416,
"learning_rate": 0.00019990640701499736,
"loss": 4.2768,
"step": 270
},
{
"epoch": 0.26149894933457857,
"grad_norm": 1.6069045920523788,
"learning_rate": 0.00019987000436918874,
"loss": 5.9581,
"step": 280
},
{
"epoch": 0.27083819752509924,
"grad_norm": 0.2220907936615993,
"learning_rate": 0.00019982763964192585,
"loss": 3.8228,
"step": 290
},
{
"epoch": 0.28017744571561987,
"grad_norm": 0.24737284913291765,
"learning_rate": 0.00019977931536231596,
"loss": 3.1413,
"step": 300
},
{
"epoch": 0.28951669390614054,
"grad_norm": 4.010404518241152,
"learning_rate": 0.00019972503441524224,
"loss": 2.8432,
"step": 310
},
{
"epoch": 0.2988559420966612,
"grad_norm": 0.1515583580811596,
"learning_rate": 0.00019966480004119142,
"loss": 2.7859,
"step": 320
},
{
"epoch": 0.3081951902871819,
"grad_norm": 0.11259395750650594,
"learning_rate": 0.00019959861583606045,
"loss": 2.5821,
"step": 330
},
{
"epoch": 0.3175344384777025,
"grad_norm": 0.22514797814956813,
"learning_rate": 0.00019952648575094183,
"loss": 2.4517,
"step": 340
},
{
"epoch": 0.3268736866682232,
"grad_norm": 0.08040136172033542,
"learning_rate": 0.00019944841409188767,
"loss": 2.3794,
"step": 350
},
{
"epoch": 0.3362129348587439,
"grad_norm": 0.054758073593565354,
"learning_rate": 0.00019936440551965263,
"loss": 2.3232,
"step": 360
},
{
"epoch": 0.34555218304926455,
"grad_norm": 0.06742998909645591,
"learning_rate": 0.00019927446504941577,
"loss": 2.2776,
"step": 370
},
{
"epoch": 0.35489143123978517,
"grad_norm": 0.048780907584876736,
"learning_rate": 0.00019917859805048096,
"loss": 2.2376,
"step": 380
},
{
"epoch": 0.36423067943030585,
"grad_norm": 0.0475325963052214,
"learning_rate": 0.00019907681024595663,
"loss": 2.2191,
"step": 390
},
{
"epoch": 0.3735699276208265,
"grad_norm": 0.054089563211590065,
"learning_rate": 0.00019896910771241387,
"loss": 2.1961,
"step": 400
},
{
"epoch": 0.3829091758113472,
"grad_norm": 0.21798406131864823,
"learning_rate": 0.00019885549687952372,
"loss": 2.2078,
"step": 410
},
{
"epoch": 0.3922484240018678,
"grad_norm": 0.8673185709111124,
"learning_rate": 0.00019873598452967338,
"loss": 2.3731,
"step": 420
},
{
"epoch": 0.4015876721923885,
"grad_norm": 0.22424350669971718,
"learning_rate": 0.0001986105777975613,
"loss": 2.6195,
"step": 430
},
{
"epoch": 0.4109269203829092,
"grad_norm": 0.307418135168262,
"learning_rate": 0.00019847928416977126,
"loss": 2.3624,
"step": 440
},
{
"epoch": 0.42026616857342985,
"grad_norm": 0.07944722668080402,
"learning_rate": 0.00019834211148432536,
"loss": 2.2799,
"step": 450
},
{
"epoch": 0.4296054167639505,
"grad_norm": 0.18146933758664588,
"learning_rate": 0.00019819906793021614,
"loss": 2.2177,
"step": 460
},
{
"epoch": 0.43894466495447115,
"grad_norm": 0.07035825837333018,
"learning_rate": 0.0001980501620469178,
"loss": 2.1767,
"step": 470
},
{
"epoch": 0.44828391314499183,
"grad_norm": 0.04596186944454228,
"learning_rate": 0.0001978954027238763,
"loss": 2.1598,
"step": 480
},
{
"epoch": 0.4576231613355125,
"grad_norm": 0.041342347745088055,
"learning_rate": 0.0001977347991999786,
"loss": 2.131,
"step": 490
},
{
"epoch": 0.46696240952603313,
"grad_norm": 0.04172063219841485,
"learning_rate": 0.00019756836106300137,
"loss": 2.1231,
"step": 500
},
{
"epoch": 0.4763016577165538,
"grad_norm": 0.03373646457711144,
"learning_rate": 0.00019739609824903843,
"loss": 2.1146,
"step": 510
},
{
"epoch": 0.4856409059070745,
"grad_norm": 0.03736871030676605,
"learning_rate": 0.00019721802104190748,
"loss": 2.1003,
"step": 520
},
{
"epoch": 0.49498015409759516,
"grad_norm": 0.033931028038211034,
"learning_rate": 0.00019703414007253645,
"loss": 2.0983,
"step": 530
},
{
"epoch": 0.5043194022881158,
"grad_norm": 0.03790055446070549,
"learning_rate": 0.00019684446631832868,
"loss": 2.092,
"step": 540
},
{
"epoch": 0.5136586504786365,
"grad_norm": 0.030956192803893078,
"learning_rate": 0.00019664901110250758,
"loss": 2.0807,
"step": 550
},
{
"epoch": 0.5229978986691571,
"grad_norm": 0.03542530209935129,
"learning_rate": 0.00019644778609344068,
"loss": 2.0773,
"step": 560
},
{
"epoch": 0.5323371468596778,
"grad_norm": 0.040947757568902336,
"learning_rate": 0.00019624080330394306,
"loss": 2.0649,
"step": 570
},
{
"epoch": 0.5416763950501985,
"grad_norm": 0.034273415973688146,
"learning_rate": 0.00019602807509056018,
"loss": 2.0479,
"step": 580
},
{
"epoch": 0.5510156432407192,
"grad_norm": 0.031427481498873144,
"learning_rate": 0.00019580961415283028,
"loss": 2.0563,
"step": 590
},
{
"epoch": 0.5603548914312397,
"grad_norm": 0.03141549752041532,
"learning_rate": 0.00019558543353252611,
"loss": 2.0503,
"step": 600
},
{
"epoch": 0.5696941396217604,
"grad_norm": 0.033012392726428204,
"learning_rate": 0.00019535554661287652,
"loss": 2.0389,
"step": 610
},
{
"epoch": 0.5790333878122811,
"grad_norm": 0.02913261992661444,
"learning_rate": 0.0001951199671177673,
"loss": 2.036,
"step": 620
},
{
"epoch": 0.5883726360028018,
"grad_norm": 0.030543903708435332,
"learning_rate": 0.00019487870911092214,
"loss": 2.0326,
"step": 630
},
{
"epoch": 0.5977118841933224,
"grad_norm": 0.03215005545393897,
"learning_rate": 0.00019463178699506277,
"loss": 2.0231,
"step": 640
},
{
"epoch": 0.6070511323838431,
"grad_norm": 0.03823630791937631,
"learning_rate": 0.00019437921551104933,
"loss": 2.0293,
"step": 650
},
{
"epoch": 0.6163903805743638,
"grad_norm": 0.03200103149471209,
"learning_rate": 0.00019412100973700038,
"loss": 2.017,
"step": 660
},
{
"epoch": 0.6257296287648845,
"grad_norm": 0.03841804153577787,
"learning_rate": 0.00019385718508739262,
"loss": 2.0135,
"step": 670
},
{
"epoch": 0.635068876955405,
"grad_norm": 0.03052396655271533,
"learning_rate": 0.0001935877573121407,
"loss": 2.0237,
"step": 680
},
{
"epoch": 0.6444081251459257,
"grad_norm": 0.033817837533771815,
"learning_rate": 0.00019331274249565717,
"loss": 2.0069,
"step": 690
},
{
"epoch": 0.6537473733364464,
"grad_norm": 0.028286999650643876,
"learning_rate": 0.00019303215705589194,
"loss": 2.0112,
"step": 700
}
],
"logging_steps": 10,
"max_steps": 4280,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.667909740273861e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}