{
"best_global_step": 1500,
"best_metric": 1.4234389066696167,
"best_model_checkpoint": "./finetuned_model\\checkpoint-1500",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1929,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015570260801868432,
"grad_norm": 0.20217418670654297,
"learning_rate": 1.8e-05,
"loss": 2.2788,
"step": 10
},
{
"epoch": 0.031140521603736863,
"grad_norm": 0.21721592545509338,
"learning_rate": 3.8e-05,
"loss": 2.2974,
"step": 20
},
{
"epoch": 0.04671078240560529,
"grad_norm": 0.18691149353981018,
"learning_rate": 5.8e-05,
"loss": 2.2254,
"step": 30
},
{
"epoch": 0.06228104320747373,
"grad_norm": 0.2627484202384949,
"learning_rate": 7.800000000000001e-05,
"loss": 2.1722,
"step": 40
},
{
"epoch": 0.07785130400934216,
"grad_norm": 0.4096653461456299,
"learning_rate": 9.8e-05,
"loss": 2.0316,
"step": 50
},
{
"epoch": 0.09342156481121058,
"grad_norm": 2.0437684059143066,
"learning_rate": 0.000118,
"loss": 1.8762,
"step": 60
},
{
"epoch": 0.10899182561307902,
"grad_norm": 0.2537792921066284,
"learning_rate": 0.000138,
"loss": 1.8509,
"step": 70
},
{
"epoch": 0.12456208641494745,
"grad_norm": 0.25586166977882385,
"learning_rate": 0.00015800000000000002,
"loss": 1.7,
"step": 80
},
{
"epoch": 0.1401323472168159,
"grad_norm": 0.2822591960430145,
"learning_rate": 0.00017800000000000002,
"loss": 1.7905,
"step": 90
},
{
"epoch": 0.15570260801868432,
"grad_norm": 0.2682412564754486,
"learning_rate": 0.00019800000000000002,
"loss": 1.7157,
"step": 100
},
{
"epoch": 0.17127286882055273,
"grad_norm": 0.30413559079170227,
"learning_rate": 0.00019901585565882995,
"loss": 1.6886,
"step": 110
},
{
"epoch": 0.18684312962242117,
"grad_norm": 0.30645519495010376,
"learning_rate": 0.00019792236194641883,
"loss": 1.6956,
"step": 120
},
{
"epoch": 0.2024133904242896,
"grad_norm": 0.3002190589904785,
"learning_rate": 0.00019682886823400766,
"loss": 1.6521,
"step": 130
},
{
"epoch": 0.21798365122615804,
"grad_norm": 0.29828041791915894,
"learning_rate": 0.0001957353745215965,
"loss": 1.6584,
"step": 140
},
{
"epoch": 0.23355391202802647,
"grad_norm": 0.301546573638916,
"learning_rate": 0.00019464188080918536,
"loss": 1.6613,
"step": 150
},
{
"epoch": 0.2491241728298949,
"grad_norm": 0.347465842962265,
"learning_rate": 0.00019354838709677422,
"loss": 1.6284,
"step": 160
},
{
"epoch": 0.2646944336317633,
"grad_norm": 0.31749042868614197,
"learning_rate": 0.00019245489338436304,
"loss": 1.5873,
"step": 170
},
{
"epoch": 0.2802646944336318,
"grad_norm": 0.32523462176322937,
"learning_rate": 0.0001913613996719519,
"loss": 1.5703,
"step": 180
},
{
"epoch": 0.2958349552355002,
"grad_norm": 0.3478928804397583,
"learning_rate": 0.00019026790595954074,
"loss": 1.6148,
"step": 190
},
{
"epoch": 0.31140521603736865,
"grad_norm": 0.3863014578819275,
"learning_rate": 0.0001891744122471296,
"loss": 1.6695,
"step": 200
},
{
"epoch": 0.32697547683923706,
"grad_norm": 0.31040167808532715,
"learning_rate": 0.00018808091853471842,
"loss": 1.636,
"step": 210
},
{
"epoch": 0.34254573764110546,
"grad_norm": 0.34434080123901367,
"learning_rate": 0.0001869874248223073,
"loss": 1.6309,
"step": 220
},
{
"epoch": 0.3581159984429739,
"grad_norm": 0.35271912813186646,
"learning_rate": 0.00018589393110989613,
"loss": 1.5183,
"step": 230
},
{
"epoch": 0.37368625924484233,
"grad_norm": 0.3652435839176178,
"learning_rate": 0.00018480043739748498,
"loss": 1.5748,
"step": 240
},
{
"epoch": 0.3892565200467108,
"grad_norm": 0.3423960208892822,
"learning_rate": 0.00018370694368507383,
"loss": 1.6099,
"step": 250
},
{
"epoch": 0.4048267808485792,
"grad_norm": 0.3742424249649048,
"learning_rate": 0.00018261344997266268,
"loss": 1.6456,
"step": 260
},
{
"epoch": 0.42039704165044767,
"grad_norm": 0.36541569232940674,
"learning_rate": 0.0001815199562602515,
"loss": 1.5256,
"step": 270
},
{
"epoch": 0.4359673024523161,
"grad_norm": 0.32436609268188477,
"learning_rate": 0.00018042646254784036,
"loss": 1.6154,
"step": 280
},
{
"epoch": 0.4515375632541845,
"grad_norm": 0.3482036888599396,
"learning_rate": 0.0001793329688354292,
"loss": 1.5598,
"step": 290
},
{
"epoch": 0.46710782405605294,
"grad_norm": 0.32296016812324524,
"learning_rate": 0.00017823947512301804,
"loss": 1.557,
"step": 300
},
{
"epoch": 0.48267808485792135,
"grad_norm": 0.3758240044116974,
"learning_rate": 0.0001771459814106069,
"loss": 1.5237,
"step": 310
},
{
"epoch": 0.4982483456597898,
"grad_norm": 0.3592066466808319,
"learning_rate": 0.00017605248769819574,
"loss": 1.5331,
"step": 320
},
{
"epoch": 0.5138186064616582,
"grad_norm": 0.3503170907497406,
"learning_rate": 0.0001749589939857846,
"loss": 1.5598,
"step": 330
},
{
"epoch": 0.5293888672635266,
"grad_norm": 0.3589423894882202,
"learning_rate": 0.00017386550027337342,
"loss": 1.5694,
"step": 340
},
{
"epoch": 0.5449591280653951,
"grad_norm": 0.4173847436904907,
"learning_rate": 0.0001727720065609623,
"loss": 1.6035,
"step": 350
},
{
"epoch": 0.5605293888672636,
"grad_norm": 0.3429367244243622,
"learning_rate": 0.00017167851284855112,
"loss": 1.5636,
"step": 360
},
{
"epoch": 0.576099649669132,
"grad_norm": 0.3459906280040741,
"learning_rate": 0.00017058501913613997,
"loss": 1.4638,
"step": 370
},
{
"epoch": 0.5916699104710004,
"grad_norm": 0.36562731862068176,
"learning_rate": 0.00016949152542372882,
"loss": 1.5236,
"step": 380
},
{
"epoch": 0.6072401712728688,
"grad_norm": 0.4281690716743469,
"learning_rate": 0.00016839803171131768,
"loss": 1.5328,
"step": 390
},
{
"epoch": 0.6228104320747373,
"grad_norm": 0.3289957642555237,
"learning_rate": 0.0001673045379989065,
"loss": 1.5511,
"step": 400
},
{
"epoch": 0.6383806928766057,
"grad_norm": 0.34759992361068726,
"learning_rate": 0.00016621104428649535,
"loss": 1.5047,
"step": 410
},
{
"epoch": 0.6539509536784741,
"grad_norm": 0.36279717087745667,
"learning_rate": 0.0001651175505740842,
"loss": 1.5314,
"step": 420
},
{
"epoch": 0.6695212144803425,
"grad_norm": 0.3549306094646454,
"learning_rate": 0.00016402405686167306,
"loss": 1.5158,
"step": 430
},
{
"epoch": 0.6850914752822109,
"grad_norm": 0.37329429388046265,
"learning_rate": 0.00016293056314926188,
"loss": 1.4927,
"step": 440
},
{
"epoch": 0.7006617360840794,
"grad_norm": 0.3531767427921295,
"learning_rate": 0.00016183706943685076,
"loss": 1.5568,
"step": 450
},
{
"epoch": 0.7162319968859479,
"grad_norm": 0.3837789297103882,
"learning_rate": 0.0001607435757244396,
"loss": 1.5042,
"step": 460
},
{
"epoch": 0.7318022576878163,
"grad_norm": 0.3604554533958435,
"learning_rate": 0.00015965008201202844,
"loss": 1.5362,
"step": 470
},
{
"epoch": 0.7473725184896847,
"grad_norm": 0.4049264192581177,
"learning_rate": 0.0001585565882996173,
"loss": 1.5754,
"step": 480
},
{
"epoch": 0.7629427792915532,
"grad_norm": 0.3917747139930725,
"learning_rate": 0.00015746309458720614,
"loss": 1.5257,
"step": 490
},
{
"epoch": 0.7785130400934216,
"grad_norm": 0.37551721930503845,
"learning_rate": 0.00015636960087479497,
"loss": 1.5712,
"step": 500
},
{
"epoch": 0.7785130400934216,
"eval_loss": 1.5167639255523682,
"eval_runtime": 260.2191,
"eval_samples_per_second": 9.872,
"eval_steps_per_second": 1.237,
"step": 500
},
{
"epoch": 0.79408330089529,
"grad_norm": 0.4406639635562897,
"learning_rate": 0.00015527610716238382,
"loss": 1.5319,
"step": 510
},
{
"epoch": 0.8096535616971584,
"grad_norm": 0.37547779083251953,
"learning_rate": 0.00015418261344997267,
"loss": 1.535,
"step": 520
},
{
"epoch": 0.8252238224990268,
"grad_norm": 0.3571765422821045,
"learning_rate": 0.00015308911973756152,
"loss": 1.4872,
"step": 530
},
{
"epoch": 0.8407940833008953,
"grad_norm": 0.36505362391471863,
"learning_rate": 0.00015199562602515035,
"loss": 1.5346,
"step": 540
},
{
"epoch": 0.8563643441027637,
"grad_norm": 0.34871625900268555,
"learning_rate": 0.00015090213231273923,
"loss": 1.5243,
"step": 550
},
{
"epoch": 0.8719346049046321,
"grad_norm": 0.374802827835083,
"learning_rate": 0.00014980863860032805,
"loss": 1.5031,
"step": 560
},
{
"epoch": 0.8875048657065006,
"grad_norm": 0.41518205404281616,
"learning_rate": 0.00014871514488791688,
"loss": 1.5265,
"step": 570
},
{
"epoch": 0.903075126508369,
"grad_norm": 0.3830599784851074,
"learning_rate": 0.00014762165117550576,
"loss": 1.5212,
"step": 580
},
{
"epoch": 0.9186453873102375,
"grad_norm": 0.37755969166755676,
"learning_rate": 0.00014652815746309458,
"loss": 1.49,
"step": 590
},
{
"epoch": 0.9342156481121059,
"grad_norm": 0.3936685621738434,
"learning_rate": 0.00014543466375068343,
"loss": 1.5361,
"step": 600
},
{
"epoch": 0.9497859089139743,
"grad_norm": 0.35126620531082153,
"learning_rate": 0.0001443411700382723,
"loss": 1.5182,
"step": 610
},
{
"epoch": 0.9653561697158427,
"grad_norm": 0.4049900770187378,
"learning_rate": 0.00014324767632586114,
"loss": 1.5134,
"step": 620
},
{
"epoch": 0.9809264305177112,
"grad_norm": 0.39619603753089905,
"learning_rate": 0.00014215418261344996,
"loss": 1.4918,
"step": 630
},
{
"epoch": 0.9964966913195796,
"grad_norm": 0.38389134407043457,
"learning_rate": 0.00014106068890103882,
"loss": 1.5066,
"step": 640
},
{
"epoch": 1.0108991825613078,
"grad_norm": 0.3816758096218109,
"learning_rate": 0.00013996719518862767,
"loss": 1.4342,
"step": 650
},
{
"epoch": 1.0264694433631762,
"grad_norm": 0.3684230446815491,
"learning_rate": 0.00013887370147621652,
"loss": 1.509,
"step": 660
},
{
"epoch": 1.0420397041650449,
"grad_norm": 0.4102369248867035,
"learning_rate": 0.00013778020776380534,
"loss": 1.4011,
"step": 670
},
{
"epoch": 1.0576099649669133,
"grad_norm": 0.40174803137779236,
"learning_rate": 0.00013668671405139422,
"loss": 1.4155,
"step": 680
},
{
"epoch": 1.0731802257687817,
"grad_norm": 0.4212823510169983,
"learning_rate": 0.00013559322033898305,
"loss": 1.4078,
"step": 690
},
{
"epoch": 1.08875048657065,
"grad_norm": 0.41347914934158325,
"learning_rate": 0.0001344997266265719,
"loss": 1.4245,
"step": 700
},
{
"epoch": 1.1043207473725185,
"grad_norm": 0.3938431441783905,
"learning_rate": 0.00013340623291416075,
"loss": 1.3777,
"step": 710
},
{
"epoch": 1.119891008174387,
"grad_norm": 0.4172612130641937,
"learning_rate": 0.0001323127392017496,
"loss": 1.4435,
"step": 720
},
{
"epoch": 1.1354612689762553,
"grad_norm": 0.4305002987384796,
"learning_rate": 0.00013121924548933843,
"loss": 1.479,
"step": 730
},
{
"epoch": 1.1510315297781237,
"grad_norm": 0.4031375050544739,
"learning_rate": 0.0001301257517769273,
"loss": 1.4107,
"step": 740
},
{
"epoch": 1.1666017905799921,
"grad_norm": 0.4102884829044342,
"learning_rate": 0.00012903225806451613,
"loss": 1.5123,
"step": 750
},
{
"epoch": 1.1821720513818605,
"grad_norm": 0.44275203347206116,
"learning_rate": 0.00012793876435210499,
"loss": 1.426,
"step": 760
},
{
"epoch": 1.1977423121837292,
"grad_norm": 0.4467061758041382,
"learning_rate": 0.00012684527063969384,
"loss": 1.4495,
"step": 770
},
{
"epoch": 1.2133125729855976,
"grad_norm": 0.40034279227256775,
"learning_rate": 0.0001257517769272827,
"loss": 1.4541,
"step": 780
},
{
"epoch": 1.228882833787466,
"grad_norm": 0.42505356669425964,
"learning_rate": 0.00012465828321487152,
"loss": 1.4767,
"step": 790
},
{
"epoch": 1.2444530945893344,
"grad_norm": 0.3754604458808899,
"learning_rate": 0.00012356478950246037,
"loss": 1.4144,
"step": 800
},
{
"epoch": 1.2600233553912028,
"grad_norm": 0.3871001601219177,
"learning_rate": 0.00012247129579004922,
"loss": 1.4618,
"step": 810
},
{
"epoch": 1.2755936161930712,
"grad_norm": 0.416062593460083,
"learning_rate": 0.00012137780207763807,
"loss": 1.4258,
"step": 820
},
{
"epoch": 1.2911638769949396,
"grad_norm": 0.3869543969631195,
"learning_rate": 0.00012028430836522691,
"loss": 1.379,
"step": 830
},
{
"epoch": 1.306734137796808,
"grad_norm": 0.4507176876068115,
"learning_rate": 0.00011919081465281574,
"loss": 1.4011,
"step": 840
},
{
"epoch": 1.3223043985986767,
"grad_norm": 0.4298593997955322,
"learning_rate": 0.0001180973209404046,
"loss": 1.3897,
"step": 850
},
{
"epoch": 1.337874659400545,
"grad_norm": 0.4016563296318054,
"learning_rate": 0.00011700382722799344,
"loss": 1.4004,
"step": 860
},
{
"epoch": 1.3534449202024135,
"grad_norm": 0.45489302277565,
"learning_rate": 0.00011591033351558229,
"loss": 1.3974,
"step": 870
},
{
"epoch": 1.3690151810042819,
"grad_norm": 0.4141370356082916,
"learning_rate": 0.00011481683980317113,
"loss": 1.4417,
"step": 880
},
{
"epoch": 1.3845854418061503,
"grad_norm": 0.4654589295387268,
"learning_rate": 0.00011372334609075998,
"loss": 1.479,
"step": 890
},
{
"epoch": 1.4001557026080187,
"grad_norm": 0.4696764051914215,
"learning_rate": 0.00011262985237834882,
"loss": 1.4009,
"step": 900
},
{
"epoch": 1.415725963409887,
"grad_norm": 0.4807955324649811,
"learning_rate": 0.00011153635866593767,
"loss": 1.3897,
"step": 910
},
{
"epoch": 1.4312962242117555,
"grad_norm": 0.5069774985313416,
"learning_rate": 0.00011044286495352651,
"loss": 1.431,
"step": 920
},
{
"epoch": 1.446866485013624,
"grad_norm": 0.43647122383117676,
"learning_rate": 0.00010934937124111538,
"loss": 1.4042,
"step": 930
},
{
"epoch": 1.4624367458154923,
"grad_norm": 0.43918347358703613,
"learning_rate": 0.0001082558775287042,
"loss": 1.4403,
"step": 940
},
{
"epoch": 1.4780070066173607,
"grad_norm": 0.39581167697906494,
"learning_rate": 0.00010716238381629307,
"loss": 1.4027,
"step": 950
},
{
"epoch": 1.4935772674192291,
"grad_norm": 0.42234891653060913,
"learning_rate": 0.0001060688901038819,
"loss": 1.4291,
"step": 960
},
{
"epoch": 1.5091475282210975,
"grad_norm": 0.40230894088745117,
"learning_rate": 0.00010497539639147076,
"loss": 1.3847,
"step": 970
},
{
"epoch": 1.5247177890229662,
"grad_norm": 0.41914451122283936,
"learning_rate": 0.0001038819026790596,
"loss": 1.364,
"step": 980
},
{
"epoch": 1.5402880498248346,
"grad_norm": 0.4406910240650177,
"learning_rate": 0.00010278840896664845,
"loss": 1.4437,
"step": 990
},
{
"epoch": 1.555858310626703,
"grad_norm": 0.4232882857322693,
"learning_rate": 0.00010169491525423729,
"loss": 1.4319,
"step": 1000
},
{
"epoch": 1.555858310626703,
"eval_loss": 1.4522794485092163,
"eval_runtime": 260.6355,
"eval_samples_per_second": 9.857,
"eval_steps_per_second": 1.235,
"step": 1000
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.41939419507980347,
"learning_rate": 0.00010060142154182615,
"loss": 1.4449,
"step": 1010
},
{
"epoch": 1.58699883223044,
"grad_norm": 0.44079649448394775,
"learning_rate": 9.950792782941498e-05,
"loss": 1.4285,
"step": 1020
},
{
"epoch": 1.6025690930323084,
"grad_norm": 0.3999095857143402,
"learning_rate": 9.841443411700383e-05,
"loss": 1.4375,
"step": 1030
},
{
"epoch": 1.6181393538341768,
"grad_norm": 0.4581930637359619,
"learning_rate": 9.732094040459268e-05,
"loss": 1.4495,
"step": 1040
},
{
"epoch": 1.6337096146360452,
"grad_norm": 0.48736894130706787,
"learning_rate": 9.622744669218152e-05,
"loss": 1.4261,
"step": 1050
},
{
"epoch": 1.6492798754379137,
"grad_norm": 0.43217116594314575,
"learning_rate": 9.513395297977037e-05,
"loss": 1.4329,
"step": 1060
},
{
"epoch": 1.664850136239782,
"grad_norm": 0.46019837260246277,
"learning_rate": 9.404045926735921e-05,
"loss": 1.3978,
"step": 1070
},
{
"epoch": 1.6804203970416505,
"grad_norm": 0.45845848321914673,
"learning_rate": 9.294696555494806e-05,
"loss": 1.329,
"step": 1080
},
{
"epoch": 1.6959906578435189,
"grad_norm": 0.4310099184513092,
"learning_rate": 9.185347184253691e-05,
"loss": 1.3823,
"step": 1090
},
{
"epoch": 1.7115609186453873,
"grad_norm": 0.4994125962257385,
"learning_rate": 9.075997813012575e-05,
"loss": 1.4715,
"step": 1100
},
{
"epoch": 1.7271311794472557,
"grad_norm": 0.4259664714336395,
"learning_rate": 8.96664844177146e-05,
"loss": 1.3662,
"step": 1110
},
{
"epoch": 1.742701440249124,
"grad_norm": 0.4763588309288025,
"learning_rate": 8.857299070530344e-05,
"loss": 1.4313,
"step": 1120
},
{
"epoch": 1.7582717010509925,
"grad_norm": 0.43702080845832825,
"learning_rate": 8.74794969928923e-05,
"loss": 1.3815,
"step": 1130
},
{
"epoch": 1.773841961852861,
"grad_norm": 0.41179555654525757,
"learning_rate": 8.638600328048115e-05,
"loss": 1.4042,
"step": 1140
},
{
"epoch": 1.7894122226547293,
"grad_norm": 0.469272255897522,
"learning_rate": 8.529250956806999e-05,
"loss": 1.4391,
"step": 1150
},
{
"epoch": 1.804982483456598,
"grad_norm": 0.4028104841709137,
"learning_rate": 8.419901585565884e-05,
"loss": 1.4205,
"step": 1160
},
{
"epoch": 1.8205527442584664,
"grad_norm": 0.44282203912734985,
"learning_rate": 8.310552214324768e-05,
"loss": 1.4767,
"step": 1170
},
{
"epoch": 1.8361230050603348,
"grad_norm": 0.43608203530311584,
"learning_rate": 8.201202843083653e-05,
"loss": 1.3755,
"step": 1180
},
{
"epoch": 1.8516932658622032,
"grad_norm": 0.4701242446899414,
"learning_rate": 8.091853471842538e-05,
"loss": 1.371,
"step": 1190
},
{
"epoch": 1.8672635266640716,
"grad_norm": 0.415041446685791,
"learning_rate": 7.982504100601422e-05,
"loss": 1.4174,
"step": 1200
},
{
"epoch": 1.8828337874659402,
"grad_norm": 0.38387399911880493,
"learning_rate": 7.873154729360307e-05,
"loss": 1.3444,
"step": 1210
},
{
"epoch": 1.8984040482678086,
"grad_norm": 0.45225557684898376,
"learning_rate": 7.763805358119191e-05,
"loss": 1.3988,
"step": 1220
},
{
"epoch": 1.913974309069677,
"grad_norm": 0.47453585267066956,
"learning_rate": 7.654455986878076e-05,
"loss": 1.3585,
"step": 1230
},
{
"epoch": 1.9295445698715454,
"grad_norm": 0.45056021213531494,
"learning_rate": 7.545106615636961e-05,
"loss": 1.3662,
"step": 1240
},
{
"epoch": 1.9451148306734138,
"grad_norm": 0.40802398324012756,
"learning_rate": 7.435757244395844e-05,
"loss": 1.3917,
"step": 1250
},
{
"epoch": 1.9606850914752822,
"grad_norm": 0.4195387661457062,
"learning_rate": 7.326407873154729e-05,
"loss": 1.4025,
"step": 1260
},
{
"epoch": 1.9762553522771507,
"grad_norm": 0.41928455233573914,
"learning_rate": 7.217058501913614e-05,
"loss": 1.3368,
"step": 1270
},
{
"epoch": 1.991825613079019,
"grad_norm": 0.5062233209609985,
"learning_rate": 7.107709130672498e-05,
"loss": 1.4474,
"step": 1280
},
{
"epoch": 2.0062281043207473,
"grad_norm": 0.42787787318229675,
"learning_rate": 6.998359759431383e-05,
"loss": 1.4169,
"step": 1290
},
{
"epoch": 2.0217983651226157,
"grad_norm": 0.4181097149848938,
"learning_rate": 6.889010388190267e-05,
"loss": 1.3275,
"step": 1300
},
{
"epoch": 2.037368625924484,
"grad_norm": 0.43423986434936523,
"learning_rate": 6.779661016949152e-05,
"loss": 1.3302,
"step": 1310
},
{
"epoch": 2.0529388867263525,
"grad_norm": 0.4697537124156952,
"learning_rate": 6.670311645708038e-05,
"loss": 1.4016,
"step": 1320
},
{
"epoch": 2.068509147528221,
"grad_norm": 0.5001850128173828,
"learning_rate": 6.560962274466922e-05,
"loss": 1.3086,
"step": 1330
},
{
"epoch": 2.0840794083300898,
"grad_norm": 0.4848284125328064,
"learning_rate": 6.451612903225807e-05,
"loss": 1.337,
"step": 1340
},
{
"epoch": 2.099649669131958,
"grad_norm": 0.470833420753479,
"learning_rate": 6.342263531984692e-05,
"loss": 1.3745,
"step": 1350
},
{
"epoch": 2.1152199299338266,
"grad_norm": 0.42497771978378296,
"learning_rate": 6.232914160743576e-05,
"loss": 1.3237,
"step": 1360
},
{
"epoch": 2.130790190735695,
"grad_norm": 0.4388049840927124,
"learning_rate": 6.123564789502461e-05,
"loss": 1.3423,
"step": 1370
},
{
"epoch": 2.1463604515375634,
"grad_norm": 0.45158448815345764,
"learning_rate": 6.0142154182613455e-05,
"loss": 1.3364,
"step": 1380
},
{
"epoch": 2.161930712339432,
"grad_norm": 0.47724074125289917,
"learning_rate": 5.90486604702023e-05,
"loss": 1.3365,
"step": 1390
},
{
"epoch": 2.1775009731413,
"grad_norm": 0.49025580286979675,
"learning_rate": 5.7955166757791146e-05,
"loss": 1.3243,
"step": 1400
},
{
"epoch": 2.1930712339431686,
"grad_norm": 0.551092267036438,
"learning_rate": 5.686167304537999e-05,
"loss": 1.3865,
"step": 1410
},
{
"epoch": 2.208641494745037,
"grad_norm": 0.4625059962272644,
"learning_rate": 5.5768179332968836e-05,
"loss": 1.3203,
"step": 1420
},
{
"epoch": 2.2242117555469054,
"grad_norm": 0.4594584107398987,
"learning_rate": 5.467468562055769e-05,
"loss": 1.3088,
"step": 1430
},
{
"epoch": 2.239782016348774,
"grad_norm": 0.45681628584861755,
"learning_rate": 5.3581191908146534e-05,
"loss": 1.3675,
"step": 1440
},
{
"epoch": 2.2553522771506422,
"grad_norm": 0.4665955901145935,
"learning_rate": 5.248769819573538e-05,
"loss": 1.3608,
"step": 1450
},
{
"epoch": 2.2709225379525106,
"grad_norm": 0.5088324546813965,
"learning_rate": 5.1394204483324224e-05,
"loss": 1.3386,
"step": 1460
},
{
"epoch": 2.286492798754379,
"grad_norm": 0.45360425114631653,
"learning_rate": 5.0300710770913076e-05,
"loss": 1.3568,
"step": 1470
},
{
"epoch": 2.3020630595562475,
"grad_norm": 0.4933008551597595,
"learning_rate": 4.9207217058501915e-05,
"loss": 1.3041,
"step": 1480
},
{
"epoch": 2.317633320358116,
"grad_norm": 0.45595404505729675,
"learning_rate": 4.811372334609076e-05,
"loss": 1.3012,
"step": 1490
},
{
"epoch": 2.3332035811599843,
"grad_norm": 0.49661508202552795,
"learning_rate": 4.7020229633679605e-05,
"loss": 1.3304,
"step": 1500
},
{
"epoch": 2.3332035811599843,
"eval_loss": 1.4234389066696167,
"eval_runtime": 260.843,
"eval_samples_per_second": 9.849,
"eval_steps_per_second": 1.234,
"step": 1500
},
{
"epoch": 2.3487738419618527,
"grad_norm": 0.4981136918067932,
"learning_rate": 4.592673592126846e-05,
"loss": 1.3316,
"step": 1510
},
{
"epoch": 2.364344102763721,
"grad_norm": 0.4820728898048401,
"learning_rate": 4.48332422088573e-05,
"loss": 1.3227,
"step": 1520
},
{
"epoch": 2.3799143635655895,
"grad_norm": 0.5209029912948608,
"learning_rate": 4.373974849644615e-05,
"loss": 1.3635,
"step": 1530
},
{
"epoch": 2.3954846243674583,
"grad_norm": 0.493643194437027,
"learning_rate": 4.264625478403499e-05,
"loss": 1.3084,
"step": 1540
},
{
"epoch": 2.4110548851693268,
"grad_norm": 0.4825109839439392,
"learning_rate": 4.155276107162384e-05,
"loss": 1.3758,
"step": 1550
},
{
"epoch": 2.426625145971195,
"grad_norm": 0.5229203104972839,
"learning_rate": 4.045926735921269e-05,
"loss": 1.3421,
"step": 1560
},
{
"epoch": 2.4421954067730636,
"grad_norm": 0.4592013657093048,
"learning_rate": 3.9365773646801536e-05,
"loss": 1.331,
"step": 1570
},
{
"epoch": 2.457765667574932,
"grad_norm": 0.44231197237968445,
"learning_rate": 3.827227993439038e-05,
"loss": 1.3215,
"step": 1580
},
{
"epoch": 2.4733359283768004,
"grad_norm": 0.5239751935005188,
"learning_rate": 3.717878622197922e-05,
"loss": 1.295,
"step": 1590
},
{
"epoch": 2.488906189178669,
"grad_norm": 0.5024131536483765,
"learning_rate": 3.608529250956807e-05,
"loss": 1.3301,
"step": 1600
},
{
"epoch": 2.504476449980537,
"grad_norm": 0.4963117837905884,
"learning_rate": 3.499179879715692e-05,
"loss": 1.3481,
"step": 1610
},
{
"epoch": 2.5200467107824056,
"grad_norm": 0.5025255084037781,
"learning_rate": 3.389830508474576e-05,
"loss": 1.3549,
"step": 1620
},
{
"epoch": 2.535616971584274,
"grad_norm": 0.527905285358429,
"learning_rate": 3.280481137233461e-05,
"loss": 1.3592,
"step": 1630
},
{
"epoch": 2.5511872323861424,
"grad_norm": 0.49054473638534546,
"learning_rate": 3.171131765992346e-05,
"loss": 1.3023,
"step": 1640
},
{
"epoch": 2.566757493188011,
"grad_norm": 0.5385321974754333,
"learning_rate": 3.0617823947512305e-05,
"loss": 1.3427,
"step": 1650
},
{
"epoch": 2.5823277539898792,
"grad_norm": 0.49234816431999207,
"learning_rate": 2.952433023510115e-05,
"loss": 1.3358,
"step": 1660
},
{
"epoch": 2.5978980147917476,
"grad_norm": 0.4959504008293152,
"learning_rate": 2.8430836522689995e-05,
"loss": 1.3266,
"step": 1670
},
{
"epoch": 2.613468275593616,
"grad_norm": 0.44804513454437256,
"learning_rate": 2.7337342810278844e-05,
"loss": 1.3672,
"step": 1680
},
{
"epoch": 2.6290385363954845,
"grad_norm": 0.5712561011314392,
"learning_rate": 2.624384909786769e-05,
"loss": 1.356,
"step": 1690
},
{
"epoch": 2.6446087971973533,
"grad_norm": 0.4535830616950989,
"learning_rate": 2.5150355385456538e-05,
"loss": 1.2911,
"step": 1700
},
{
"epoch": 2.6601790579992217,
"grad_norm": 0.46494343876838684,
"learning_rate": 2.405686167304538e-05,
"loss": 1.297,
"step": 1710
},
{
"epoch": 2.67574931880109,
"grad_norm": 0.5322738885879517,
"learning_rate": 2.296336796063423e-05,
"loss": 1.3409,
"step": 1720
},
{
"epoch": 2.6913195796029585,
"grad_norm": 0.5388643145561218,
"learning_rate": 2.1869874248223074e-05,
"loss": 1.2991,
"step": 1730
},
{
"epoch": 2.706889840404827,
"grad_norm": 0.4668332040309906,
"learning_rate": 2.077638053581192e-05,
"loss": 1.3082,
"step": 1740
},
{
"epoch": 2.7224601012066953,
"grad_norm": 0.493221640586853,
"learning_rate": 1.9682886823400768e-05,
"loss": 1.289,
"step": 1750
},
{
"epoch": 2.7380303620085638,
"grad_norm": 0.49574050307273865,
"learning_rate": 1.858939311098961e-05,
"loss": 1.2815,
"step": 1760
},
{
"epoch": 2.753600622810432,
"grad_norm": 0.5505498647689819,
"learning_rate": 1.749589939857846e-05,
"loss": 1.3711,
"step": 1770
},
{
"epoch": 2.7691708836123006,
"grad_norm": 0.5239655375480652,
"learning_rate": 1.6402405686167304e-05,
"loss": 1.3717,
"step": 1780
},
{
"epoch": 2.784741144414169,
"grad_norm": 0.50331050157547,
"learning_rate": 1.5308911973756152e-05,
"loss": 1.3082,
"step": 1790
},
{
"epoch": 2.8003114052160374,
"grad_norm": 0.5316745638847351,
"learning_rate": 1.4215418261344998e-05,
"loss": 1.3147,
"step": 1800
},
{
"epoch": 2.815881666017906,
"grad_norm": 0.49300023913383484,
"learning_rate": 1.3121924548933845e-05,
"loss": 1.3304,
"step": 1810
},
{
"epoch": 2.831451926819774,
"grad_norm": 0.5078563690185547,
"learning_rate": 1.202843083652269e-05,
"loss": 1.3342,
"step": 1820
},
{
"epoch": 2.8470221876216426,
"grad_norm": 0.4983169734477997,
"learning_rate": 1.0934937124111537e-05,
"loss": 1.3854,
"step": 1830
},
{
"epoch": 2.862592448423511,
"grad_norm": 0.5032157897949219,
"learning_rate": 9.841443411700384e-06,
"loss": 1.3383,
"step": 1840
},
{
"epoch": 2.8781627092253794,
"grad_norm": 0.5086650252342224,
"learning_rate": 8.74794969928923e-06,
"loss": 1.3179,
"step": 1850
},
{
"epoch": 2.893732970027248,
"grad_norm": 0.4910889267921448,
"learning_rate": 7.654455986878076e-06,
"loss": 1.3241,
"step": 1860
},
{
"epoch": 2.9093032308291162,
"grad_norm": 0.5080595016479492,
"learning_rate": 6.560962274466922e-06,
"loss": 1.3179,
"step": 1870
},
{
"epoch": 2.9248734916309846,
"grad_norm": 0.5309430956840515,
"learning_rate": 5.4674685620557685e-06,
"loss": 1.3617,
"step": 1880
},
{
"epoch": 2.940443752432853,
"grad_norm": 0.5051539540290833,
"learning_rate": 4.373974849644615e-06,
"loss": 1.3573,
"step": 1890
},
{
"epoch": 2.9560140132347215,
"grad_norm": 0.4643094837665558,
"learning_rate": 3.280481137233461e-06,
"loss": 1.3074,
"step": 1900
},
{
"epoch": 2.97158427403659,
"grad_norm": 0.4756477177143097,
"learning_rate": 2.1869874248223073e-06,
"loss": 1.3093,
"step": 1910
},
{
"epoch": 2.9871545348384583,
"grad_norm": 0.4663808047771454,
"learning_rate": 1.0934937124111537e-06,
"loss": 1.3229,
"step": 1920
}
],
"logging_steps": 10,
"max_steps": 1929,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5420831362659123e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}