{
"best_global_step": 1000,
"best_metric": 1.4522794485092163,
"best_model_checkpoint": "./finetuned_model\\checkpoint-1000",
"epoch": 1.555858310626703,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015570260801868432,
"grad_norm": 0.20217418670654297,
"learning_rate": 1.8e-05,
"loss": 2.2788,
"step": 10
},
{
"epoch": 0.031140521603736863,
"grad_norm": 0.21721592545509338,
"learning_rate": 3.8e-05,
"loss": 2.2974,
"step": 20
},
{
"epoch": 0.04671078240560529,
"grad_norm": 0.18691149353981018,
"learning_rate": 5.8e-05,
"loss": 2.2254,
"step": 30
},
{
"epoch": 0.06228104320747373,
"grad_norm": 0.2627484202384949,
"learning_rate": 7.800000000000001e-05,
"loss": 2.1722,
"step": 40
},
{
"epoch": 0.07785130400934216,
"grad_norm": 0.4096653461456299,
"learning_rate": 9.8e-05,
"loss": 2.0316,
"step": 50
},
{
"epoch": 0.09342156481121058,
"grad_norm": 2.0437684059143066,
"learning_rate": 0.000118,
"loss": 1.8762,
"step": 60
},
{
"epoch": 0.10899182561307902,
"grad_norm": 0.2537792921066284,
"learning_rate": 0.000138,
"loss": 1.8509,
"step": 70
},
{
"epoch": 0.12456208641494745,
"grad_norm": 0.25586166977882385,
"learning_rate": 0.00015800000000000002,
"loss": 1.7,
"step": 80
},
{
"epoch": 0.1401323472168159,
"grad_norm": 0.2822591960430145,
"learning_rate": 0.00017800000000000002,
"loss": 1.7905,
"step": 90
},
{
"epoch": 0.15570260801868432,
"grad_norm": 0.2682412564754486,
"learning_rate": 0.00019800000000000002,
"loss": 1.7157,
"step": 100
},
{
"epoch": 0.17127286882055273,
"grad_norm": 0.30413559079170227,
"learning_rate": 0.00019901585565882995,
"loss": 1.6886,
"step": 110
},
{
"epoch": 0.18684312962242117,
"grad_norm": 0.30645519495010376,
"learning_rate": 0.00019792236194641883,
"loss": 1.6956,
"step": 120
},
{
"epoch": 0.2024133904242896,
"grad_norm": 0.3002190589904785,
"learning_rate": 0.00019682886823400766,
"loss": 1.6521,
"step": 130
},
{
"epoch": 0.21798365122615804,
"grad_norm": 0.29828041791915894,
"learning_rate": 0.0001957353745215965,
"loss": 1.6584,
"step": 140
},
{
"epoch": 0.23355391202802647,
"grad_norm": 0.301546573638916,
"learning_rate": 0.00019464188080918536,
"loss": 1.6613,
"step": 150
},
{
"epoch": 0.2491241728298949,
"grad_norm": 0.347465842962265,
"learning_rate": 0.00019354838709677422,
"loss": 1.6284,
"step": 160
},
{
"epoch": 0.2646944336317633,
"grad_norm": 0.31749042868614197,
"learning_rate": 0.00019245489338436304,
"loss": 1.5873,
"step": 170
},
{
"epoch": 0.2802646944336318,
"grad_norm": 0.32523462176322937,
"learning_rate": 0.0001913613996719519,
"loss": 1.5703,
"step": 180
},
{
"epoch": 0.2958349552355002,
"grad_norm": 0.3478928804397583,
"learning_rate": 0.00019026790595954074,
"loss": 1.6148,
"step": 190
},
{
"epoch": 0.31140521603736865,
"grad_norm": 0.3863014578819275,
"learning_rate": 0.0001891744122471296,
"loss": 1.6695,
"step": 200
},
{
"epoch": 0.32697547683923706,
"grad_norm": 0.31040167808532715,
"learning_rate": 0.00018808091853471842,
"loss": 1.636,
"step": 210
},
{
"epoch": 0.34254573764110546,
"grad_norm": 0.34434080123901367,
"learning_rate": 0.0001869874248223073,
"loss": 1.6309,
"step": 220
},
{
"epoch": 0.3581159984429739,
"grad_norm": 0.35271912813186646,
"learning_rate": 0.00018589393110989613,
"loss": 1.5183,
"step": 230
},
{
"epoch": 0.37368625924484233,
"grad_norm": 0.3652435839176178,
"learning_rate": 0.00018480043739748498,
"loss": 1.5748,
"step": 240
},
{
"epoch": 0.3892565200467108,
"grad_norm": 0.3423960208892822,
"learning_rate": 0.00018370694368507383,
"loss": 1.6099,
"step": 250
},
{
"epoch": 0.4048267808485792,
"grad_norm": 0.3742424249649048,
"learning_rate": 0.00018261344997266268,
"loss": 1.6456,
"step": 260
},
{
"epoch": 0.42039704165044767,
"grad_norm": 0.36541569232940674,
"learning_rate": 0.0001815199562602515,
"loss": 1.5256,
"step": 270
},
{
"epoch": 0.4359673024523161,
"grad_norm": 0.32436609268188477,
"learning_rate": 0.00018042646254784036,
"loss": 1.6154,
"step": 280
},
{
"epoch": 0.4515375632541845,
"grad_norm": 0.3482036888599396,
"learning_rate": 0.0001793329688354292,
"loss": 1.5598,
"step": 290
},
{
"epoch": 0.46710782405605294,
"grad_norm": 0.32296016812324524,
"learning_rate": 0.00017823947512301804,
"loss": 1.557,
"step": 300
},
{
"epoch": 0.48267808485792135,
"grad_norm": 0.3758240044116974,
"learning_rate": 0.0001771459814106069,
"loss": 1.5237,
"step": 310
},
{
"epoch": 0.4982483456597898,
"grad_norm": 0.3592066466808319,
"learning_rate": 0.00017605248769819574,
"loss": 1.5331,
"step": 320
},
{
"epoch": 0.5138186064616582,
"grad_norm": 0.3503170907497406,
"learning_rate": 0.0001749589939857846,
"loss": 1.5598,
"step": 330
},
{
"epoch": 0.5293888672635266,
"grad_norm": 0.3589423894882202,
"learning_rate": 0.00017386550027337342,
"loss": 1.5694,
"step": 340
},
{
"epoch": 0.5449591280653951,
"grad_norm": 0.4173847436904907,
"learning_rate": 0.0001727720065609623,
"loss": 1.6035,
"step": 350
},
{
"epoch": 0.5605293888672636,
"grad_norm": 0.3429367244243622,
"learning_rate": 0.00017167851284855112,
"loss": 1.5636,
"step": 360
},
{
"epoch": 0.576099649669132,
"grad_norm": 0.3459906280040741,
"learning_rate": 0.00017058501913613997,
"loss": 1.4638,
"step": 370
},
{
"epoch": 0.5916699104710004,
"grad_norm": 0.36562731862068176,
"learning_rate": 0.00016949152542372882,
"loss": 1.5236,
"step": 380
},
{
"epoch": 0.6072401712728688,
"grad_norm": 0.4281690716743469,
"learning_rate": 0.00016839803171131768,
"loss": 1.5328,
"step": 390
},
{
"epoch": 0.6228104320747373,
"grad_norm": 0.3289957642555237,
"learning_rate": 0.0001673045379989065,
"loss": 1.5511,
"step": 400
},
{
"epoch": 0.6383806928766057,
"grad_norm": 0.34759992361068726,
"learning_rate": 0.00016621104428649535,
"loss": 1.5047,
"step": 410
},
{
"epoch": 0.6539509536784741,
"grad_norm": 0.36279717087745667,
"learning_rate": 0.0001651175505740842,
"loss": 1.5314,
"step": 420
},
{
"epoch": 0.6695212144803425,
"grad_norm": 0.3549306094646454,
"learning_rate": 0.00016402405686167306,
"loss": 1.5158,
"step": 430
},
{
"epoch": 0.6850914752822109,
"grad_norm": 0.37329429388046265,
"learning_rate": 0.00016293056314926188,
"loss": 1.4927,
"step": 440
},
{
"epoch": 0.7006617360840794,
"grad_norm": 0.3531767427921295,
"learning_rate": 0.00016183706943685076,
"loss": 1.5568,
"step": 450
},
{
"epoch": 0.7162319968859479,
"grad_norm": 0.3837789297103882,
"learning_rate": 0.0001607435757244396,
"loss": 1.5042,
"step": 460
},
{
"epoch": 0.7318022576878163,
"grad_norm": 0.3604554533958435,
"learning_rate": 0.00015965008201202844,
"loss": 1.5362,
"step": 470
},
{
"epoch": 0.7473725184896847,
"grad_norm": 0.4049264192581177,
"learning_rate": 0.0001585565882996173,
"loss": 1.5754,
"step": 480
},
{
"epoch": 0.7629427792915532,
"grad_norm": 0.3917747139930725,
"learning_rate": 0.00015746309458720614,
"loss": 1.5257,
"step": 490
},
{
"epoch": 0.7785130400934216,
"grad_norm": 0.37551721930503845,
"learning_rate": 0.00015636960087479497,
"loss": 1.5712,
"step": 500
},
{
"epoch": 0.7785130400934216,
"eval_loss": 1.5167639255523682,
"eval_runtime": 260.2191,
"eval_samples_per_second": 9.872,
"eval_steps_per_second": 1.237,
"step": 500
},
{
"epoch": 0.79408330089529,
"grad_norm": 0.4406639635562897,
"learning_rate": 0.00015527610716238382,
"loss": 1.5319,
"step": 510
},
{
"epoch": 0.8096535616971584,
"grad_norm": 0.37547779083251953,
"learning_rate": 0.00015418261344997267,
"loss": 1.535,
"step": 520
},
{
"epoch": 0.8252238224990268,
"grad_norm": 0.3571765422821045,
"learning_rate": 0.00015308911973756152,
"loss": 1.4872,
"step": 530
},
{
"epoch": 0.8407940833008953,
"grad_norm": 0.36505362391471863,
"learning_rate": 0.00015199562602515035,
"loss": 1.5346,
"step": 540
},
{
"epoch": 0.8563643441027637,
"grad_norm": 0.34871625900268555,
"learning_rate": 0.00015090213231273923,
"loss": 1.5243,
"step": 550
},
{
"epoch": 0.8719346049046321,
"grad_norm": 0.374802827835083,
"learning_rate": 0.00014980863860032805,
"loss": 1.5031,
"step": 560
},
{
"epoch": 0.8875048657065006,
"grad_norm": 0.41518205404281616,
"learning_rate": 0.00014871514488791688,
"loss": 1.5265,
"step": 570
},
{
"epoch": 0.903075126508369,
"grad_norm": 0.3830599784851074,
"learning_rate": 0.00014762165117550576,
"loss": 1.5212,
"step": 580
},
{
"epoch": 0.9186453873102375,
"grad_norm": 0.37755969166755676,
"learning_rate": 0.00014652815746309458,
"loss": 1.49,
"step": 590
},
{
"epoch": 0.9342156481121059,
"grad_norm": 0.3936685621738434,
"learning_rate": 0.00014543466375068343,
"loss": 1.5361,
"step": 600
},
{
"epoch": 0.9497859089139743,
"grad_norm": 0.35126620531082153,
"learning_rate": 0.0001443411700382723,
"loss": 1.5182,
"step": 610
},
{
"epoch": 0.9653561697158427,
"grad_norm": 0.4049900770187378,
"learning_rate": 0.00014324767632586114,
"loss": 1.5134,
"step": 620
},
{
"epoch": 0.9809264305177112,
"grad_norm": 0.39619603753089905,
"learning_rate": 0.00014215418261344996,
"loss": 1.4918,
"step": 630
},
{
"epoch": 0.9964966913195796,
"grad_norm": 0.38389134407043457,
"learning_rate": 0.00014106068890103882,
"loss": 1.5066,
"step": 640
},
{
"epoch": 1.0108991825613078,
"grad_norm": 0.3816758096218109,
"learning_rate": 0.00013996719518862767,
"loss": 1.4342,
"step": 650
},
{
"epoch": 1.0264694433631762,
"grad_norm": 0.3684230446815491,
"learning_rate": 0.00013887370147621652,
"loss": 1.509,
"step": 660
},
{
"epoch": 1.0420397041650449,
"grad_norm": 0.4102369248867035,
"learning_rate": 0.00013778020776380534,
"loss": 1.4011,
"step": 670
},
{
"epoch": 1.0576099649669133,
"grad_norm": 0.40174803137779236,
"learning_rate": 0.00013668671405139422,
"loss": 1.4155,
"step": 680
},
{
"epoch": 1.0731802257687817,
"grad_norm": 0.4212823510169983,
"learning_rate": 0.00013559322033898305,
"loss": 1.4078,
"step": 690
},
{
"epoch": 1.08875048657065,
"grad_norm": 0.41347914934158325,
"learning_rate": 0.0001344997266265719,
"loss": 1.4245,
"step": 700
},
{
"epoch": 1.1043207473725185,
"grad_norm": 0.3938431441783905,
"learning_rate": 0.00013340623291416075,
"loss": 1.3777,
"step": 710
},
{
"epoch": 1.119891008174387,
"grad_norm": 0.4172612130641937,
"learning_rate": 0.0001323127392017496,
"loss": 1.4435,
"step": 720
},
{
"epoch": 1.1354612689762553,
"grad_norm": 0.4305002987384796,
"learning_rate": 0.00013121924548933843,
"loss": 1.479,
"step": 730
},
{
"epoch": 1.1510315297781237,
"grad_norm": 0.4031375050544739,
"learning_rate": 0.0001301257517769273,
"loss": 1.4107,
"step": 740
},
{
"epoch": 1.1666017905799921,
"grad_norm": 0.4102884829044342,
"learning_rate": 0.00012903225806451613,
"loss": 1.5123,
"step": 750
},
{
"epoch": 1.1821720513818605,
"grad_norm": 0.44275203347206116,
"learning_rate": 0.00012793876435210499,
"loss": 1.426,
"step": 760
},
{
"epoch": 1.1977423121837292,
"grad_norm": 0.4467061758041382,
"learning_rate": 0.00012684527063969384,
"loss": 1.4495,
"step": 770
},
{
"epoch": 1.2133125729855976,
"grad_norm": 0.40034279227256775,
"learning_rate": 0.0001257517769272827,
"loss": 1.4541,
"step": 780
},
{
"epoch": 1.228882833787466,
"grad_norm": 0.42505356669425964,
"learning_rate": 0.00012465828321487152,
"loss": 1.4767,
"step": 790
},
{
"epoch": 1.2444530945893344,
"grad_norm": 0.3754604458808899,
"learning_rate": 0.00012356478950246037,
"loss": 1.4144,
"step": 800
},
{
"epoch": 1.2600233553912028,
"grad_norm": 0.3871001601219177,
"learning_rate": 0.00012247129579004922,
"loss": 1.4618,
"step": 810
},
{
"epoch": 1.2755936161930712,
"grad_norm": 0.416062593460083,
"learning_rate": 0.00012137780207763807,
"loss": 1.4258,
"step": 820
},
{
"epoch": 1.2911638769949396,
"grad_norm": 0.3869543969631195,
"learning_rate": 0.00012028430836522691,
"loss": 1.379,
"step": 830
},
{
"epoch": 1.306734137796808,
"grad_norm": 0.4507176876068115,
"learning_rate": 0.00011919081465281574,
"loss": 1.4011,
"step": 840
},
{
"epoch": 1.3223043985986767,
"grad_norm": 0.4298593997955322,
"learning_rate": 0.0001180973209404046,
"loss": 1.3897,
"step": 850
},
{
"epoch": 1.337874659400545,
"grad_norm": 0.4016563296318054,
"learning_rate": 0.00011700382722799344,
"loss": 1.4004,
"step": 860
},
{
"epoch": 1.3534449202024135,
"grad_norm": 0.45489302277565,
"learning_rate": 0.00011591033351558229,
"loss": 1.3974,
"step": 870
},
{
"epoch": 1.3690151810042819,
"grad_norm": 0.4141370356082916,
"learning_rate": 0.00011481683980317113,
"loss": 1.4417,
"step": 880
},
{
"epoch": 1.3845854418061503,
"grad_norm": 0.4654589295387268,
"learning_rate": 0.00011372334609075998,
"loss": 1.479,
"step": 890
},
{
"epoch": 1.4001557026080187,
"grad_norm": 0.4696764051914215,
"learning_rate": 0.00011262985237834882,
"loss": 1.4009,
"step": 900
},
{
"epoch": 1.415725963409887,
"grad_norm": 0.4807955324649811,
"learning_rate": 0.00011153635866593767,
"loss": 1.3897,
"step": 910
},
{
"epoch": 1.4312962242117555,
"grad_norm": 0.5069774985313416,
"learning_rate": 0.00011044286495352651,
"loss": 1.431,
"step": 920
},
{
"epoch": 1.446866485013624,
"grad_norm": 0.43647122383117676,
"learning_rate": 0.00010934937124111538,
"loss": 1.4042,
"step": 930
},
{
"epoch": 1.4624367458154923,
"grad_norm": 0.43918347358703613,
"learning_rate": 0.0001082558775287042,
"loss": 1.4403,
"step": 940
},
{
"epoch": 1.4780070066173607,
"grad_norm": 0.39581167697906494,
"learning_rate": 0.00010716238381629307,
"loss": 1.4027,
"step": 950
},
{
"epoch": 1.4935772674192291,
"grad_norm": 0.42234891653060913,
"learning_rate": 0.0001060688901038819,
"loss": 1.4291,
"step": 960
},
{
"epoch": 1.5091475282210975,
"grad_norm": 0.40230894088745117,
"learning_rate": 0.00010497539639147076,
"loss": 1.3847,
"step": 970
},
{
"epoch": 1.5247177890229662,
"grad_norm": 0.41914451122283936,
"learning_rate": 0.0001038819026790596,
"loss": 1.364,
"step": 980
},
{
"epoch": 1.5402880498248346,
"grad_norm": 0.4406910240650177,
"learning_rate": 0.00010278840896664845,
"loss": 1.4437,
"step": 990
},
{
"epoch": 1.555858310626703,
"grad_norm": 0.4232882857322693,
"learning_rate": 0.00010169491525423729,
"loss": 1.4319,
"step": 1000
},
{
"epoch": 1.555858310626703,
"eval_loss": 1.4522794485092163,
"eval_runtime": 260.6355,
"eval_samples_per_second": 9.857,
"eval_steps_per_second": 1.235,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 1929,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.998377278046208e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}