{
"best_metric": 11.919342994689941,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.09103322712790168,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0018206645425580337,
"grad_norm": 0.03413726016879082,
"learning_rate": 1e-05,
"loss": 11.9323,
"step": 1
},
{
"epoch": 0.0018206645425580337,
"eval_loss": 11.93479061126709,
"eval_runtime": 5.058,
"eval_samples_per_second": 182.88,
"eval_steps_per_second": 45.868,
"step": 1
},
{
"epoch": 0.0036413290851160674,
"grad_norm": 0.04938232898712158,
"learning_rate": 2e-05,
"loss": 11.9373,
"step": 2
},
{
"epoch": 0.005461993627674101,
"grad_norm": 0.03765185922384262,
"learning_rate": 3e-05,
"loss": 11.9313,
"step": 3
},
{
"epoch": 0.007282658170232135,
"grad_norm": 0.03305572271347046,
"learning_rate": 4e-05,
"loss": 11.9374,
"step": 4
},
{
"epoch": 0.009103322712790168,
"grad_norm": 0.04273687303066254,
"learning_rate": 5e-05,
"loss": 11.9327,
"step": 5
},
{
"epoch": 0.010923987255348202,
"grad_norm": 0.04251832887530327,
"learning_rate": 6e-05,
"loss": 11.9344,
"step": 6
},
{
"epoch": 0.012744651797906235,
"grad_norm": 0.04986019805073738,
"learning_rate": 7e-05,
"loss": 11.9337,
"step": 7
},
{
"epoch": 0.01456531634046427,
"grad_norm": 0.0382511280477047,
"learning_rate": 8e-05,
"loss": 11.9358,
"step": 8
},
{
"epoch": 0.016385980883022302,
"grad_norm": 0.04335345700383186,
"learning_rate": 9e-05,
"loss": 11.9329,
"step": 9
},
{
"epoch": 0.018206645425580335,
"grad_norm": 0.043804410845041275,
"learning_rate": 0.0001,
"loss": 11.9339,
"step": 10
},
{
"epoch": 0.02002730996813837,
"grad_norm": 0.050628215074539185,
"learning_rate": 9.999316524962345e-05,
"loss": 11.9351,
"step": 11
},
{
"epoch": 0.021847974510696404,
"grad_norm": 0.043292414397001266,
"learning_rate": 9.997266286704631e-05,
"loss": 11.9353,
"step": 12
},
{
"epoch": 0.023668639053254437,
"grad_norm": 0.05560588464140892,
"learning_rate": 9.993849845741524e-05,
"loss": 11.9345,
"step": 13
},
{
"epoch": 0.02548930359581247,
"grad_norm": 0.06867741793394089,
"learning_rate": 9.989068136093873e-05,
"loss": 11.9322,
"step": 14
},
{
"epoch": 0.027309968138370506,
"grad_norm": 0.07315599173307419,
"learning_rate": 9.98292246503335e-05,
"loss": 11.9318,
"step": 15
},
{
"epoch": 0.02913063268092854,
"grad_norm": 0.07529256492853165,
"learning_rate": 9.975414512725057e-05,
"loss": 11.9328,
"step": 16
},
{
"epoch": 0.030951297223486572,
"grad_norm": 0.07898218929767609,
"learning_rate": 9.966546331768191e-05,
"loss": 11.9336,
"step": 17
},
{
"epoch": 0.032771961766044605,
"grad_norm": 0.08530876040458679,
"learning_rate": 9.956320346634876e-05,
"loss": 11.9336,
"step": 18
},
{
"epoch": 0.03459262630860264,
"grad_norm": 0.08568832278251648,
"learning_rate": 9.944739353007344e-05,
"loss": 11.932,
"step": 19
},
{
"epoch": 0.03641329085116067,
"grad_norm": 0.10579682141542435,
"learning_rate": 9.931806517013612e-05,
"loss": 11.9316,
"step": 20
},
{
"epoch": 0.03823395539371871,
"grad_norm": 0.11739271134138107,
"learning_rate": 9.917525374361912e-05,
"loss": 11.9312,
"step": 21
},
{
"epoch": 0.04005461993627674,
"grad_norm": 0.11046414822340012,
"learning_rate": 9.901899829374047e-05,
"loss": 11.9301,
"step": 22
},
{
"epoch": 0.041875284478834776,
"grad_norm": 0.1442708969116211,
"learning_rate": 9.884934153917997e-05,
"loss": 11.9328,
"step": 23
},
{
"epoch": 0.04369594902139281,
"grad_norm": 0.14422491192817688,
"learning_rate": 9.86663298624003e-05,
"loss": 11.9277,
"step": 24
},
{
"epoch": 0.04551661356395084,
"grad_norm": 0.16111890971660614,
"learning_rate": 9.847001329696653e-05,
"loss": 11.9291,
"step": 25
},
{
"epoch": 0.047337278106508875,
"grad_norm": 0.13617178797721863,
"learning_rate": 9.826044551386744e-05,
"loss": 11.9292,
"step": 26
},
{
"epoch": 0.04915794264906691,
"grad_norm": 0.10961776971817017,
"learning_rate": 9.803768380684242e-05,
"loss": 11.9289,
"step": 27
},
{
"epoch": 0.05097860719162494,
"grad_norm": 0.13653425872325897,
"learning_rate": 9.780178907671789e-05,
"loss": 11.9262,
"step": 28
},
{
"epoch": 0.05279927173418298,
"grad_norm": 0.1738469898700714,
"learning_rate": 9.755282581475769e-05,
"loss": 11.923,
"step": 29
},
{
"epoch": 0.05461993627674101,
"grad_norm": 0.13671448826789856,
"learning_rate": 9.729086208503174e-05,
"loss": 11.924,
"step": 30
},
{
"epoch": 0.056440600819299046,
"grad_norm": 0.16584277153015137,
"learning_rate": 9.701596950580806e-05,
"loss": 11.9274,
"step": 31
},
{
"epoch": 0.05826126536185708,
"grad_norm": 0.1861305832862854,
"learning_rate": 9.672822322997305e-05,
"loss": 11.9206,
"step": 32
},
{
"epoch": 0.06008192990441511,
"grad_norm": 0.23096109926700592,
"learning_rate": 9.642770192448536e-05,
"loss": 11.9235,
"step": 33
},
{
"epoch": 0.061902594446973144,
"grad_norm": 0.1698852926492691,
"learning_rate": 9.611448774886924e-05,
"loss": 11.919,
"step": 34
},
{
"epoch": 0.06372325898953118,
"grad_norm": 0.13847710192203522,
"learning_rate": 9.578866633275288e-05,
"loss": 11.9193,
"step": 35
},
{
"epoch": 0.06554392353208921,
"grad_norm": 0.20390431582927704,
"learning_rate": 9.545032675245813e-05,
"loss": 11.9244,
"step": 36
},
{
"epoch": 0.06736458807464725,
"grad_norm": 0.13347230851650238,
"learning_rate": 9.509956150664796e-05,
"loss": 11.9165,
"step": 37
},
{
"epoch": 0.06918525261720528,
"grad_norm": 0.13313481211662292,
"learning_rate": 9.473646649103818e-05,
"loss": 11.9185,
"step": 38
},
{
"epoch": 0.07100591715976332,
"grad_norm": 0.14947770535945892,
"learning_rate": 9.43611409721806e-05,
"loss": 11.9168,
"step": 39
},
{
"epoch": 0.07282658170232134,
"grad_norm": 0.10729013383388519,
"learning_rate": 9.397368756032445e-05,
"loss": 11.9199,
"step": 40
},
{
"epoch": 0.07464724624487938,
"grad_norm": 0.09050336480140686,
"learning_rate": 9.357421218136386e-05,
"loss": 11.9181,
"step": 41
},
{
"epoch": 0.07646791078743742,
"grad_norm": 0.14425118267536163,
"learning_rate": 9.316282404787871e-05,
"loss": 11.9193,
"step": 42
},
{
"epoch": 0.07828857532999545,
"grad_norm": 0.09219750761985779,
"learning_rate": 9.273963562927695e-05,
"loss": 11.915,
"step": 43
},
{
"epoch": 0.08010923987255349,
"grad_norm": 0.18210937082767487,
"learning_rate": 9.230476262104677e-05,
"loss": 11.9149,
"step": 44
},
{
"epoch": 0.08192990441511151,
"grad_norm": 0.10966989398002625,
"learning_rate": 9.185832391312644e-05,
"loss": 11.9171,
"step": 45
},
{
"epoch": 0.08375056895766955,
"grad_norm": 0.14103111624717712,
"learning_rate": 9.140044155740101e-05,
"loss": 11.9131,
"step": 46
},
{
"epoch": 0.08557123350022758,
"grad_norm": 0.1283736675977707,
"learning_rate": 9.093124073433463e-05,
"loss": 11.9146,
"step": 47
},
{
"epoch": 0.08739189804278562,
"grad_norm": 0.16424375772476196,
"learning_rate": 9.045084971874738e-05,
"loss": 11.9164,
"step": 48
},
{
"epoch": 0.08921256258534364,
"grad_norm": 0.2161012589931488,
"learning_rate": 8.995939984474624e-05,
"loss": 11.9115,
"step": 49
},
{
"epoch": 0.09103322712790168,
"grad_norm": 0.2343643307685852,
"learning_rate": 8.945702546981969e-05,
"loss": 11.9071,
"step": 50
},
{
"epoch": 0.09103322712790168,
"eval_loss": 11.919342994689941,
"eval_runtime": 5.0387,
"eval_samples_per_second": 183.579,
"eval_steps_per_second": 46.044,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 241041408000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}