gr00t-bread1-pxdp2avmh1 / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 360,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2777777777777778,
"grad_norm": 1.9213794469833374,
"learning_rate": 5e-05,
"loss": 0.9704,
"step": 10
},
{
"epoch": 0.5555555555555556,
"grad_norm": 2.6781961917877197,
"learning_rate": 9.999789047591562e-05,
"loss": 0.4178,
"step": 20
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.419345498085022,
"learning_rate": 9.974496289936769e-05,
"loss": 0.354,
"step": 30
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.947891116142273,
"learning_rate": 9.90725746626209e-05,
"loss": 0.3275,
"step": 40
},
{
"epoch": 1.3888888888888888,
"grad_norm": 1.2710075378417969,
"learning_rate": 9.798639549376945e-05,
"loss": 0.2647,
"step": 50
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.4519425630569458,
"learning_rate": 9.64955842986544e-05,
"loss": 0.2325,
"step": 60
},
{
"epoch": 1.9444444444444444,
"grad_norm": 1.5266393423080444,
"learning_rate": 9.46127119309197e-05,
"loss": 0.2325,
"step": 70
},
{
"epoch": 2.2222222222222223,
"grad_norm": 1.442086100578308,
"learning_rate": 9.23536551917611e-05,
"loss": 0.2141,
"step": 80
},
{
"epoch": 2.5,
"grad_norm": 1.3214430809020996,
"learning_rate": 8.9737462953185e-05,
"loss": 0.1961,
"step": 90
},
{
"epoch": 2.7777777777777777,
"grad_norm": 1.3879374265670776,
"learning_rate": 8.678619553365659e-05,
"loss": 0.1953,
"step": 100
},
{
"epoch": 3.0555555555555554,
"grad_norm": 1.3300167322158813,
"learning_rate": 8.352473868055746e-05,
"loss": 0.1732,
"step": 110
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.8237183690071106,
"learning_rate": 7.998059372799409e-05,
"loss": 0.1702,
"step": 120
},
{
"epoch": 3.611111111111111,
"grad_norm": 1.216146469116211,
"learning_rate": 7.618364569939391e-05,
"loss": 0.1519,
"step": 130
},
{
"epoch": 3.888888888888889,
"grad_norm": 0.9486384987831116,
"learning_rate": 7.21659113102993e-05,
"loss": 0.1566,
"step": 140
},
{
"epoch": 4.166666666666667,
"grad_norm": 0.7426595687866211,
"learning_rate": 6.796126899625688e-05,
"loss": 0.1495,
"step": 150
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.8088931441307068,
"learning_rate": 6.360517324226676e-05,
"loss": 0.1478,
"step": 160
},
{
"epoch": 4.722222222222222,
"grad_norm": 1.0129106044769287,
"learning_rate": 5.9134355622630356e-05,
"loss": 0.1327,
"step": 170
},
{
"epoch": 5.0,
"grad_norm": 0.8650373220443726,
"learning_rate": 5.458651507209518e-05,
"loss": 0.1381,
"step": 180
},
{
"epoch": 5.277777777777778,
"grad_norm": 0.7770701050758362,
"learning_rate": 5e-05,
"loss": 0.1403,
"step": 190
},
{
"epoch": 5.555555555555555,
"grad_norm": 0.7225720286369324,
"learning_rate": 4.541348492790482e-05,
"loss": 0.1359,
"step": 200
},
{
"epoch": 5.833333333333333,
"grad_norm": 1.0390045642852783,
"learning_rate": 4.086564437736966e-05,
"loss": 0.1274,
"step": 210
},
{
"epoch": 6.111111111111111,
"grad_norm": 1.121009349822998,
"learning_rate": 3.639482675773324e-05,
"loss": 0.1117,
"step": 220
},
{
"epoch": 6.388888888888889,
"grad_norm": 0.7137916088104248,
"learning_rate": 3.203873100374314e-05,
"loss": 0.1101,
"step": 230
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.5132378935813904,
"learning_rate": 2.783408868970071e-05,
"loss": 0.1157,
"step": 240
},
{
"epoch": 6.944444444444445,
"grad_norm": 0.5895885229110718,
"learning_rate": 2.381635430060611e-05,
"loss": 0.1041,
"step": 250
},
{
"epoch": 7.222222222222222,
"grad_norm": 0.5823981165885925,
"learning_rate": 2.0019406272005915e-05,
"loss": 0.1066,
"step": 260
},
{
"epoch": 7.5,
"grad_norm": 0.6076810359954834,
"learning_rate": 1.6475261319442553e-05,
"loss": 0.1131,
"step": 270
},
{
"epoch": 7.777777777777778,
"grad_norm": 0.5810424089431763,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.0964,
"step": 280
},
{
"epoch": 8.055555555555555,
"grad_norm": 0.5393444299697876,
"learning_rate": 1.0262537046815018e-05,
"loss": 0.0971,
"step": 290
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.5941327214241028,
"learning_rate": 7.646344808238903e-06,
"loss": 0.0933,
"step": 300
},
{
"epoch": 8.61111111111111,
"grad_norm": 0.4310052692890167,
"learning_rate": 5.387288069080299e-06,
"loss": 0.1067,
"step": 310
},
{
"epoch": 8.88888888888889,
"grad_norm": 0.4362979233264923,
"learning_rate": 3.504415701345615e-06,
"loss": 0.0891,
"step": 320
},
{
"epoch": 9.166666666666666,
"grad_norm": 0.46284258365631104,
"learning_rate": 2.013604506230554e-06,
"loss": 0.1027,
"step": 330
},
{
"epoch": 9.444444444444445,
"grad_norm": 0.5020444393157959,
"learning_rate": 9.274253373791064e-07,
"loss": 0.0852,
"step": 340
},
{
"epoch": 9.722222222222221,
"grad_norm": 0.5854896903038025,
"learning_rate": 2.55037100632316e-07,
"loss": 0.0919,
"step": 350
},
{
"epoch": 10.0,
"grad_norm": 0.7395591735839844,
"learning_rate": 2.109524084381587e-09,
"loss": 0.0918,
"step": 360
},
{
"epoch": 10.0,
"step": 360,
"total_flos": 0.0,
"train_loss": 0.1817794324623214,
"train_runtime": 394.9899,
"train_samples_per_second": 44.432,
"train_steps_per_second": 0.911
}
],
"logging_steps": 10,
"max_steps": 360,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 3000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 49,
"trial_name": null,
"trial_params": null
}
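
The block below is a small illustrative sketch, not part of the uploaded file: it assumes the JSON above has been saved locally as trainer_state.json (matching the path in the header) and uses only the Python standard library to read log_history back and print the loss and learning-rate trajectory that was recorded every logging_steps (10) optimizer steps.

# Minimal sketch: inspect trainer_state.json with the standard library only.
# The filename is an assumption based on the repo path above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are the per-interval logs; in this file the final
# entry instead carries the run-level summary (train_loss, train_runtime, ...).
step_logs = [e for e in state["log_history"] if "loss" in e]
summary = state["log_history"][-1]

print(f"epochs: {state['epoch']}  global_step: {state['global_step']}")
for entry in step_logs:
    print(f"step {entry['step']:>4}  epoch {entry['epoch']:6.3f}  "
          f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")

print(f"train_loss (mean): {summary['train_loss']:.4f}  "
      f"runtime: {summary['train_runtime']:.1f}s  "
      f"samples/s: {summary['train_samples_per_second']:.1f}")

Run against this file, the loop would show the loss falling from 0.9704 at step 10 to 0.0918 at step 360 while the learning rate warms up to about 1e-4 and then decays back toward zero.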