{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.03461205653302567,
"eval_steps": 5,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011537352177675222,
"eval_loss": 1.1585837602615356,
"eval_runtime": 16.5538,
"eval_samples_per_second": 22.049,
"eval_steps_per_second": 11.055,
"step": 1
},
{
"epoch": 0.003461205653302567,
"grad_norm": 1.0870860815048218,
"learning_rate": 6e-05,
"loss": 4.7529,
"step": 3
},
{
"epoch": 0.005768676088837612,
"eval_loss": 1.1217771768569946,
"eval_runtime": 16.7257,
"eval_samples_per_second": 21.823,
"eval_steps_per_second": 10.941,
"step": 5
},
{
"epoch": 0.006922411306605134,
"grad_norm": 1.0277127027511597,
"learning_rate": 0.00012,
"loss": 4.7248,
"step": 6
},
{
"epoch": 0.010383616959907702,
"grad_norm": 1.0448460578918457,
"learning_rate": 0.00018,
"loss": 4.2535,
"step": 9
},
{
"epoch": 0.011537352177675224,
"eval_loss": 0.9416375160217285,
"eval_runtime": 16.8164,
"eval_samples_per_second": 21.705,
"eval_steps_per_second": 10.882,
"step": 10
},
{
"epoch": 0.013844822613210269,
"grad_norm": 1.648591160774231,
"learning_rate": 0.00019510565162951537,
"loss": 3.8484,
"step": 12
},
{
"epoch": 0.017306028266512834,
"grad_norm": 2.317676544189453,
"learning_rate": 0.00017071067811865476,
"loss": 3.1999,
"step": 15
},
{
"epoch": 0.017306028266512834,
"eval_loss": 0.7792818546295166,
"eval_runtime": 16.8701,
"eval_samples_per_second": 21.636,
"eval_steps_per_second": 10.848,
"step": 15
},
{
"epoch": 0.020767233919815404,
"grad_norm": 1.5525565147399902,
"learning_rate": 0.00013090169943749476,
"loss": 3.2798,
"step": 18
},
{
"epoch": 0.02307470435535045,
"eval_loss": 0.7133286595344543,
"eval_runtime": 16.8737,
"eval_samples_per_second": 21.631,
"eval_steps_per_second": 10.845,
"step": 20
},
{
"epoch": 0.02422843957311797,
"grad_norm": 1.2597628831863403,
"learning_rate": 8.435655349597689e-05,
"loss": 2.8077,
"step": 21
},
{
"epoch": 0.027689645226420537,
"grad_norm": 1.202157974243164,
"learning_rate": 4.12214747707527e-05,
"loss": 2.8526,
"step": 24
},
{
"epoch": 0.02884338044418806,
"eval_loss": 0.6947705745697021,
"eval_runtime": 16.8902,
"eval_samples_per_second": 21.61,
"eval_steps_per_second": 10.835,
"step": 25
},
{
"epoch": 0.031150850879723104,
"grad_norm": 1.0633991956710815,
"learning_rate": 1.0899347581163221e-05,
"loss": 2.864,
"step": 27
},
{
"epoch": 0.03461205653302567,
"grad_norm": 1.1619864702224731,
"learning_rate": 0.0,
"loss": 2.9061,
"step": 30
},
{
"epoch": 0.03461205653302567,
"eval_loss": 0.6916398406028748,
"eval_runtime": 16.8943,
"eval_samples_per_second": 21.605,
"eval_steps_per_second": 10.832,
"step": 30
}
],
"logging_steps": 3,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2625889099579392.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}