{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.021460954475950318,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00010730477237975159,
      "eval_loss": 6.268192291259766,
      "eval_runtime": 13.4741,
      "eval_samples_per_second": 291.226,
      "eval_steps_per_second": 145.613,
      "step": 1
    },
    {
      "epoch": 0.001073047723797516,
      "grad_norm": 9355.642578125,
      "learning_rate": 0.0002,
      "loss": 22.5946,
      "step": 10
    },
    {
      "epoch": 0.002146095447595032,
      "grad_norm": 6737.57470703125,
      "learning_rate": 0.0002,
      "loss": 19.6532,
      "step": 20
    },
    {
      "epoch": 0.0032191431713925476,
      "grad_norm": 20781.78125,
      "learning_rate": 0.0002,
      "loss": 16.7741,
      "step": 30
    },
    {
      "epoch": 0.004292190895190064,
      "grad_norm": 11265.677734375,
      "learning_rate": 0.0002,
      "loss": 15.907,
      "step": 40
    },
    {
      "epoch": 0.0053652386189875795,
      "grad_norm": 18117.19140625,
      "learning_rate": 0.0002,
      "loss": 15.2936,
      "step": 50
    },
    {
      "epoch": 0.0053652386189875795,
      "eval_loss": 3.753488302230835,
      "eval_runtime": 13.1495,
      "eval_samples_per_second": 298.414,
      "eval_steps_per_second": 149.207,
      "step": 50
    },
    {
      "epoch": 0.006438286342785095,
      "grad_norm": 12260.19140625,
      "learning_rate": 0.0002,
      "loss": 15.0047,
      "step": 60
    },
    {
      "epoch": 0.0075113340665826115,
      "grad_norm": 14937.4189453125,
      "learning_rate": 0.0002,
      "loss": 16.1831,
      "step": 70
    },
    {
      "epoch": 0.008584381790380128,
      "grad_norm": 12745.7001953125,
      "learning_rate": 0.0002,
      "loss": 16.4468,
      "step": 80
    },
    {
      "epoch": 0.009657429514177644,
      "grad_norm": 9584.7265625,
      "learning_rate": 0.0002,
      "loss": 16.3645,
      "step": 90
    },
    {
      "epoch": 0.010730477237975159,
      "grad_norm": 10061.3291015625,
      "learning_rate": 0.0002,
      "loss": 16.2659,
      "step": 100
    },
    {
      "epoch": 0.010730477237975159,
      "eval_loss": 4.0766682624816895,
      "eval_runtime": 13.2944,
      "eval_samples_per_second": 295.163,
      "eval_steps_per_second": 147.581,
      "step": 100
    },
    {
      "epoch": 0.011803524961772675,
      "grad_norm": 7551.27392578125,
      "learning_rate": 0.0002,
      "loss": 15.8545,
      "step": 110
    },
    {
      "epoch": 0.01287657268557019,
      "grad_norm": 9171.0,
      "learning_rate": 0.0002,
      "loss": 14.3188,
      "step": 120
    },
    {
      "epoch": 0.013949620409367706,
      "grad_norm": 8303.6533203125,
      "learning_rate": 0.0002,
      "loss": 13.6695,
      "step": 130
    },
    {
      "epoch": 0.015022668133165223,
      "grad_norm": 7741.68896484375,
      "learning_rate": 0.0002,
      "loss": 13.0679,
      "step": 140
    },
    {
      "epoch": 0.016095715856962737,
      "grad_norm": 9833.615234375,
      "learning_rate": 0.0002,
      "loss": 13.3664,
      "step": 150
    },
    {
      "epoch": 0.016095715856962737,
      "eval_loss": 3.362622022628784,
      "eval_runtime": 13.3642,
      "eval_samples_per_second": 293.62,
      "eval_steps_per_second": 146.81,
      "step": 150
    },
    {
      "epoch": 0.017168763580760256,
      "grad_norm": 8797.31640625,
      "learning_rate": 0.0002,
      "loss": 14.0072,
      "step": 160
    },
    {
      "epoch": 0.01824181130455777,
      "grad_norm": 9056.8310546875,
      "learning_rate": 0.0002,
      "loss": 13.368,
      "step": 170
    },
    {
      "epoch": 0.019314859028355287,
      "grad_norm": 7576.951171875,
      "learning_rate": 0.0002,
      "loss": 13.6952,
      "step": 180
    },
    {
      "epoch": 0.020387906752152803,
      "grad_norm": 5631.87890625,
      "learning_rate": 0.0002,
      "loss": 12.4032,
      "step": 190
    },
    {
      "epoch": 0.021460954475950318,
      "grad_norm": 7482.162109375,
      "learning_rate": 0.0002,
      "loss": 12.4731,
      "step": 200
    },
    {
      "epoch": 0.021460954475950318,
      "eval_loss": 3.307003974914551,
      "eval_runtime": 13.2546,
      "eval_samples_per_second": 296.048,
      "eval_steps_per_second": 148.024,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 37980261580800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}