{
  "best_metric": 0.9981600735970562,
  "best_model_checkpoint": "data/train-test/bert-large-output//model/checkpoint-1676",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1676,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6,
      "grad_norm": 0.902126133441925,
      "learning_rate": 1.4033412887828164e-05,
      "loss": 0.0252,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": null,
      "eval_f1": 0.9935720844811754,
      "eval_loss": 0.005533559713512659,
      "eval_precision": 0.9917506874427131,
      "eval_recall": 0.9954001839926403,
      "eval_runtime": 4.2367,
      "eval_samples_per_second": 228.952,
      "eval_steps_per_second": 7.317,
      "step": 838
    },
    {
      "epoch": 1.19,
      "grad_norm": 0.0071577695198357105,
      "learning_rate": 8.066825775656326e-06,
      "loss": 0.0047,
      "step": 1000
    },
    {
      "epoch": 1.79,
      "grad_norm": 0.009138579480350018,
      "learning_rate": 2.100238663484487e-06,
      "loss": 0.0027,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": null,
      "eval_f1": 0.9967845659163987,
      "eval_loss": 0.003087937133386731,
      "eval_precision": 0.9954128440366973,
      "eval_recall": 0.9981600735970562,
      "eval_runtime": 4.2113,
      "eval_samples_per_second": 230.333,
      "eval_steps_per_second": 7.361,
      "step": 1676
    }
  ],
  "logging_steps": 500,
  "max_steps": 1676,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 6896325795708240.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}