{
  "best_metric": null,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0684931506849313,
  "eval_steps": 25,
  "global_step": 42,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0730593607305936,
      "grad_norm": null,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.0730593607305936,
      "eval_loss": null,
      "eval_runtime": 4.2998,
      "eval_samples_per_second": 11.628,
      "eval_steps_per_second": 3.023,
      "step": 1
    },
    {
      "epoch": 0.1461187214611872,
      "grad_norm": null,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.2191780821917808,
      "grad_norm": null,
      "learning_rate": 9.986128001799077e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.2922374429223744,
      "grad_norm": null,
      "learning_rate": 9.94459753267812e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.365296803652968,
      "grad_norm": null,
      "learning_rate": 9.875664641789545e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.4383561643835616,
      "grad_norm": null,
      "learning_rate": 9.779754323328192e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.5114155251141552,
      "grad_norm": null,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.5844748858447488,
      "grad_norm": null,
      "learning_rate": 9.509529358847655e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.6575342465753424,
      "grad_norm": null,
      "learning_rate": 9.336880739593416e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.730593607305936,
      "grad_norm": null,
      "learning_rate": 9.140576474687264e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.8036529680365296,
      "grad_norm": null,
      "learning_rate": 8.921826845200139e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.8767123287671232,
      "grad_norm": null,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.9497716894977168,
      "grad_norm": null,
      "learning_rate": 8.422516217485826e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 1.0228310502283104,
      "grad_norm": null,
      "learning_rate": 8.14503363531613e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 1.095890410958904,
      "grad_norm": null,
      "learning_rate": 7.85124354122177e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 1.1689497716894977,
      "grad_norm": null,
      "learning_rate": 7.542957248827961e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 1.2420091324200913,
      "grad_norm": null,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 1.3150684931506849,
      "grad_norm": null,
      "learning_rate": 6.890576474687263e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 1.3881278538812785,
      "grad_norm": null,
      "learning_rate": 6.550504137351576e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 1.461187214611872,
      "grad_norm": null,
      "learning_rate": 6.203955092681039e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 1.5342465753424657,
      "grad_norm": null,
      "learning_rate": 5.8530659307753036e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 1.6073059360730593,
      "grad_norm": null,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 1.6803652968036529,
      "grad_norm": null,
      "learning_rate": 5.1469340692246995e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 1.7534246575342465,
      "grad_norm": null,
      "learning_rate": 4.7960449073189606e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 1.82648401826484,
      "grad_norm": null,
      "learning_rate": 4.4494958626484276e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 1.82648401826484,
      "eval_loss": null,
      "eval_runtime": 4.192,
      "eval_samples_per_second": 11.927,
      "eval_steps_per_second": 3.101,
      "step": 25
    },
    {
      "epoch": 1.8995433789954337,
      "grad_norm": null,
      "learning_rate": 4.109423525312738e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 1.9726027397260273,
      "grad_norm": null,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 2.045662100456621,
      "grad_norm": null,
      "learning_rate": 3.45704275117204e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 2.1187214611872145,
      "grad_norm": null,
      "learning_rate": 3.1487564587782306e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 2.191780821917808,
      "grad_norm": null,
      "learning_rate": 2.854966364683872e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 2.2648401826484017,
      "grad_norm": null,
      "learning_rate": 2.577483782514174e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 2.3378995433789953,
      "grad_norm": null,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 2.410958904109589,
      "grad_norm": null,
      "learning_rate": 2.0781731547998614e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 2.4840182648401825,
      "grad_norm": null,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 2.557077625570776,
      "grad_norm": null,
      "learning_rate": 1.6631192604065855e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 2.6301369863013697,
      "grad_norm": null,
      "learning_rate": 1.490470641152345e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 2.7031963470319633,
      "grad_norm": null,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 2.776255707762557,
      "grad_norm": null,
      "learning_rate": 1.2202456766718093e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 2.8493150684931505,
      "grad_norm": null,
      "learning_rate": 1.1243353582104556e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 2.922374429223744,
      "grad_norm": null,
      "learning_rate": 1.0554024673218807e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 2.9954337899543377,
      "grad_norm": null,
      "learning_rate": 1.0138719982009242e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 3.0684931506849313,
      "grad_norm": null,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 42
    }
  ],
  "logging_steps": 1,
  "max_steps": 42,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.752737924158587e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}