{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.3427592116538132,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013710368466152529,
      "grad_norm": NaN,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.013710368466152529,
      "eval_loss": NaN,
      "eval_runtime": 11.161,
      "eval_samples_per_second": 4.48,
      "eval_steps_per_second": 4.48,
      "step": 1
    },
    {
      "epoch": 0.027420736932305057,
      "grad_norm": NaN,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.04113110539845758,
      "grad_norm": NaN,
      "learning_rate": 9.958086757163489e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.054841473864610114,
      "grad_norm": NaN,
      "learning_rate": 9.833127793065098e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.06855184233076264,
      "grad_norm": NaN,
      "learning_rate": 9.627450856774539e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.08226221079691516,
      "grad_norm": NaN,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.0959725792630677,
      "grad_norm": NaN,
      "learning_rate": 8.990700808169889e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.10968294772922023,
      "grad_norm": NaN,
      "learning_rate": 8.571489144483944e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.12339331619537275,
      "grad_norm": NaN,
      "learning_rate": 8.095061449516903e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.13710368466152528,
      "grad_norm": NaN,
      "learning_rate": 7.570292669790186e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.15081405312767782,
      "grad_norm": NaN,
      "learning_rate": 7.006958254769438e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.16452442159383032,
      "grad_norm": NaN,
      "learning_rate": 6.415552058736854e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.17823479005998286,
      "grad_norm": NaN,
      "learning_rate": 5.80709086014102e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.1919451585261354,
      "grad_norm": NaN,
      "learning_rate": 5.192909139858981e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.20565552699228792,
      "grad_norm": NaN,
      "learning_rate": 4.584447941263149e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.21936589545844046,
      "grad_norm": NaN,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.23307626392459296,
      "grad_norm": NaN,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.2467866323907455,
      "grad_norm": NaN,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.26049700085689803,
      "grad_norm": NaN,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.27420736932305056,
      "grad_norm": NaN,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.2879177377892031,
      "grad_norm": NaN,
      "learning_rate": 1.6551126795408016e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.30162810625535563,
      "grad_norm": NaN,
      "learning_rate": 1.3725491432254624e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.31533847472150817,
      "grad_norm": NaN,
      "learning_rate": 1.1668722069349041e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.32904884318766064,
      "grad_norm": NaN,
      "learning_rate": 1.0419132428365116e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.3427592116538132,
      "grad_norm": NaN,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.3427592116538132,
      "eval_loss": NaN,
      "eval_runtime": 11.2908,
      "eval_samples_per_second": 4.428,
      "eval_steps_per_second": 4.428,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.03007243108352e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}