{
  "best_metric": 0.4874384105205536,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 57,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 1.5990679264068604,
      "learning_rate": 1e-05,
      "loss": 0.7011,
      "step": 1
    },
    {
      "epoch": 0.05263157894736842,
      "eval_loss": 0.8362977504730225,
      "eval_runtime": 0.6772,
      "eval_samples_per_second": 47.256,
      "eval_steps_per_second": 11.814,
      "step": 1
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 1.8811979293823242,
      "learning_rate": 2e-05,
      "loss": 0.8021,
      "step": 2
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 2.2759976387023926,
      "learning_rate": 3e-05,
      "loss": 0.8729,
      "step": 3
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 3.0870163440704346,
      "learning_rate": 4e-05,
      "loss": 0.9876,
      "step": 4
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 1.3973842859268188,
      "learning_rate": 5e-05,
      "loss": 0.6165,
      "step": 5
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 1.4728643894195557,
      "learning_rate": 6e-05,
      "loss": 0.6993,
      "step": 6
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 1.3138588666915894,
      "learning_rate": 7e-05,
      "loss": 0.5367,
      "step": 7
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 1.9514648914337158,
      "learning_rate": 8e-05,
      "loss": 0.681,
      "step": 8
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 1.3160473108291626,
      "learning_rate": 9e-05,
      "loss": 0.504,
      "step": 9
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 1.2779755592346191,
      "learning_rate": 0.0001,
      "loss": 0.4904,
      "step": 10
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 1.6623477935791016,
      "learning_rate": 9.988834393115767e-05,
      "loss": 0.7162,
      "step": 11
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 1.4186451435089111,
      "learning_rate": 9.9553874407739e-05,
      "loss": 0.4598,
      "step": 12
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 1.1295267343521118,
      "learning_rate": 9.899808525182935e-05,
      "loss": 0.5673,
      "step": 13
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 1.3112547397613525,
      "learning_rate": 9.822345875271883e-05,
      "loss": 0.601,
      "step": 14
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.9896531701087952,
      "learning_rate": 9.723345458039594e-05,
      "loss": 0.4243,
      "step": 15
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 1.2591720819473267,
      "learning_rate": 9.603249433382144e-05,
      "loss": 0.5515,
      "step": 16
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 1.0034816265106201,
      "learning_rate": 9.462594179299406e-05,
      "loss": 0.5372,
      "step": 17
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.9740633368492126,
      "learning_rate": 9.302007896300698e-05,
      "loss": 0.5009,
      "step": 18
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.147552251815796,
      "learning_rate": 9.122207801708802e-05,
      "loss": 0.4853,
      "step": 19
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 0.6737469434738159,
      "learning_rate": 8.923996926393305e-05,
      "loss": 0.3504,
      "step": 20
    },
    {
      "epoch": 1.1052631578947367,
      "grad_norm": 0.7563154101371765,
      "learning_rate": 8.708260528239788e-05,
      "loss": 0.4167,
      "step": 21
    },
    {
      "epoch": 1.1578947368421053,
      "grad_norm": 0.8553800582885742,
      "learning_rate": 8.475962138373213e-05,
      "loss": 0.4623,
      "step": 22
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 0.840371310710907,
      "learning_rate": 8.228139257794012e-05,
      "loss": 0.3146,
      "step": 23
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 0.8417588472366333,
      "learning_rate": 7.965898723646776e-05,
      "loss": 0.3939,
      "step": 24
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 0.7632646560668945,
      "learning_rate": 7.690411765816864e-05,
      "loss": 0.3994,
      "step": 25
    },
    {
      "epoch": 1.368421052631579,
      "grad_norm": 0.8661929965019226,
      "learning_rate": 7.402908775933419e-05,
      "loss": 0.414,
      "step": 26
    },
    {
      "epoch": 1.4210526315789473,
      "grad_norm": 1.0993640422821045,
      "learning_rate": 7.104673812141675e-05,
      "loss": 0.3882,
      "step": 27
    },
    {
      "epoch": 1.4736842105263157,
      "grad_norm": 0.7187374234199524,
      "learning_rate": 6.797038864187564e-05,
      "loss": 0.3142,
      "step": 28
    },
    {
      "epoch": 1.526315789473684,
      "grad_norm": 0.9711386561393738,
      "learning_rate": 6.481377904428171e-05,
      "loss": 0.4528,
      "step": 29
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 0.9168631434440613,
      "learning_rate": 6.159100751337642e-05,
      "loss": 0.329,
      "step": 30
    },
    {
      "epoch": 1.631578947368421,
      "grad_norm": 0.9326000809669495,
      "learning_rate": 5.831646772915651e-05,
      "loss": 0.2751,
      "step": 31
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 0.8286113142967224,
      "learning_rate": 5.5004784581204927e-05,
      "loss": 0.3887,
      "step": 32
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 0.7682293057441711,
      "learning_rate": 5.167074885038373e-05,
      "loss": 0.3525,
      "step": 33
    },
    {
      "epoch": 1.7894736842105263,
      "grad_norm": 0.7892478704452515,
      "learning_rate": 4.832925114961629e-05,
      "loss": 0.3042,
      "step": 34
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.8966577649116516,
      "learning_rate": 4.4995215418795085e-05,
      "loss": 0.2861,
      "step": 35
    },
    {
      "epoch": 1.8947368421052633,
      "grad_norm": 0.7951040863990784,
      "learning_rate": 4.1683532270843504e-05,
      "loss": 0.3405,
      "step": 36
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 0.8542851805686951,
      "learning_rate": 3.840899248662358e-05,
      "loss": 0.3285,
      "step": 37
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9358116388320923,
      "learning_rate": 3.5186220955718306e-05,
      "loss": 0.2819,
      "step": 38
    },
    {
      "epoch": 2.0526315789473686,
      "grad_norm": 0.6613637804985046,
      "learning_rate": 3.202961135812437e-05,
      "loss": 0.2919,
      "step": 39
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 0.7066323161125183,
      "learning_rate": 2.895326187858326e-05,
      "loss": 0.2935,
      "step": 40
    },
    {
      "epoch": 2.1578947368421053,
      "grad_norm": 0.6887481212615967,
      "learning_rate": 2.5970912240665813e-05,
      "loss": 0.2381,
      "step": 41
    },
    {
      "epoch": 2.2105263157894735,
      "grad_norm": 0.8609808683395386,
      "learning_rate": 2.3095882341831372e-05,
      "loss": 0.2549,
      "step": 42
    },
    {
      "epoch": 2.263157894736842,
      "grad_norm": 0.6327958106994629,
      "learning_rate": 2.0341012763532243e-05,
      "loss": 0.2311,
      "step": 43
    },
    {
      "epoch": 2.3157894736842106,
      "grad_norm": 0.7251918911933899,
      "learning_rate": 1.771860742205988e-05,
      "loss": 0.2984,
      "step": 44
    },
    {
      "epoch": 2.3684210526315788,
      "grad_norm": 0.7226844429969788,
      "learning_rate": 1.5240378616267886e-05,
      "loss": 0.2503,
      "step": 45
    },
    {
      "epoch": 2.4210526315789473,
      "grad_norm": 0.7345536947250366,
      "learning_rate": 1.2917394717602121e-05,
      "loss": 0.2231,
      "step": 46
    },
    {
      "epoch": 2.473684210526316,
      "grad_norm": 0.6850237846374512,
      "learning_rate": 1.0760030736066951e-05,
      "loss": 0.2764,
      "step": 47
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 0.7278067469596863,
      "learning_rate": 8.777921982911996e-06,
      "loss": 0.2904,
      "step": 48
    },
    {
      "epoch": 2.5789473684210527,
      "grad_norm": 0.7924470901489258,
      "learning_rate": 6.979921036993042e-06,
      "loss": 0.28,
      "step": 49
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 0.7697122693061829,
      "learning_rate": 5.374058207005944e-06,
      "loss": 0.1898,
      "step": 50
    },
    {
      "epoch": 2.6315789473684212,
      "eval_loss": 0.4874384105205536,
      "eval_runtime": 0.6761,
      "eval_samples_per_second": 47.333,
      "eval_steps_per_second": 11.833,
      "step": 50
    },
    {
      "epoch": 2.6842105263157894,
      "grad_norm": 0.6865087151527405,
      "learning_rate": 3.967505666178556e-06,
      "loss": 0.2486,
      "step": 51
    },
    {
      "epoch": 2.736842105263158,
      "grad_norm": 0.6758986711502075,
      "learning_rate": 2.7665454196040664e-06,
      "loss": 0.2361,
      "step": 52
    },
    {
      "epoch": 2.7894736842105265,
      "grad_norm": 0.7985853552818298,
      "learning_rate": 1.7765412472811771e-06,
      "loss": 0.2283,
      "step": 53
    },
    {
      "epoch": 2.8421052631578947,
      "grad_norm": 0.7843177914619446,
      "learning_rate": 1.0019147481706625e-06,
      "loss": 0.1923,
      "step": 54
    },
    {
      "epoch": 2.8947368421052633,
      "grad_norm": 0.8056294918060303,
      "learning_rate": 4.461255922609986e-07,
      "loss": 0.3238,
      "step": 55
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 0.7717021107673645,
      "learning_rate": 1.1165606884234181e-07,
      "loss": 0.2513,
      "step": 56
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.880278468132019,
      "learning_rate": 0.0,
      "loss": 0.2208,
      "step": 57
    }
  ],
  "logging_steps": 1,
  "max_steps": 57,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.133593560612864e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}