{
  "best_metric": 10.373831748962402,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.5223636957231472,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010447273914462945,
      "grad_norm": 0.04309515655040741,
      "learning_rate": 5e-05,
      "loss": 10.3802,
      "step": 1
    },
    {
      "epoch": 0.010447273914462945,
      "eval_loss": 10.380773544311523,
      "eval_runtime": 0.187,
      "eval_samples_per_second": 267.323,
      "eval_steps_per_second": 69.504,
      "step": 1
    },
    {
      "epoch": 0.02089454782892589,
      "grad_norm": 0.03844434395432472,
      "learning_rate": 0.0001,
      "loss": 10.3795,
      "step": 2
    },
    {
      "epoch": 0.031341821743388835,
      "grad_norm": 0.03835219889879227,
      "learning_rate": 9.990365154573717e-05,
      "loss": 10.3813,
      "step": 3
    },
    {
      "epoch": 0.04178909565785178,
      "grad_norm": 0.03615787997841835,
      "learning_rate": 9.961501876182148e-05,
      "loss": 10.3809,
      "step": 4
    },
    {
      "epoch": 0.05223636957231472,
      "grad_norm": 0.03825068473815918,
      "learning_rate": 9.913533761814537e-05,
      "loss": 10.379,
      "step": 5
    },
    {
      "epoch": 0.06268364348677767,
      "grad_norm": 0.03499473258852959,
      "learning_rate": 9.846666218300807e-05,
      "loss": 10.3807,
      "step": 6
    },
    {
      "epoch": 0.07313091740124061,
      "grad_norm": 0.0341096930205822,
      "learning_rate": 9.761185582727977e-05,
      "loss": 10.3809,
      "step": 7
    },
    {
      "epoch": 0.08357819131570356,
      "grad_norm": 0.03829431161284447,
      "learning_rate": 9.657457896300791e-05,
      "loss": 10.3796,
      "step": 8
    },
    {
      "epoch": 0.0940254652301665,
      "grad_norm": 0.03924962878227234,
      "learning_rate": 9.535927336897098e-05,
      "loss": 10.38,
      "step": 9
    },
    {
      "epoch": 0.10447273914462944,
      "grad_norm": 0.04425439238548279,
      "learning_rate": 9.397114317029975e-05,
      "loss": 10.3797,
      "step": 10
    },
    {
      "epoch": 0.1149200130590924,
      "grad_norm": 0.04947839677333832,
      "learning_rate": 9.241613255361455e-05,
      "loss": 10.3794,
      "step": 11
    },
    {
      "epoch": 0.12536728697355534,
      "grad_norm": 0.05416706204414368,
      "learning_rate": 9.070090031310558e-05,
      "loss": 10.3793,
      "step": 12
    },
    {
      "epoch": 0.13581456088801827,
      "grad_norm": 0.04637929052114487,
      "learning_rate": 8.883279133655399e-05,
      "loss": 10.3779,
      "step": 13
    },
    {
      "epoch": 0.14626183480248123,
      "grad_norm": 0.04821914806962013,
      "learning_rate": 8.681980515339464e-05,
      "loss": 10.3762,
      "step": 14
    },
    {
      "epoch": 0.15670910871694418,
      "grad_norm": 0.04534909501671791,
      "learning_rate": 8.467056167950311e-05,
      "loss": 10.378,
      "step": 15
    },
    {
      "epoch": 0.1671563826314071,
      "grad_norm": 0.044793691486120224,
      "learning_rate": 8.239426430539243e-05,
      "loss": 10.3773,
      "step": 16
    },
    {
      "epoch": 0.17760365654587007,
      "grad_norm": 0.045045748353004456,
      "learning_rate": 8.000066048588211e-05,
      "loss": 10.3778,
      "step": 17
    },
    {
      "epoch": 0.188050930460333,
      "grad_norm": 0.049222979694604874,
      "learning_rate": 7.75e-05,
      "loss": 10.3772,
      "step": 18
    },
    {
      "epoch": 0.19849820437479596,
      "grad_norm": 0.043892327696084976,
      "learning_rate": 7.490299105985507e-05,
      "loss": 10.3768,
      "step": 19
    },
    {
      "epoch": 0.20894547828925888,
      "grad_norm": 0.044649962335824966,
      "learning_rate": 7.222075445642904e-05,
      "loss": 10.3787,
      "step": 20
    },
    {
      "epoch": 0.21939275220372184,
      "grad_norm": 0.044739287346601486,
      "learning_rate": 6.946477593864228e-05,
      "loss": 10.3774,
      "step": 21
    },
    {
      "epoch": 0.2298400261181848,
      "grad_norm": 0.046225033700466156,
      "learning_rate": 6.664685702961344e-05,
      "loss": 10.3769,
      "step": 22
    },
    {
      "epoch": 0.24028730003264773,
      "grad_norm": 0.047752395272254944,
      "learning_rate": 6.377906449072578e-05,
      "loss": 10.3776,
      "step": 23
    },
    {
      "epoch": 0.2507345739471107,
      "grad_norm": 0.06583589315414429,
      "learning_rate": 6.087367864990233e-05,
      "loss": 10.3753,
      "step": 24
    },
    {
      "epoch": 0.2611818478615736,
      "grad_norm": 0.07963792979717255,
      "learning_rate": 5.794314081535644e-05,
      "loss": 10.3764,
      "step": 25
    },
    {
      "epoch": 0.2611818478615736,
      "eval_loss": 10.376198768615723,
      "eval_runtime": 0.1793,
      "eval_samples_per_second": 278.798,
      "eval_steps_per_second": 72.487,
      "step": 25
    },
    {
      "epoch": 0.27162912177603654,
      "grad_norm": 0.058083388954401016,
      "learning_rate": 5.500000000000001e-05,
      "loss": 10.3754,
      "step": 26
    },
    {
      "epoch": 0.2820763956904995,
      "grad_norm": 0.055568333715200424,
      "learning_rate": 5.205685918464356e-05,
      "loss": 10.3756,
      "step": 27
    },
    {
      "epoch": 0.29252366960496246,
      "grad_norm": 0.059653159230947495,
      "learning_rate": 4.912632135009769e-05,
      "loss": 10.3746,
      "step": 28
    },
    {
      "epoch": 0.3029709435194254,
      "grad_norm": 0.04649928957223892,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 10.3756,
      "step": 29
    },
    {
      "epoch": 0.31341821743388837,
      "grad_norm": 0.053945064544677734,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 10.3747,
      "step": 30
    },
    {
      "epoch": 0.3238654913483513,
      "grad_norm": 0.05790293961763382,
      "learning_rate": 4.053522406135775e-05,
      "loss": 10.3745,
      "step": 31
    },
    {
      "epoch": 0.3343127652628142,
      "grad_norm": 0.053165897727012634,
      "learning_rate": 3.777924554357096e-05,
      "loss": 10.3749,
      "step": 32
    },
    {
      "epoch": 0.34476003917727716,
      "grad_norm": 0.04939047992229462,
      "learning_rate": 3.509700894014496e-05,
      "loss": 10.3745,
      "step": 33
    },
    {
      "epoch": 0.35520731309174014,
      "grad_norm": 0.053896188735961914,
      "learning_rate": 3.250000000000001e-05,
      "loss": 10.3738,
      "step": 34
    },
    {
      "epoch": 0.36565458700620307,
      "grad_norm": 0.05971045047044754,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 10.3765,
      "step": 35
    },
    {
      "epoch": 0.376101860920666,
      "grad_norm": 0.06172914803028107,
      "learning_rate": 2.760573569460757e-05,
      "loss": 10.3769,
      "step": 36
    },
    {
      "epoch": 0.386549134835129,
      "grad_norm": 0.07987114787101746,
      "learning_rate": 2.53294383204969e-05,
      "loss": 10.3756,
      "step": 37
    },
    {
      "epoch": 0.3969964087495919,
      "grad_norm": 0.05999770760536194,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 10.3754,
      "step": 38
    },
    {
      "epoch": 0.40744368266405484,
      "grad_norm": 0.05834446847438812,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 10.3746,
      "step": 39
    },
    {
      "epoch": 0.41789095657851777,
      "grad_norm": 0.056418679654598236,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 10.3735,
      "step": 40
    },
    {
      "epoch": 0.42833823049298075,
      "grad_norm": 0.06561548262834549,
      "learning_rate": 1.758386744638546e-05,
      "loss": 10.3724,
      "step": 41
    },
    {
      "epoch": 0.4387855044074437,
      "grad_norm": 0.064185731112957,
      "learning_rate": 1.602885682970026e-05,
      "loss": 10.3736,
      "step": 42
    },
    {
      "epoch": 0.4492327783219066,
      "grad_norm": 0.06568823754787445,
      "learning_rate": 1.464072663102903e-05,
      "loss": 10.3738,
      "step": 43
    },
    {
      "epoch": 0.4596800522363696,
      "grad_norm": 0.05658630281686783,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 10.3741,
      "step": 44
    },
    {
      "epoch": 0.4701273261508325,
      "grad_norm": 0.05986878648400307,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 10.3735,
      "step": 45
    },
    {
      "epoch": 0.48057460006529545,
      "grad_norm": 0.05994442105293274,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 10.3764,
      "step": 46
    },
    {
      "epoch": 0.4910218739797584,
      "grad_norm": 0.06321598589420319,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 10.3727,
      "step": 47
    },
    {
      "epoch": 0.5014691478942214,
      "grad_norm": 0.06735363602638245,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 10.3738,
      "step": 48
    },
    {
      "epoch": 0.5119164218086842,
      "grad_norm": 0.08081047981977463,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 10.3738,
      "step": 49
    },
    {
      "epoch": 0.5223636957231472,
      "grad_norm": 0.0807819813489914,
      "learning_rate": 1e-05,
      "loss": 10.3769,
      "step": 50
    },
    {
      "epoch": 0.5223636957231472,
      "eval_loss": 10.373831748962402,
      "eval_runtime": 0.1835,
      "eval_samples_per_second": 272.468,
      "eval_steps_per_second": 70.842,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42821519671296.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}