{
  "best_metric": 0.4826611280441284,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0019685814402141816,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.937162880428363e-05,
      "grad_norm": 0.1473366916179657,
      "learning_rate": 1e-05,
      "loss": 0.4842,
      "step": 1
    },
    {
      "epoch": 3.937162880428363e-05,
      "eval_loss": 0.6386224031448364,
      "eval_runtime": 1995.7368,
      "eval_samples_per_second": 21.435,
      "eval_steps_per_second": 5.359,
      "step": 1
    },
    {
      "epoch": 7.874325760856726e-05,
      "grad_norm": 0.1501932293176651,
      "learning_rate": 2e-05,
      "loss": 0.4723,
      "step": 2
    },
    {
      "epoch": 0.0001181148864128509,
      "grad_norm": 0.15301698446273804,
      "learning_rate": 3e-05,
      "loss": 0.465,
      "step": 3
    },
    {
      "epoch": 0.00015748651521713453,
      "grad_norm": 0.15504051744937897,
      "learning_rate": 4e-05,
      "loss": 0.4613,
      "step": 4
    },
    {
      "epoch": 0.00019685814402141817,
      "grad_norm": 0.17802225053310394,
      "learning_rate": 5e-05,
      "loss": 0.4573,
      "step": 5
    },
    {
      "epoch": 0.0002362297728257018,
      "grad_norm": 0.16927273571491241,
      "learning_rate": 6e-05,
      "loss": 0.4521,
      "step": 6
    },
    {
      "epoch": 0.00027560140162998545,
      "grad_norm": 0.17416727542877197,
      "learning_rate": 7e-05,
      "loss": 0.4688,
      "step": 7
    },
    {
      "epoch": 0.00031497303043426906,
      "grad_norm": 0.19252535700798035,
      "learning_rate": 8e-05,
      "loss": 0.4869,
      "step": 8
    },
    {
      "epoch": 0.0003543446592385527,
      "grad_norm": 0.21720175445079803,
      "learning_rate": 9e-05,
      "loss": 0.4392,
      "step": 9
    },
    {
      "epoch": 0.00039371628804283634,
      "grad_norm": 0.20339831709861755,
      "learning_rate": 0.0001,
      "loss": 0.4673,
      "step": 10
    },
    {
      "epoch": 0.00043308791684711995,
      "grad_norm": 0.188543438911438,
      "learning_rate": 9.999316524962345e-05,
      "loss": 0.4404,
      "step": 11
    },
    {
      "epoch": 0.0004724595456514036,
      "grad_norm": 0.1840866506099701,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.4308,
      "step": 12
    },
    {
      "epoch": 0.0005118311744556872,
      "grad_norm": 0.1921333223581314,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.4258,
      "step": 13
    },
    {
      "epoch": 0.0005512028032599709,
      "grad_norm": 0.1822589933872223,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.4197,
      "step": 14
    },
    {
      "epoch": 0.0005905744320642545,
      "grad_norm": 0.17518241703510284,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.4372,
      "step": 15
    },
    {
      "epoch": 0.0006299460608685381,
      "grad_norm": 0.16869305074214935,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.4088,
      "step": 16
    },
    {
      "epoch": 0.0006693176896728218,
      "grad_norm": 0.18317943811416626,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.4121,
      "step": 17
    },
    {
      "epoch": 0.0007086893184771055,
      "grad_norm": 0.1914009302854538,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.4111,
      "step": 18
    },
    {
      "epoch": 0.000748060947281389,
      "grad_norm": 0.17840923368930817,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.4001,
      "step": 19
    },
    {
      "epoch": 0.0007874325760856727,
      "grad_norm": 0.19282005727291107,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.4029,
      "step": 20
    },
    {
      "epoch": 0.0008268042048899563,
      "grad_norm": 0.2061193436384201,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.3986,
      "step": 21
    },
    {
      "epoch": 0.0008661758336942399,
      "grad_norm": 0.20973272621631622,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.4132,
      "step": 22
    },
    {
      "epoch": 0.0009055474624985236,
      "grad_norm": 0.20912733674049377,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.393,
      "step": 23
    },
    {
      "epoch": 0.0009449190913028072,
      "grad_norm": 0.20087695121765137,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.3943,
      "step": 24
    },
    {
      "epoch": 0.0009842907201070908,
      "grad_norm": 0.2549019157886505,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.4077,
      "step": 25
    },
    {
      "epoch": 0.0010236623489113745,
      "grad_norm": 0.21350446343421936,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.3767,
      "step": 26
    },
    {
      "epoch": 0.0010630339777156581,
      "grad_norm": 0.2284364104270935,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.3941,
      "step": 27
    },
    {
      "epoch": 0.0011024056065199418,
      "grad_norm": 0.20515067875385284,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.3623,
      "step": 28
    },
    {
      "epoch": 0.0011417772353242255,
      "grad_norm": 0.22672885656356812,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.3734,
      "step": 29
    },
    {
      "epoch": 0.001181148864128509,
      "grad_norm": 0.2223028987646103,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.3831,
      "step": 30
    },
    {
      "epoch": 0.0012205204929327926,
      "grad_norm": 0.22611401975154877,
      "learning_rate": 9.701596950580806e-05,
      "loss": 0.3728,
      "step": 31
    },
    {
      "epoch": 0.0012598921217370762,
      "grad_norm": 0.23607665300369263,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.387,
      "step": 32
    },
    {
      "epoch": 0.00129926375054136,
      "grad_norm": 0.24884606897830963,
      "learning_rate": 9.642770192448536e-05,
      "loss": 0.3784,
      "step": 33
    },
    {
      "epoch": 0.0013386353793456436,
      "grad_norm": 0.2478945106267929,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.3671,
      "step": 34
    },
    {
      "epoch": 0.0013780070081499272,
      "grad_norm": 0.2744622230529785,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.3852,
      "step": 35
    },
    {
      "epoch": 0.001417378636954211,
      "grad_norm": 0.2842346727848053,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.3984,
      "step": 36
    },
    {
      "epoch": 0.0014567502657584943,
      "grad_norm": 0.3080979585647583,
      "learning_rate": 9.509956150664796e-05,
      "loss": 0.4233,
      "step": 37
    },
    {
      "epoch": 0.001496121894562778,
      "grad_norm": 0.32813560962677,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.3906,
      "step": 38
    },
    {
      "epoch": 0.0015354935233670617,
      "grad_norm": 0.3377130627632141,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.3866,
      "step": 39
    },
    {
      "epoch": 0.0015748651521713453,
      "grad_norm": 0.38843271136283875,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.4104,
      "step": 40
    },
    {
      "epoch": 0.001614236780975629,
      "grad_norm": 0.3918204605579376,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.4434,
      "step": 41
    },
    {
      "epoch": 0.0016536084097799127,
      "grad_norm": 0.4351727068424225,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.4251,
      "step": 42
    },
    {
      "epoch": 0.0016929800385841961,
      "grad_norm": 0.5044992566108704,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.4586,
      "step": 43
    },
    {
      "epoch": 0.0017323516673884798,
      "grad_norm": 0.4990330636501312,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.4392,
      "step": 44
    },
    {
      "epoch": 0.0017717232961927635,
      "grad_norm": 0.5468814373016357,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.5086,
      "step": 45
    },
    {
      "epoch": 0.0018110949249970471,
      "grad_norm": 0.6528873443603516,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.4761,
      "step": 46
    },
    {
      "epoch": 0.0018504665538013308,
      "grad_norm": 0.7145413160324097,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.4981,
      "step": 47
    },
    {
      "epoch": 0.0018898381826056145,
      "grad_norm": 0.8748348951339722,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.4825,
      "step": 48
    },
    {
      "epoch": 0.0019292098114098981,
      "grad_norm": 1.0368186235427856,
      "learning_rate": 8.995939984474624e-05,
      "loss": 0.5143,
      "step": 49
    },
    {
      "epoch": 0.0019685814402141816,
      "grad_norm": 1.806100606918335,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.736,
      "step": 50
    },
    {
      "epoch": 0.0019685814402141816,
      "eval_loss": 0.4826611280441284,
      "eval_runtime": 2007.0233,
      "eval_samples_per_second": 21.314,
      "eval_steps_per_second": 5.329,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.62025180463104e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}