{
  "best_global_step": 741,
  "best_metric": 0.012937678955495358,
  "best_model_checkpoint": "saves_stability/prefix-tuning/llama-3-8b-instruct/train_cb_1757081469/checkpoint-741",
  "epoch": 10.0,
  "eval_steps": 57,
  "global_step": 1130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04424778761061947,
      "grad_norm": 172.90432739257812,
      "learning_rate": 1.7699115044247788e-06,
      "loss": 9.3038,
      "num_input_tokens_seen": 1520,
      "step": 5
    },
    {
      "epoch": 0.08849557522123894,
      "grad_norm": 162.92501831054688,
      "learning_rate": 3.982300884955752e-06,
      "loss": 7.8011,
      "num_input_tokens_seen": 2976,
      "step": 10
    },
    {
      "epoch": 0.13274336283185842,
      "grad_norm": 118.36426544189453,
      "learning_rate": 6.194690265486726e-06,
      "loss": 5.8089,
      "num_input_tokens_seen": 4688,
      "step": 15
    },
    {
      "epoch": 0.17699115044247787,
      "grad_norm": 119.76227569580078,
      "learning_rate": 8.407079646017701e-06,
      "loss": 3.2162,
      "num_input_tokens_seen": 5776,
      "step": 20
    },
    {
      "epoch": 0.22123893805309736,
      "grad_norm": 63.143497467041016,
      "learning_rate": 1.0619469026548673e-05,
      "loss": 1.0524,
      "num_input_tokens_seen": 6992,
      "step": 25
    },
    {
      "epoch": 0.26548672566371684,
      "grad_norm": 64.27384185791016,
      "learning_rate": 1.2831858407079647e-05,
      "loss": 1.2224,
      "num_input_tokens_seen": 8320,
      "step": 30
    },
    {
      "epoch": 0.30973451327433627,
      "grad_norm": 54.031402587890625,
      "learning_rate": 1.504424778761062e-05,
      "loss": 0.959,
      "num_input_tokens_seen": 9536,
      "step": 35
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 58.8524284362793,
      "learning_rate": 1.7256637168141594e-05,
      "loss": 0.3716,
      "num_input_tokens_seen": 11072,
      "step": 40
    },
    {
      "epoch": 0.39823008849557523,
      "grad_norm": 48.3114128112793,
      "learning_rate": 1.946902654867257e-05,
      "loss": 1.0354,
      "num_input_tokens_seen": 12320,
      "step": 45
    },
    {
      "epoch": 0.4424778761061947,
      "grad_norm": 48.01361083984375,
      "learning_rate": 2.1681415929203542e-05,
      "loss": 0.3628,
      "num_input_tokens_seen": 13616,
      "step": 50
    },
    {
      "epoch": 0.48672566371681414,
      "grad_norm": 65.13843536376953,
      "learning_rate": 2.3893805309734516e-05,
      "loss": 0.8643,
      "num_input_tokens_seen": 15104,
      "step": 55
    },
    {
      "epoch": 0.504424778761062,
      "eval_loss": 0.4547373652458191,
      "eval_runtime": 0.7799,
      "eval_samples_per_second": 32.053,
      "eval_steps_per_second": 16.668,
      "num_input_tokens_seen": 15664,
      "step": 57
    },
    {
      "epoch": 0.5309734513274337,
      "grad_norm": 16.273540496826172,
      "learning_rate": 2.610619469026549e-05,
      "loss": 0.3507,
      "num_input_tokens_seen": 16352,
      "step": 60
    },
    {
      "epoch": 0.5752212389380531,
      "grad_norm": 6.617971897125244,
      "learning_rate": 2.831858407079646e-05,
      "loss": 0.3175,
      "num_input_tokens_seen": 17888,
      "step": 65
    },
    {
      "epoch": 0.6194690265486725,
      "grad_norm": 56.716156005859375,
      "learning_rate": 3.0530973451327434e-05,
      "loss": 0.7932,
      "num_input_tokens_seen": 19008,
      "step": 70
    },
    {
      "epoch": 0.6637168141592921,
      "grad_norm": 6.397486686706543,
      "learning_rate": 3.274336283185841e-05,
      "loss": 0.2819,
      "num_input_tokens_seen": 20480,
      "step": 75
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 49.061546325683594,
      "learning_rate": 3.495575221238938e-05,
      "loss": 0.5588,
      "num_input_tokens_seen": 22000,
      "step": 80
    },
    {
      "epoch": 0.7522123893805309,
      "grad_norm": 101.45230102539062,
      "learning_rate": 3.716814159292036e-05,
      "loss": 0.303,
      "num_input_tokens_seen": 23456,
      "step": 85
    },
    {
      "epoch": 0.7964601769911505,
      "grad_norm": 7.564614772796631,
      "learning_rate": 3.938053097345133e-05,
      "loss": 0.2319,
      "num_input_tokens_seen": 25280,
      "step": 90
    },
    {
      "epoch": 0.8407079646017699,
      "grad_norm": 7.837498188018799,
      "learning_rate": 4.15929203539823e-05,
      "loss": 0.1757,
      "num_input_tokens_seen": 26496,
      "step": 95
    },
    {
      "epoch": 0.8849557522123894,
      "grad_norm": 11.753522872924805,
      "learning_rate": 4.380530973451328e-05,
      "loss": 0.1669,
      "num_input_tokens_seen": 28016,
      "step": 100
    },
    {
      "epoch": 0.9292035398230089,
      "grad_norm": 17.86298179626465,
      "learning_rate": 4.601769911504425e-05,
      "loss": 0.538,
      "num_input_tokens_seen": 29344,
      "step": 105
    },
    {
      "epoch": 0.9734513274336283,
      "grad_norm": 15.845224380493164,
      "learning_rate": 4.823008849557522e-05,
      "loss": 0.2715,
      "num_input_tokens_seen": 30496,
      "step": 110
    },
    {
      "epoch": 1.008849557522124,
      "eval_loss": 0.5620186924934387,
      "eval_runtime": 0.7758,
      "eval_samples_per_second": 32.224,
      "eval_steps_per_second": 16.756,
      "num_input_tokens_seen": 31304,
      "step": 114
    },
    {
      "epoch": 1.0176991150442478,
      "grad_norm": 20.063274383544922,
      "learning_rate": 4.9999880720033856e-05,
      "loss": 0.2842,
      "num_input_tokens_seen": 31560,
      "step": 115
    },
    {
      "epoch": 1.0619469026548674,
      "grad_norm": 4.028425693511963,
      "learning_rate": 4.999570604073014e-05,
      "loss": 0.9137,
      "num_input_tokens_seen": 32744,
      "step": 120
    },
    {
      "epoch": 1.1061946902654867,
      "grad_norm": 11.262805938720703,
      "learning_rate": 4.998556850128691e-05,
      "loss": 0.4827,
      "num_input_tokens_seen": 34440,
      "step": 125
    },
    {
      "epoch": 1.1504424778761062,
      "grad_norm": 229.07241821289062,
      "learning_rate": 4.996947052006874e-05,
      "loss": 0.4721,
      "num_input_tokens_seen": 35992,
      "step": 130
    },
    {
      "epoch": 1.1946902654867257,
      "grad_norm": 33.70645523071289,
      "learning_rate": 4.9947415937335635e-05,
      "loss": 0.6239,
      "num_input_tokens_seen": 37480,
      "step": 135
    },
    {
      "epoch": 1.238938053097345,
      "grad_norm": 20.447160720825195,
      "learning_rate": 4.9919410014326955e-05,
      "loss": 0.3049,
      "num_input_tokens_seen": 38776,
      "step": 140
    },
    {
      "epoch": 1.2831858407079646,
      "grad_norm": 11.072810173034668,
      "learning_rate": 4.98854594320063e-05,
      "loss": 0.2219,
      "num_input_tokens_seen": 40408,
      "step": 145
    },
    {
      "epoch": 1.3274336283185841,
      "grad_norm": 5.849891185760498,
      "learning_rate": 4.98455722894677e-05,
      "loss": 0.9584,
      "num_input_tokens_seen": 41944,
      "step": 150
    },
    {
      "epoch": 1.3716814159292037,
      "grad_norm": 3.3164310455322266,
      "learning_rate": 4.979975810200359e-05,
      "loss": 0.2229,
      "num_input_tokens_seen": 43576,
      "step": 155
    },
    {
      "epoch": 1.415929203539823,
      "grad_norm": 149.41354370117188,
      "learning_rate": 4.974802779883484e-05,
      "loss": 0.5863,
      "num_input_tokens_seen": 44984,
      "step": 160
    },
    {
      "epoch": 1.4601769911504425,
      "grad_norm": 9.543984413146973,
      "learning_rate": 4.969039372050356e-05,
      "loss": 0.5308,
      "num_input_tokens_seen": 46424,
      "step": 165
    },
    {
      "epoch": 1.504424778761062,
      "grad_norm": 14.939043045043945,
      "learning_rate": 4.9626869615929175e-05,
      "loss": 0.3501,
      "num_input_tokens_seen": 47800,
      "step": 170
    },
    {
      "epoch": 1.5132743362831858,
      "eval_loss": 0.2233014702796936,
      "eval_runtime": 0.7784,
      "eval_samples_per_second": 32.116,
      "eval_steps_per_second": 16.7,
      "num_input_tokens_seen": 48008,
      "step": 171
    },
    {
      "epoch": 1.5486725663716814,
      "grad_norm": 2.0158536434173584,
      "learning_rate": 4.9557470639128554e-05,
      "loss": 0.4255,
      "num_input_tokens_seen": 48776,
      "step": 175
    },
    {
      "epoch": 1.592920353982301,
      "grad_norm": 4.7409796714782715,
      "learning_rate": 4.9482213345600936e-05,
      "loss": 0.1645,
      "num_input_tokens_seen": 49960,
      "step": 180
    },
    {
      "epoch": 1.6371681415929205,
      "grad_norm": 0.18256613612174988,
      "learning_rate": 4.940111568837854e-05,
      "loss": 0.0174,
      "num_input_tokens_seen": 51624,
      "step": 185
    },
    {
      "epoch": 1.6814159292035398,
      "grad_norm": 16.183256149291992,
      "learning_rate": 4.931419701374377e-05,
      "loss": 0.2941,
      "num_input_tokens_seen": 53048,
      "step": 190
    },
    {
      "epoch": 1.7256637168141593,
      "grad_norm": 20.320951461791992,
      "learning_rate": 4.922147805661402e-05,
      "loss": 1.1045,
      "num_input_tokens_seen": 54552,
      "step": 195
    },
    {
      "epoch": 1.7699115044247788,
      "grad_norm": 12.029666900634766,
      "learning_rate": 4.91229809355953e-05,
      "loss": 0.4722,
      "num_input_tokens_seen": 55656,
      "step": 200
    },
    {
      "epoch": 1.8141592920353982,
      "grad_norm": 0.27077895402908325,
      "learning_rate": 4.901872914770569e-05,
      "loss": 0.3305,
      "num_input_tokens_seen": 56984,
      "step": 205
    },
    {
      "epoch": 1.8584070796460177,
      "grad_norm": 3.5023787021636963,
      "learning_rate": 4.8908747562769995e-05,
      "loss": 0.5475,
      "num_input_tokens_seen": 58200,
      "step": 210
    },
    {
      "epoch": 1.9026548672566372,
      "grad_norm": 6.9777631759643555,
      "learning_rate": 4.8793062417486976e-05,
      "loss": 0.1367,
      "num_input_tokens_seen": 59544,
      "step": 215
    },
    {
      "epoch": 1.9469026548672566,
      "grad_norm": 9.807963371276855,
      "learning_rate": 4.867170130917034e-05,
      "loss": 0.5382,
      "num_input_tokens_seen": 60968,
      "step": 220
    },
    {
      "epoch": 1.991150442477876,
      "grad_norm": 10.069539070129395,
      "learning_rate": 4.8544693189165324e-05,
      "loss": 0.5262,
      "num_input_tokens_seen": 62216,
      "step": 225
    },
    {
      "epoch": 2.017699115044248,
      "eval_loss": 0.32860472798347473,
      "eval_runtime": 0.777,
      "eval_samples_per_second": 32.176,
      "eval_steps_per_second": 16.731,
      "num_input_tokens_seen": 63040,
      "step": 228
    },
    {
      "epoch": 2.0353982300884956,
      "grad_norm": 14.066644668579102,
      "learning_rate": 4.841206835594222e-05,
      "loss": 0.2754,
      "num_input_tokens_seen": 63568,
      "step": 230
    },
    {
      "epoch": 2.079646017699115,
      "grad_norm": 9.898775100708008,
      "learning_rate": 4.8273858447868424e-05,
      "loss": 0.1062,
      "num_input_tokens_seen": 64800,
      "step": 235
    },
    {
      "epoch": 2.1238938053097347,
      "grad_norm": 0.7710229158401489,
      "learning_rate": 4.813009643566101e-05,
      "loss": 0.4179,
      "num_input_tokens_seen": 66144,
      "step": 240
    },
    {
      "epoch": 2.168141592920354,
      "grad_norm": 0.04852516949176788,
      "learning_rate": 4.798081661452135e-05,
      "loss": 0.047,
      "num_input_tokens_seen": 67600,
      "step": 245
    },
    {
      "epoch": 2.2123893805309733,
      "grad_norm": 14.3930082321167,
      "learning_rate": 4.7826054595953815e-05,
      "loss": 0.3323,
      "num_input_tokens_seen": 69056,
      "step": 250
    },
    {
      "epoch": 2.256637168141593,
      "grad_norm": 2.9247560501098633,
      "learning_rate": 4.766584729927049e-05,
      "loss": 0.361,
      "num_input_tokens_seen": 70304,
      "step": 255
    },
    {
      "epoch": 2.3008849557522124,
      "grad_norm": 0.6192066073417664,
      "learning_rate": 4.750023294278381e-05,
      "loss": 0.1337,
      "num_input_tokens_seen": 71872,
      "step": 260
    },
    {
      "epoch": 2.3451327433628317,
      "grad_norm": 6.6043701171875,
      "learning_rate": 4.732925103468944e-05,
      "loss": 0.2764,
      "num_input_tokens_seen": 73520,
      "step": 265
    },
    {
      "epoch": 2.3893805309734515,
      "grad_norm": 0.0575706772506237,
      "learning_rate": 4.715294236364135e-05,
      "loss": 0.0771,
      "num_input_tokens_seen": 75184,
      "step": 270
    },
    {
      "epoch": 2.433628318584071,
      "grad_norm": 9.62030029296875,
      "learning_rate": 4.6971348989021446e-05,
      "loss": 0.0644,
      "num_input_tokens_seen": 76592,
      "step": 275
    },
    {
      "epoch": 2.47787610619469,
      "grad_norm": 11.746428489685059,
      "learning_rate": 4.6784514230906146e-05,
      "loss": 0.7766,
      "num_input_tokens_seen": 77632,
      "step": 280
    },
    {
      "epoch": 2.52212389380531,
      "grad_norm": 0.6659058928489685,
      "learning_rate": 4.659248265973205e-05,
      "loss": 0.2992,
      "num_input_tokens_seen": 79104,
      "step": 285
    },
    {
      "epoch": 2.52212389380531,
      "eval_loss": 0.13774029910564423,
      "eval_runtime": 0.7715,
      "eval_samples_per_second": 32.404,
      "eval_steps_per_second": 16.85,
      "num_input_tokens_seen": 79104,
      "step": 285
    },
    {
      "epoch": 2.566371681415929,
      "grad_norm": 7.603933334350586,
      "learning_rate": 4.639530008566349e-05,
      "loss": 0.0878,
      "num_input_tokens_seen": 80432,
      "step": 290
    },
    {
      "epoch": 2.6106194690265485,
      "grad_norm": 12.166590690612793,
      "learning_rate": 4.6193013547664255e-05,
      "loss": 0.5536,
      "num_input_tokens_seen": 81744,
      "step": 295
    },
    {
      "epoch": 2.6548672566371683,
      "grad_norm": 20.62239646911621,
      "learning_rate": 4.5985671302276165e-05,
      "loss": 0.6957,
      "num_input_tokens_seen": 83104,
      "step": 300
    },
    {
      "epoch": 2.6991150442477876,
      "grad_norm": 7.500833034515381,
      "learning_rate": 4.577332281210727e-05,
      "loss": 0.3277,
      "num_input_tokens_seen": 84336,
      "step": 305
    },
    {
      "epoch": 2.7433628318584073,
      "grad_norm": 4.258105278015137,
      "learning_rate": 4.55560187340322e-05,
      "loss": 0.4949,
      "num_input_tokens_seen": 85936,
      "step": 310
    },
    {
      "epoch": 2.7876106194690267,
      "grad_norm": 1.3956164121627808,
      "learning_rate": 4.533381090710776e-05,
      "loss": 0.1694,
      "num_input_tokens_seen": 87232,
      "step": 315
    },
    {
      "epoch": 2.831858407079646,
      "grad_norm": 0.49883967638015747,
      "learning_rate": 4.5106752340206435e-05,
      "loss": 0.0296,
      "num_input_tokens_seen": 88336,
      "step": 320
    },
    {
      "epoch": 2.8761061946902657,
      "grad_norm": 4.606618881225586,
      "learning_rate": 4.4874897199370805e-05,
      "loss": 0.3007,
      "num_input_tokens_seen": 89584,
      "step": 325
    },
    {
      "epoch": 2.920353982300885,
      "grad_norm": 8.798304557800293,
      "learning_rate": 4.463830079489196e-05,
      "loss": 0.1205,
      "num_input_tokens_seen": 91008,
      "step": 330
    },
    {
      "epoch": 2.9646017699115044,
      "grad_norm": 0.30231973528862,
      "learning_rate": 4.4397019568114987e-05,
      "loss": 0.3998,
      "num_input_tokens_seen": 92288,
      "step": 335
    },
    {
      "epoch": 3.0088495575221237,
      "grad_norm": 1.7744728326797485,
      "learning_rate": 4.415111107797445e-05,
      "loss": 0.0962,
      "num_input_tokens_seen": 93456,
      "step": 340
    },
    {
      "epoch": 3.0265486725663715,
      "eval_loss": 0.15844351053237915,
      "eval_runtime": 0.7752,
      "eval_samples_per_second": 32.251,
      "eval_steps_per_second": 16.77,
      "num_input_tokens_seen": 93888,
      "step": 342
    },
    {
      "epoch": 3.0530973451327434,
      "grad_norm": 0.12336965650320053,
      "learning_rate": 4.390063398726356e-05,
      "loss": 0.0034,
      "num_input_tokens_seen": 94752,
      "step": 345
    },
    {
      "epoch": 3.0973451327433628,
      "grad_norm": 0.4525299072265625,
      "learning_rate": 4.3645648048639734e-05,
      "loss": 0.3721,
      "num_input_tokens_seen": 96000,
      "step": 350
    },
    {
      "epoch": 3.1415929203539825,
      "grad_norm": 2.180138111114502,
      "learning_rate": 4.338621409037031e-05,
      "loss": 0.0544,
      "num_input_tokens_seen": 97568,
      "step": 355
    },
    {
      "epoch": 3.185840707964602,
      "grad_norm": 15.457188606262207,
      "learning_rate": 4.312239400182166e-05,
      "loss": 0.2974,
      "num_input_tokens_seen": 98896,
      "step": 360
    },
    {
      "epoch": 3.230088495575221,
      "grad_norm": 0.034761518239974976,
      "learning_rate": 4.285425071869511e-05,
      "loss": 0.2165,
      "num_input_tokens_seen": 100336,
      "step": 365
    },
    {
      "epoch": 3.274336283185841,
      "grad_norm": 8.344590187072754,
      "learning_rate": 4.258184820801331e-05,
      "loss": 0.3636,
      "num_input_tokens_seen": 101888,
      "step": 370
    },
    {
      "epoch": 3.3185840707964602,
      "grad_norm": 13.406426429748535,
      "learning_rate": 4.230525145286057e-05,
      "loss": 0.4511,
      "num_input_tokens_seen": 103184,
      "step": 375
    },
    {
      "epoch": 3.3628318584070795,
      "grad_norm": 0.5580623745918274,
      "learning_rate": 4.2024526436880785e-05,
      "loss": 0.0776,
      "num_input_tokens_seen": 104672,
      "step": 380
    },
    {
      "epoch": 3.4070796460176993,
      "grad_norm": 0.5934048891067505,
      "learning_rate": 4.173974012853665e-05,
      "loss": 0.2344,
      "num_input_tokens_seen": 105968,
      "step": 385
    },
    {
      "epoch": 3.4513274336283186,
      "grad_norm": 2.0349247455596924,
      "learning_rate": 4.1450960465134025e-05,
      "loss": 0.0518,
      "num_input_tokens_seen": 107472,
      "step": 390
    },
    {
      "epoch": 3.495575221238938,
      "grad_norm": 0.06618373095989227,
      "learning_rate": 4.1158256336615096e-05,
      "loss": 0.0753,
      "num_input_tokens_seen": 108688,
      "step": 395
    },
    {
      "epoch": 3.5309734513274336,
      "eval_loss": 0.3117164373397827,
      "eval_runtime": 0.7709,
      "eval_samples_per_second": 32.432,
      "eval_steps_per_second": 16.864,
      "num_input_tokens_seen": 109952,
      "step": 399
    },
    {
      "epoch": 3.5398230088495577,
      "grad_norm": 5.4218974113464355,
      "learning_rate": 4.086169756912431e-05,
      "loss": 0.5723,
      "num_input_tokens_seen": 110320,
      "step": 400
    },
    {
      "epoch": 3.584070796460177,
      "grad_norm": 2.7802650928497314,
      "learning_rate": 4.056135490835098e-05,
      "loss": 0.3036,
      "num_input_tokens_seen": 111952,
      "step": 405
    },
    {
      "epoch": 3.6283185840707963,
      "grad_norm": 3.5373213291168213,
      "learning_rate": 4.025730000265251e-05,
      "loss": 0.1427,
      "num_input_tokens_seen": 113152,
      "step": 410
    },
    {
      "epoch": 3.672566371681416,
      "grad_norm": 9.953773498535156,
      "learning_rate": 3.994960538596232e-05,
      "loss": 0.1798,
      "num_input_tokens_seen": 114592,
      "step": 415
    },
    {
      "epoch": 3.7168141592920354,
      "grad_norm": 0.10559792071580887,
      "learning_rate": 3.963834446048644e-05,
      "loss": 0.3672,
      "num_input_tokens_seen": 115936,
      "step": 420
    },
    {
      "epoch": 3.7610619469026547,
      "grad_norm": 0.500779926776886,
      "learning_rate": 3.9323591479193074e-05,
      "loss": 0.1044,
      "num_input_tokens_seen": 117232,
      "step": 425
    },
    {
      "epoch": 3.8053097345132745,
      "grad_norm": 1.8762531280517578,
      "learning_rate": 3.90054215280991e-05,
      "loss": 0.2184,
      "num_input_tokens_seen": 118448,
      "step": 430
    },
    {
      "epoch": 3.849557522123894,
      "grad_norm": 2.281862735748291,
      "learning_rate": 3.8683910508357934e-05,
      "loss": 0.7422,
      "num_input_tokens_seen": 119760,
      "step": 435
    },
    {
      "epoch": 3.893805309734513,
      "grad_norm": 0.25059983134269714,
      "learning_rate": 3.83591351181529e-05,
      "loss": 0.0681,
      "num_input_tokens_seen": 121488,
      "step": 440
    },
    {
      "epoch": 3.938053097345133,
      "grad_norm": 5.358190059661865,
      "learning_rate": 3.803117283440044e-05,
      "loss": 0.3385,
      "num_input_tokens_seen": 122816,
      "step": 445
    },
    {
      "epoch": 3.982300884955752,
      "grad_norm": 3.208345413208008,
      "learning_rate": 3.7700101894267615e-05,
      "loss": 0.0398,
      "num_input_tokens_seen": 124352,
      "step": 450
    },
    {
      "epoch": 4.0265486725663715,
      "grad_norm": 0.5785738825798035,
      "learning_rate": 3.736600127650817e-05,
      "loss": 0.4213,
      "num_input_tokens_seen": 125432,
      "step": 455
    },
    {
      "epoch": 4.035398230088496,
      "eval_loss": 0.1032278910279274,
      "eval_runtime": 0.7761,
      "eval_samples_per_second": 32.214,
      "eval_steps_per_second": 16.751,
      "num_input_tokens_seen": 125688,
      "step": 456
    },
    {
      "epoch": 4.070796460176991,
      "grad_norm": 1.3466124534606934,
      "learning_rate": 3.702895068262177e-05,
      "loss": 0.0458,
      "num_input_tokens_seen": 126744,
      "step": 460
    },
    {
      "epoch": 4.115044247787611,
      "grad_norm": 9.679712295532227,
      "learning_rate": 3.66890305178407e-05,
      "loss": 0.0737,
      "num_input_tokens_seen": 128424,
      "step": 465
    },
    {
      "epoch": 4.15929203539823,
      "grad_norm": 0.07868790626525879,
      "learning_rate": 3.63463218719489e-05,
      "loss": 0.1408,
      "num_input_tokens_seen": 129640,
      "step": 470
    },
    {
      "epoch": 4.20353982300885,
      "grad_norm": 0.5945039391517639,
      "learning_rate": 3.600090649993741e-05,
      "loss": 0.1903,
      "num_input_tokens_seen": 131016,
      "step": 475
    },
    {
      "epoch": 4.247787610619469,
      "grad_norm": 8.945996284484863,
      "learning_rate": 3.565286680250138e-05,
      "loss": 0.0682,
      "num_input_tokens_seen": 132552,
      "step": 480
    },
    {
      "epoch": 4.292035398230088,
      "grad_norm": 1.1376537084579468,
      "learning_rate": 3.530228580638291e-05,
      "loss": 0.2208,
      "num_input_tokens_seen": 133624,
      "step": 485
    },
    {
      "epoch": 4.336283185840708,
      "grad_norm": 0.1638191044330597,
      "learning_rate": 3.494924714456454e-05,
      "loss": 0.0085,
      "num_input_tokens_seen": 135464,
      "step": 490
    },
    {
      "epoch": 4.380530973451328,
      "grad_norm": 0.43489018082618713,
      "learning_rate": 3.459383503631823e-05,
      "loss": 0.1116,
      "num_input_tokens_seen": 136664,
      "step": 495
    },
    {
      "epoch": 4.424778761061947,
      "grad_norm": 14.490530967712402,
      "learning_rate": 3.423613426711431e-05,
      "loss": 0.4935,
      "num_input_tokens_seen": 137752,
      "step": 500
    },
    {
      "epoch": 4.469026548672566,
      "grad_norm": 0.006479791831225157,
      "learning_rate": 3.3876230168395525e-05,
      "loss": 0.0561,
      "num_input_tokens_seen": 139000,
      "step": 505
    },
    {
      "epoch": 4.513274336283186,
      "grad_norm": 0.9910786747932434,
      "learning_rate": 3.3514208597220705e-05,
      "loss": 0.0041,
      "num_input_tokens_seen": 140184,
      "step": 510
    },
    {
      "epoch": 4.539823008849558,
      "eval_loss": 0.10921850800514221,
      "eval_runtime": 0.7726,
      "eval_samples_per_second": 32.357,
      "eval_steps_per_second": 16.826,
      "num_input_tokens_seen": 141000,
      "step": 513
    },
    {
      "epoch": 4.557522123893805,
      "grad_norm": 0.04926896467804909,
      "learning_rate": 3.315015591578314e-05,
      "loss": 0.129,
      "num_input_tokens_seen": 141512,
      "step": 515
    },
    {
      "epoch": 4.601769911504425,
      "grad_norm": 3.648982286453247,
      "learning_rate": 3.278415897080839e-05,
      "loss": 0.2731,
      "num_input_tokens_seen": 142888,
      "step": 520
    },
    {
      "epoch": 4.646017699115045,
      "grad_norm": 0.5655894875526428,
      "learning_rate": 3.2416305072836557e-05,
      "loss": 0.1353,
      "num_input_tokens_seen": 144312,
      "step": 525
    },
    {
      "epoch": 4.6902654867256635,
      "grad_norm": 12.462039947509766,
      "learning_rate": 3.204668197539385e-05,
      "loss": 0.3391,
      "num_input_tokens_seen": 146008,
      "step": 530
    },
    {
      "epoch": 4.734513274336283,
      "grad_norm": 14.530315399169922,
      "learning_rate": 3.167537785405854e-05,
      "loss": 0.3179,
      "num_input_tokens_seen": 147448,
      "step": 535
    },
    {
      "epoch": 4.778761061946903,
      "grad_norm": 0.20855922996997833,
      "learning_rate": 3.13024812854262e-05,
      "loss": 0.0135,
      "num_input_tokens_seen": 148952,
      "step": 540
    },
    {
      "epoch": 4.823008849557522,
      "grad_norm": 0.016964532434940338,
      "learning_rate": 3.092808122597924e-05,
      "loss": 0.1094,
      "num_input_tokens_seen": 150472,
      "step": 545
    },
    {
      "epoch": 4.867256637168142,
      "grad_norm": 17.008296966552734,
      "learning_rate": 3.0552266990865934e-05,
      "loss": 0.8798,
      "num_input_tokens_seen": 151880,
      "step": 550
    },
    {
      "epoch": 4.911504424778761,
      "grad_norm": 1.905720829963684,
      "learning_rate": 3.017512823259373e-05,
      "loss": 0.1146,
      "num_input_tokens_seen": 153336,
      "step": 555
    },
    {
      "epoch": 4.95575221238938,
      "grad_norm": 0.22706276178359985,
      "learning_rate": 2.9796754919642228e-05,
      "loss": 0.0376,
      "num_input_tokens_seen": 154584,
      "step": 560
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.08005864918231964,
      "learning_rate": 2.941723731500068e-05,
      "loss": 0.1895,
      "num_input_tokens_seen": 155672,
      "step": 565
    },
    {
      "epoch": 5.04424778761062,
      "grad_norm": 2.539489984512329,
      "learning_rate": 2.9036665954635266e-05,
      "loss": 0.2002,
      "num_input_tokens_seen": 156920,
      "step": 570
    },
    {
      "epoch": 5.04424778761062,
      "eval_loss": 0.1037435531616211,
      "eval_runtime": 0.7749,
      "eval_samples_per_second": 32.262,
      "eval_steps_per_second": 16.776,
      "num_input_tokens_seen": 156920,
      "step": 570
    },
    {
      "epoch": 5.088495575221239,
      "grad_norm": 1.3146986961364746,
      "learning_rate": 2.8655131625891274e-05,
      "loss": 0.0115,
      "num_input_tokens_seen": 158872,
      "step": 575
    },
    {
      "epoch": 5.132743362831858,
      "grad_norm": 0.059930458664894104,
      "learning_rate": 2.8272725345835278e-05,
      "loss": 0.002,
      "num_input_tokens_seen": 159960,
      "step": 580
    },
    {
      "epoch": 5.176991150442478,
      "grad_norm": 0.26709067821502686,
      "learning_rate": 2.7889538339542527e-05,
      "loss": 0.0781,
      "num_input_tokens_seen": 161112,
      "step": 585
    },
    {
      "epoch": 5.221238938053097,
      "grad_norm": 21.705875396728516,
      "learning_rate": 2.7505662018334698e-05,
      "loss": 0.0835,
      "num_input_tokens_seen": 162472,
      "step": 590
    },
    {
      "epoch": 5.265486725663717,
      "grad_norm": 12.45948314666748,
      "learning_rate": 2.712118795797329e-05,
      "loss": 0.3655,
      "num_input_tokens_seen": 164216,
      "step": 595
    },
    {
      "epoch": 5.3097345132743365,
      "grad_norm": 3.4451708793640137,
      "learning_rate": 2.6736207876813646e-05,
      "loss": 0.0235,
      "num_input_tokens_seen": 165832,
      "step": 600
    },
    {
      "epoch": 5.353982300884955,
      "grad_norm": 0.08687157928943634,
      "learning_rate": 2.635081361392513e-05,
      "loss": 0.0756,
      "num_input_tokens_seen": 167000,
      "step": 605
    },
    {
      "epoch": 5.398230088495575,
      "grad_norm": 12.41464900970459,
      "learning_rate": 2.596509710718235e-05,
      "loss": 0.0677,
      "num_input_tokens_seen": 168456,
      "step": 610
    },
    {
      "epoch": 5.442477876106195,
      "grad_norm": 0.03950650617480278,
      "learning_rate": 2.5579150371332956e-05,
      "loss": 0.002,
      "num_input_tokens_seen": 169624,
      "step": 615
    },
    {
      "epoch": 5.486725663716814,
      "grad_norm": 1.2885109186172485,
      "learning_rate": 2.5193065476046955e-05,
      "loss": 0.0027,
      "num_input_tokens_seen": 170888,
      "step": 620
    },
    {
      "epoch": 5.530973451327434,
      "grad_norm": 1.0878580808639526,
      "learning_rate": 2.480693452395305e-05,
      "loss": 0.0287,
      "num_input_tokens_seen": 172056,
      "step": 625
    },
    {
      "epoch": 5.548672566371682,
      "eval_loss": 0.08361060917377472,
      "eval_runtime": 0.7732,
      "eval_samples_per_second": 32.335,
      "eval_steps_per_second": 16.814,
      "num_input_tokens_seen": 172504,
      "step": 627
    },
    {
      "epoch": 5.575221238938053,
      "grad_norm": 21.944652557373047,
      "learning_rate": 2.4420849628667046e-05,
      "loss": 0.2663,
      "num_input_tokens_seen": 173448,
      "step": 630
    },
    {
      "epoch": 5.619469026548672,
      "grad_norm": 0.16154882311820984,
      "learning_rate": 2.4034902892817648e-05,
      "loss": 0.1753,
      "num_input_tokens_seen": 174872,
      "step": 635
    },
    {
      "epoch": 5.663716814159292,
      "grad_norm": 0.21520113945007324,
      "learning_rate": 2.3649186386074872e-05,
      "loss": 0.0021,
      "num_input_tokens_seen": 176488,
      "step": 640
    },
    {
      "epoch": 5.707964601769912,
      "grad_norm": 0.9986706376075745,
      "learning_rate": 2.3263792123186353e-05,
      "loss": 0.0118,
      "num_input_tokens_seen": 177816,
      "step": 645
    },
    {
      "epoch": 5.752212389380531,
      "grad_norm": 14.675823211669922,
      "learning_rate": 2.2878812042026713e-05,
      "loss": 0.0679,
      "num_input_tokens_seen": 179352,
      "step": 650
    },
    {
      "epoch": 5.79646017699115,
      "grad_norm": 0.008065628819167614,
      "learning_rate": 2.2494337981665304e-05,
      "loss": 0.0226,
      "num_input_tokens_seen": 180504,
      "step": 655
    },
    {
      "epoch": 5.84070796460177,
      "grad_norm": 17.46634864807129,
      "learning_rate": 2.2110461660457482e-05,
      "loss": 0.161,
      "num_input_tokens_seen": 181976,
      "step": 660
    },
    {
      "epoch": 5.88495575221239,
      "grad_norm": 1.4565507173538208,
      "learning_rate": 2.1727274654164728e-05,
      "loss": 0.1769,
      "num_input_tokens_seen": 183000,
      "step": 665
    },
    {
      "epoch": 5.929203539823009,
      "grad_norm": 0.0984095111489296,
      "learning_rate": 2.1344868374108732e-05,
      "loss": 0.0023,
      "num_input_tokens_seen": 184344,
      "step": 670
    },
    {
      "epoch": 5.9734513274336285,
      "grad_norm": 9.466740608215332,
      "learning_rate": 2.096333404536474e-05,
      "loss": 0.0631,
      "num_input_tokens_seen": 185976,
      "step": 675
    },
    {
      "epoch": 6.017699115044247,
      "grad_norm": 0.01006875280290842,
      "learning_rate": 2.0582762684999327e-05,
      "loss": 0.0005,
      "num_input_tokens_seen": 187312,
      "step": 680
    },
    {
      "epoch": 6.053097345132743,
      "eval_loss": 0.051821138709783554,
      "eval_runtime": 0.7739,
      "eval_samples_per_second": 32.302,
      "eval_steps_per_second": 16.797,
      "num_input_tokens_seen": 188160,
      "step": 684
    },
    {
      "epoch": 6.061946902654867,
      "grad_norm": 11.252352714538574,
      "learning_rate": 2.0203245080357775e-05,
      "loss": 0.1977,
      "num_input_tokens_seen": 188512,
      "step": 685
    },
    {
      "epoch": 6.106194690265487,
      "grad_norm": 0.029097890481352806,
      "learning_rate": 1.982487176740627e-05,
      "loss": 0.009,
      "num_input_tokens_seen": 189616,
      "step": 690
    },
    {
      "epoch": 6.150442477876107,
      "grad_norm": 0.006874327547848225,
      "learning_rate": 1.944773300913407e-05,
      "loss": 0.0074,
      "num_input_tokens_seen": 191264,
      "step": 695
    },
    {
      "epoch": 6.1946902654867255,
      "grad_norm": 0.07622929662466049,
      "learning_rate": 1.907191877402076e-05,
      "loss": 0.0018,
      "num_input_tokens_seen": 192576,
      "step": 700
    },
    {
      "epoch": 6.238938053097345,
      "grad_norm": 0.07754877954721451,
      "learning_rate": 1.8697518714573807e-05,
      "loss": 0.0152,
      "num_input_tokens_seen": 193920,
      "step": 705
    },
    {
      "epoch": 6.283185840707965,
      "grad_norm": 0.005654957611113787,
      "learning_rate": 1.832462214594146e-05,
      "loss": 0.0014,
      "num_input_tokens_seen": 195216,
      "step": 710
    },
    {
      "epoch": 6.327433628318584,
      "grad_norm": 0.14916832745075226,
      "learning_rate": 1.7953318024606157e-05,
      "loss": 0.0015,
      "num_input_tokens_seen": 196640,
      "step": 715
    },
    {
      "epoch": 6.371681415929204,
      "grad_norm": 0.03626057133078575,
      "learning_rate": 1.7583694927163453e-05,
      "loss": 0.0401,
      "num_input_tokens_seen": 197696,
      "step": 720
    },
    {
      "epoch": 6.415929203539823,
      "grad_norm": 0.057499129325151443,
      "learning_rate": 1.7215841029191616e-05,
      "loss": 0.0005,
      "num_input_tokens_seen": 199040,
      "step": 725
    },
    {
      "epoch": 6.460176991150442,
      "grad_norm": 0.2001536786556244,
      "learning_rate": 1.6849844084216874e-05,
      "loss": 0.0061,
      "num_input_tokens_seen": 200928,
      "step": 730
    },
    {
      "epoch": 6.504424778761062,
      "grad_norm": 0.045186158269643784,
      "learning_rate": 1.648579140277931e-05,
      "loss": 0.0004,
      "num_input_tokens_seen": 202784,
      "step": 735
    },
    {
      "epoch": 6.548672566371682,
      "grad_norm": 0.09090372174978256,
      "learning_rate": 1.612376983160449e-05,
      "loss": 0.0071,
      "num_input_tokens_seen": 204080,
      "step": 740
    },
    {
      "epoch": 6.557522123893805,
      "eval_loss": 0.012937678955495358,
      "eval_runtime": 0.7734,
      "eval_samples_per_second": 32.325,
      "eval_steps_per_second": 16.809,
      "num_input_tokens_seen": 204240,
      "step": 741
    },
    {
      "epoch": 6.592920353982301,
      "grad_norm": 0.22665704786777496,
      "learning_rate": 1.576386573288569e-05,
      "loss": 0.0012,
      "num_input_tokens_seen": 205296,
      "step": 745
    },
    {
      "epoch": 6.6371681415929205,
      "grad_norm": 0.053931985050439835,
      "learning_rate": 1.5406164963681774e-05,
      "loss": 0.0009,
      "num_input_tokens_seen": 206768,
      "step": 750
    },
    {
      "epoch": 6.68141592920354,
      "grad_norm": 0.282073050737381,
      "learning_rate": 1.5050752855435457e-05,
      "loss": 0.0011,
      "num_input_tokens_seen": 208048,
      "step": 755
    },
    {
      "epoch": 6.725663716814159,
      "grad_norm": 10.507343292236328,
      "learning_rate": 1.4697714193617097e-05,
      "loss": 0.0132,
      "num_input_tokens_seen": 209200,
      "step": 760
    },
    {
      "epoch": 6.769911504424779,
      "grad_norm": 0.005959831643849611,
      "learning_rate": 1.4347133197498619e-05,
      "loss": 0.0015,
      "num_input_tokens_seen": 210448,
      "step": 765
    },
    {
      "epoch": 6.814159292035399,
      "grad_norm": 30.997230529785156,
      "learning_rate": 1.3999093500062587e-05,
      "loss": 0.2387,
      "num_input_tokens_seen": 211984,
      "step": 770
    },
    {
      "epoch": 6.8584070796460175,
      "grad_norm": 0.04386006295681,
      "learning_rate": 1.3653678128051106e-05,
      "loss": 0.0007,
      "num_input_tokens_seen": 213328,
      "step": 775
    },
    {
      "epoch": 6.902654867256637,
      "grad_norm": 7.10620641708374,
      "learning_rate": 1.3310969482159297e-05,
      "loss": 0.0646,
      "num_input_tokens_seen": 214400,
      "step": 780
    },
    {
      "epoch": 6.946902654867257,
      "grad_norm": 0.07594583928585052,
      "learning_rate": 1.2971049317378237e-05,
      "loss": 0.0005,
      "num_input_tokens_seen": 215888,
      "step": 785
    },
    {
      "epoch": 6.991150442477876,
      "grad_norm": 0.006478244438767433,
      "learning_rate": 1.263399872349183e-05,
      "loss": 0.003,
      "num_input_tokens_seen": 217664,
      "step": 790
    },
    {
      "epoch": 7.035398230088496,
      "grad_norm": 0.1744823157787323,
      "learning_rate": 1.2299898105732385e-05,
      "loss": 0.0039,
      "num_input_tokens_seen": 218840,
      "step": 795
    },
    {
      "epoch": 7.061946902654867,
      "eval_loss": 0.022299928590655327,
      "eval_runtime": 0.777,
      "eval_samples_per_second": 32.173,
      "eval_steps_per_second": 16.73,
      "num_input_tokens_seen": 219512,
      "step": 798
    },
    {
      "epoch": 7.079646017699115,
      "grad_norm": 0.02160412073135376,
      "learning_rate": 1.1968827165599564e-05,
      "loss": 0.0007,
      "num_input_tokens_seen": 219992,
      "step": 800
    },
    {
      "epoch": 7.123893805309734,
      "grad_norm": 0.010718903504312038,
      "learning_rate": 1.1640864881847105e-05,
      "loss": 0.0011,
      "num_input_tokens_seen": 221160,
      "step": 805
    },
    {
      "epoch": 7.168141592920354,
      "grad_norm": 0.011407556012272835,
      "learning_rate": 1.1316089491642076e-05,
      "loss": 0.0003,
      "num_input_tokens_seen": 222696,
      "step": 810
    },
    {
      "epoch": 7.212389380530974,
      "grad_norm": 0.014685764908790588,
      "learning_rate": 1.099457847190091e-05,
      "loss": 0.0002,
      "num_input_tokens_seen": 224088,
      "step": 815
    },
    {
      "epoch": 7.256637168141593,
      "grad_norm": 0.02897009812295437,
      "learning_rate": 1.067640852080693e-05,
      "loss": 0.0605,
      "num_input_tokens_seen": 225432,
      "step": 820
    },
    {
      "epoch": 7.300884955752212,
      "grad_norm": 0.0066994777880609035,
      "learning_rate": 1.0361655539513565e-05,
      "loss": 0.0002,
      "num_input_tokens_seen": 226696,
      "step": 825
    },
    {
      "epoch": 7.345132743362832,
      "grad_norm": 0.007768039591610432,
      "learning_rate": 1.0050394614037687e-05,
      "loss": 0.001,
      "num_input_tokens_seen": 228168,
      "step": 830
    },
    {
      "epoch": 7.389380530973451,
      "grad_norm": 0.07913458347320557,
      "learning_rate": 9.742699997347498e-06,
      "loss": 0.0009,
      "num_input_tokens_seen": 229528,
      "step": 835
    },
    {
      "epoch": 7.433628318584071,
      "grad_norm": 0.011403764598071575,
      "learning_rate": 9.438645091649029e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 231208,
      "step": 840
    },
    {
      "epoch": 7.477876106194691,
      "grad_norm": 0.030279316008090973,
      "learning_rate": 9.138302430875695e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 232392,
      "step": 845
    },
    {
      "epoch": 7.522123893805309,
      "grad_norm": 0.01019184198230505,
      "learning_rate": 8.841743663384913e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 233480,
      "step": 850
    },
    {
      "epoch": 7.566371681415929,
      "grad_norm": 0.01505549531430006,
      "learning_rate": 8.549039534865979e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 235256,
      "step": 855
    },
    {
      "epoch": 7.566371681415929,
      "eval_loss": 0.021708844229578972,
      "eval_runtime": 0.7819,
      "eval_samples_per_second": 31.972,
      "eval_steps_per_second": 16.625,
      "num_input_tokens_seen": 235256,
      "step": 855
    },
    {
      "epoch": 7.610619469026549,
      "grad_norm": 0.03689683601260185,
      "learning_rate": 8.260259871463352e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 236744,
      "step": 860
    },
    {
      "epoch": 7.654867256637168,
      "grad_norm": 0.0034927541855722666,
      "learning_rate": 7.975473563119216e-06,
      "loss": 0.0009,
      "num_input_tokens_seen": 237880,
      "step": 865
    },
    {
      "epoch": 7.699115044247788,
      "grad_norm": 0.6410696506500244,
      "learning_rate": 7.69474854713943e-06,
      "loss": 0.0015,
      "num_input_tokens_seen": 239544,
      "step": 870
    },
    {
      "epoch": 7.743362831858407,
      "grad_norm": 0.01861269399523735,
      "learning_rate": 7.4181517919866914e-06,
      "loss": 0.0182,
      "num_input_tokens_seen": 241048,
      "step": 875
    },
    {
      "epoch": 7.787610619469026,
      "grad_norm": 0.003941065166145563,
      "learning_rate": 7.1457492813049e-06,
      "loss": 0.0022,
      "num_input_tokens_seen": 242408,
      "step": 880
    },
    {
      "epoch": 7.831858407079646,
      "grad_norm": 0.10631576925516129,
      "learning_rate": 6.877605998178344e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 243688,
      "step": 885
    },
    {
      "epoch": 7.876106194690266,
      "grad_norm": 0.20832419395446777,
      "learning_rate": 6.613785909629686e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 245320,
      "step": 890
    },
    {
      "epoch": 7.920353982300885,
      "grad_norm": 0.004488417878746986,
      "learning_rate": 6.354351951360268e-06,
      "loss": 0.0023,
      "num_input_tokens_seen": 246584,
      "step": 895
    },
    {
      "epoch": 7.964601769911504,
      "grad_norm": 0.11372503638267517,
      "learning_rate": 6.099366012736438e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 248024,
      "step": 900
    },
    {
      "epoch": 8.008849557522124,
      "grad_norm": 0.05825456231832504,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 249024,
      "step": 905
    },
    {
      "epoch": 8.053097345132743,
      "grad_norm": 0.051966357976198196,
      "learning_rate": 5.602980431885024e-06,
      "loss": 0.0007,
      "num_input_tokens_seen": 250672,
      "step": 910
    },
    {
      "epoch": 8.070796460176991,
      "eval_loss": 0.013118009082973003,
      "eval_runtime": 0.7874,
      "eval_samples_per_second": 31.752,
      "eval_steps_per_second": 16.511,
      "num_input_tokens_seen": 251392,
      "step": 912
    },
    {
      "epoch": 8.097345132743364,
      "grad_norm": 0.005551676731556654,
      "learning_rate": 5.361699205108042e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 252368,
      "step": 915
    },
    {
      "epoch": 8.141592920353983,
      "grad_norm": 0.006533629726618528,
      "learning_rate": 5.125102800629203e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 253920,
      "step": 920
    },
    {
      "epoch": 8.185840707964601,
      "grad_norm": 0.007273109629750252,
      "learning_rate": 4.893247659793568e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 255248,
      "step": 925
    },
    {
      "epoch": 8.230088495575222,
      "grad_norm": 0.010464943014085293,
      "learning_rate": 4.666189092892245e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 256336,
      "step": 930
    },
    {
      "epoch": 8.274336283185841,
      "grad_norm": 0.004114061594009399,
      "learning_rate": 4.443981265967806e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 257648,
      "step": 935
    },
    {
      "epoch": 8.31858407079646,
      "grad_norm": 0.09662500768899918,
      "learning_rate": 4.226677187892739e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 258752,
      "step": 940
    },
    {
      "epoch": 8.36283185840708,
      "grad_norm": 0.08983103185892105,
      "learning_rate": 4.014328697723835e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 260144,
      "step": 945
    },
    {
      "epoch": 8.4070796460177,
      "grad_norm": 0.012256860733032227,
      "learning_rate": 3.806986452335748e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 261328,
      "step": 950
    },
    {
      "epoch": 8.451327433628318,
      "grad_norm": 0.022380976006388664,
      "learning_rate": 3.604699914336515e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 262784,
      "step": 955
    },
    {
      "epoch": 8.495575221238939,
      "grad_norm": 0.00660680141299963,
      "learning_rate": 3.4075173402679574e-06,
      "loss": 0.0007,
      "num_input_tokens_seen": 264448,
      "step": 960
    },
    {
      "epoch": 8.539823008849558,
      "grad_norm": 0.9003037214279175,
      "learning_rate": 3.215485769093862e-06,
      "loss": 0.0022,
      "num_input_tokens_seen": 265920,
      "step": 965
    },
    {
      "epoch": 8.575221238938052,
      "eval_loss": 0.014585654251277447,
      "eval_runtime": 0.7764,
      "eval_samples_per_second": 32.201,
      "eval_steps_per_second": 16.744,
      "num_input_tokens_seen": 266784,
      "step": 969
    },
    {
      "epoch": 8.584070796460177,
      "grad_norm": 0.025870338082313538,
      "learning_rate": 3.028651010978556e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 266992,
      "step": 970
    },
    {
      "epoch": 8.628318584070797,
      "grad_norm": 0.011923308484256268,
      "learning_rate": 2.8470576363586633e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 268688,
      "step": 975
    },
    {
      "epoch": 8.672566371681416,
      "grad_norm": 0.01803506724536419,
      "learning_rate": 2.67074896531056e-06,
      "loss": 0.0001,
      "num_input_tokens_seen": 270000,
      "step": 980
    },
    {
      "epoch": 8.716814159292035,
      "grad_norm": 0.00940409954637289,
      "learning_rate": 2.499767057216193e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 271056,
      "step": 985
    },
    {
      "epoch": 8.761061946902656,
      "grad_norm": 0.028705783188343048,
      "learning_rate": 2.334152700729511e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 272608,
      "step": 990
    },
    {
      "epoch": 8.805309734513274,
      "grad_norm": 0.013868856243789196,
      "learning_rate": 2.1739454040461794e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 274000,
      "step": 995
    },
    {
      "epoch": 8.849557522123893,
      "grad_norm": 0.015428607352077961,
      "learning_rate": 2.019183385478654e-06,
      "loss": 0.0005,
      "num_input_tokens_seen": 275200,
      "step": 1000
    },
    {
      "epoch": 8.893805309734514,
      "grad_norm": 1.5666464567184448,
      "learning_rate": 1.8699035643389928e-06,
      "loss": 0.0031,
      "num_input_tokens_seen": 276352,
      "step": 1005
    },
    {
      "epoch": 8.938053097345133,
      "grad_norm": 0.10010976344347,
      "learning_rate": 1.7261415521315798e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 277952,
      "step": 1010
    },
    {
      "epoch": 8.982300884955752,
      "grad_norm": 0.005919489078223705,
      "learning_rate": 1.587931644057783e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 279328,
      "step": 1015
    },
    {
      "epoch": 9.026548672566372,
      "grad_norm": 0.00307848840020597,
      "learning_rate": 1.455306810834678e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 280456,
      "step": 1020
    },
    {
      "epoch": 9.070796460176991,
      "grad_norm": 0.015283796936273575,
      "learning_rate": 1.3282986908296713e-06,
      "loss": 0.0002,
      "num_input_tokens_seen": 281752,
      "step": 1025
    },
    {
      "epoch": 9.079646017699115,
      "eval_loss": 0.015441279858350754,
      "eval_runtime": 0.7746,
      "eval_samples_per_second": 32.275,
      "eval_steps_per_second": 16.783,
      "num_input_tokens_seen": 281976,
      "step": 1026
    },
    {
      "epoch": 9.11504424778761,
      "grad_norm": 0.0041525596752762794,
      "learning_rate": 1.2069375825130292e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 283464,
      "step": 1030
    },
    {
      "epoch": 9.15929203539823,
      "grad_norm": 0.016132457181811333,
      "learning_rate": 1.091252437230003e-06,
      "loss": 0.0006,
      "num_input_tokens_seen": 284760,
      "step": 1035
    },
    {
      "epoch": 9.20353982300885,
      "grad_norm": 0.01380851399153471,
      "learning_rate": 9.812708522943164e-07,
      "loss": 0.0004,
      "num_input_tokens_seen": 286104,
      "step": 1040
    },
    {
      "epoch": 9.247787610619469,
      "grad_norm": 0.005584151484072208,
      "learning_rate": 8.770190644047077e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 287672,
      "step": 1045
    },
    {
      "epoch": 9.29203539823009,
      "grad_norm": 0.09154026955366135,
      "learning_rate": 7.785219433859847e-07,
      "loss": 0.0007,
      "num_input_tokens_seen": 288920,
      "step": 1050
    },
    {
      "epoch": 9.336283185840708,
      "grad_norm": 0.002845821203663945,
      "learning_rate": 6.858029862562343e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 289960,
      "step": 1055
    },
    {
      "epoch": 9.380530973451327,
      "grad_norm": 0.013985905796289444,
      "learning_rate": 5.988843116214616e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 291544,
      "step": 1060
    },
    {
      "epoch": 9.424778761061948,
      "grad_norm": 0.029856637120246887,
      "learning_rate": 5.177866543990689e-07,
      "loss": 0.0004,
      "num_input_tokens_seen": 292808,
      "step": 1065
    },
    {
      "epoch": 9.469026548672566,
      "grad_norm": 0.05611226707696915,
      "learning_rate": 4.425293608714548e-07,
      "loss": 0.0004,
      "num_input_tokens_seen": 293912,
      "step": 1070
    },
    {
      "epoch": 9.513274336283185,
      "grad_norm": 0.046232786029577255,
      "learning_rate": 3.7313038407083003e-07,
      "loss": 0.0023,
      "num_input_tokens_seen": 295416,
      "step": 1075
    },
    {
      "epoch": 9.557522123893806,
      "grad_norm": 0.007815594784915447,
      "learning_rate": 3.0960627949644107e-07,
      "loss": 0.0006,
      "num_input_tokens_seen": 297016,
      "step": 1080
    },
    {
      "epoch": 9.584070796460177,
      "eval_loss": 0.016118336468935013,
      "eval_runtime": 0.7771,
      "eval_samples_per_second": 32.172,
      "eval_steps_per_second": 16.729,
      "num_input_tokens_seen": 297688,
      "step": 1083
    },
    {
      "epoch": 9.601769911504425,
      "grad_norm": 0.003943083342164755,
      "learning_rate": 2.5197220116515987e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 298168,
      "step": 1085
    },
    {
      "epoch": 9.646017699115044,
      "grad_norm": 0.03541100025177002,
      "learning_rate": 2.0024189799641435e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 299672,
      "step": 1090
    },
    {
      "epoch": 9.690265486725664,
      "grad_norm": 0.011666457168757915,
      "learning_rate": 1.5442771053230665e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 301080,
      "step": 1095
    },
    {
      "epoch": 9.734513274336283,
      "grad_norm": 0.012163754552602768,
      "learning_rate": 1.1454056799370672e-07,
      "loss": 0.0008,
      "num_input_tokens_seen": 302472,
      "step": 1100
    },
    {
      "epoch": 9.778761061946902,
      "grad_norm": 0.01618136093020439,
      "learning_rate": 8.058998567304633e-08,
      "loss": 0.0004,
      "num_input_tokens_seen": 303592,
      "step": 1105
    },
    {
      "epoch": 9.823008849557523,
      "grad_norm": 0.0019596414640545845,
      "learning_rate": 5.2584062664368105e-08,
      "loss": 0.0002,
      "num_input_tokens_seen": 305224,
      "step": 1110
    },
    {
      "epoch": 9.867256637168142,
      "grad_norm": 0.026996349915862083,
      "learning_rate": 3.0529479931265535e-08,
      "loss": 0.0003,
      "num_input_tokens_seen": 306200,
      "step": 1115
    },
    {
      "epoch": 9.91150442477876,
      "grad_norm": 0.019850771874189377,
      "learning_rate": 1.443149871309113e-08,
      "loss": 0.0004,
      "num_input_tokens_seen": 307880,
      "step": 1120
    },
    {
      "epoch": 9.955752212389381,
      "grad_norm": 0.00854608416557312,
      "learning_rate": 4.293959269863201e-09,
      "loss": 0.0002,
      "num_input_tokens_seen": 309240,
      "step": 1125
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.010417921468615532,
      "learning_rate": 1.192799661514643e-10,
      "loss": 0.0001,
      "num_input_tokens_seen": 310504,
      "step": 1130
    },
    {
      "epoch": 10.0,
      "num_input_tokens_seen": 310504,
      "step": 1130,
      "total_flos": 1.3981854782128128e+16,
      "train_loss": 0.2885503941710855,
      "train_runtime": 156.0359,
      "train_samples_per_second": 14.42,
      "train_steps_per_second": 7.242
    }
  ],
  "logging_steps": 5,
  "max_steps": 1130,
  "num_input_tokens_seen": 310504,
  "num_train_epochs": 10,
  "save_steps": 57,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3981854782128128e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}