{
  "best_global_step": 551,
  "best_metric": 3.8482749462127686,
  "best_model_checkpoint": "saves/prefix-tuning/llama-3-8b-instruct/train_cb_1754652159/checkpoint-551",
  "epoch": 10.0,
  "eval_steps": 29,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08771929824561403,
      "grad_norm": 1.9907561540603638,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 12.757,
      "num_input_tokens_seen": 3552,
      "step": 5
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 2.235771894454956,
      "learning_rate": 7.894736842105263e-06,
      "loss": 12.4922,
      "num_input_tokens_seen": 7264,
      "step": 10
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 2.3024075031280518,
      "learning_rate": 1.2280701754385964e-05,
      "loss": 12.5261,
      "num_input_tokens_seen": 10528,
      "step": 15
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 1.8383631706237793,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 12.3027,
      "num_input_tokens_seen": 14720,
      "step": 20
    },
    {
      "epoch": 0.43859649122807015,
      "grad_norm": 2.205447196960449,
      "learning_rate": 2.105263157894737e-05,
      "loss": 12.6939,
      "num_input_tokens_seen": 18016,
      "step": 25
    },
    {
      "epoch": 0.5087719298245614,
      "eval_loss": 12.153342247009277,
      "eval_runtime": 0.6127,
      "eval_samples_per_second": 40.802,
      "eval_steps_per_second": 11.424,
      "num_input_tokens_seen": 20064,
      "step": 29
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 2.085423707962036,
      "learning_rate": 2.5438596491228074e-05,
      "loss": 12.5997,
      "num_input_tokens_seen": 20640,
      "step": 30
    },
    {
      "epoch": 0.6140350877192983,
      "grad_norm": 2.133491277694702,
      "learning_rate": 2.9824561403508772e-05,
      "loss": 12.6701,
      "num_input_tokens_seen": 24800,
      "step": 35
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 1.9534471035003662,
      "learning_rate": 3.421052631578947e-05,
      "loss": 11.6756,
      "num_input_tokens_seen": 28064,
      "step": 40
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 2.0729124546051025,
      "learning_rate": 3.859649122807018e-05,
      "loss": 11.3441,
      "num_input_tokens_seen": 30944,
      "step": 45
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 2.010507106781006,
      "learning_rate": 4.298245614035088e-05,
      "loss": 11.7411,
      "num_input_tokens_seen": 33664,
      "step": 50
    },
    {
      "epoch": 0.9649122807017544,
      "grad_norm": 2.055694580078125,
      "learning_rate": 4.736842105263158e-05,
      "loss": 11.0368,
      "num_input_tokens_seen": 36320,
      "step": 55
    },
    {
      "epoch": 1.0175438596491229,
      "eval_loss": 10.890871047973633,
      "eval_runtime": 0.6065,
      "eval_samples_per_second": 41.218,
      "eval_steps_per_second": 11.541,
      "num_input_tokens_seen": 37832,
      "step": 58
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 2.116218090057373,
      "learning_rate": 4.999812487773597e-05,
      "loss": 11.3165,
      "num_input_tokens_seen": 39080,
      "step": 60
    },
    {
      "epoch": 1.1403508771929824,
      "grad_norm": 2.034322738647461,
      "learning_rate": 4.997703298253406e-05,
      "loss": 11.1092,
      "num_input_tokens_seen": 42536,
      "step": 65
    },
    {
      "epoch": 1.2280701754385965,
      "grad_norm": 1.8908439874649048,
      "learning_rate": 4.993252512887069e-05,
      "loss": 10.783,
      "num_input_tokens_seen": 45608,
      "step": 70
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 2.179896116256714,
      "learning_rate": 4.986464304284091e-05,
      "loss": 9.9158,
      "num_input_tokens_seen": 49352,
      "step": 75
    },
    {
      "epoch": 1.4035087719298245,
      "grad_norm": 1.8387644290924072,
      "learning_rate": 4.977345036387331e-05,
      "loss": 10.1963,
      "num_input_tokens_seen": 52328,
      "step": 80
    },
    {
      "epoch": 1.4912280701754386,
      "grad_norm": 2.1669516563415527,
      "learning_rate": 4.965903258506806e-05,
      "loss": 10.4486,
      "num_input_tokens_seen": 56328,
      "step": 85
    },
    {
      "epoch": 1.526315789473684,
      "eval_loss": 9.39639949798584,
      "eval_runtime": 0.6062,
      "eval_samples_per_second": 41.24,
      "eval_steps_per_second": 11.547,
      "num_input_tokens_seen": 57288,
      "step": 87
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 2.1507346630096436,
      "learning_rate": 4.952149697304716e-05,
      "loss": 9.7679,
      "num_input_tokens_seen": 59048,
      "step": 90
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.8538447618484497,
      "learning_rate": 4.9360972467392056e-05,
      "loss": 8.7756,
      "num_input_tokens_seen": 62504,
      "step": 95
    },
    {
      "epoch": 1.7543859649122808,
      "grad_norm": 2.315293550491333,
      "learning_rate": 4.917760955976277e-05,
      "loss": 8.4432,
      "num_input_tokens_seen": 65832,
      "step": 100
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 1.8978357315063477,
      "learning_rate": 4.897158015281209e-05,
      "loss": 8.6132,
      "num_input_tokens_seen": 68808,
      "step": 105
    },
    {
      "epoch": 1.9298245614035088,
      "grad_norm": 1.8035889863967896,
      "learning_rate": 4.874307739902689e-05,
      "loss": 8.4403,
      "num_input_tokens_seen": 71848,
      "step": 110
    },
    {
      "epoch": 2.017543859649123,
      "grad_norm": 1.7996368408203125,
      "learning_rate": 4.849231551964771e-05,
      "loss": 7.9533,
      "num_input_tokens_seen": 74040,
      "step": 115
    },
    {
      "epoch": 2.0350877192982457,
      "eval_loss": 7.922963619232178,
      "eval_runtime": 0.6109,
      "eval_samples_per_second": 40.923,
      "eval_steps_per_second": 11.458,
      "num_input_tokens_seen": 74520,
      "step": 116
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 1.788129448890686,
      "learning_rate": 4.821952960383649e-05,
      "loss": 8.1754,
      "num_input_tokens_seen": 77272,
      "step": 120
    },
    {
      "epoch": 2.192982456140351,
      "grad_norm": 1.6834213733673096,
      "learning_rate": 4.7924975388280524e-05,
      "loss": 8.2941,
      "num_input_tokens_seen": 80280,
      "step": 125
    },
    {
      "epoch": 2.280701754385965,
      "grad_norm": 1.727159857749939,
      "learning_rate": 4.760892901743944e-05,
      "loss": 8.0539,
      "num_input_tokens_seen": 83480,
      "step": 130
    },
    {
      "epoch": 2.3684210526315788,
      "grad_norm": 1.7604347467422485,
      "learning_rate": 4.727168678465988e-05,
      "loss": 7.7214,
      "num_input_tokens_seen": 86232,
      "step": 135
    },
    {
      "epoch": 2.456140350877193,
      "grad_norm": 1.5606811046600342,
      "learning_rate": 4.6913564854400595e-05,
      "loss": 7.7251,
      "num_input_tokens_seen": 89656,
      "step": 140
    },
    {
      "epoch": 2.543859649122807,
      "grad_norm": 1.685321569442749,
      "learning_rate": 4.6534898965828405e-05,
      "loss": 7.4909,
      "num_input_tokens_seen": 93080,
      "step": 145
    },
    {
      "epoch": 2.543859649122807,
      "eval_loss": 6.920214653015137,
      "eval_runtime": 0.6094,
      "eval_samples_per_second": 41.022,
      "eval_steps_per_second": 11.486,
      "num_input_tokens_seen": 93080,
      "step": 145
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 1.6069295406341553,
      "learning_rate": 4.613604411806285e-05,
      "loss": 6.7691,
      "num_input_tokens_seen": 96440,
      "step": 150
    },
    {
      "epoch": 2.719298245614035,
      "grad_norm": 1.6741470098495483,
      "learning_rate": 4.5717374237364665e-05,
      "loss": 7.0387,
      "num_input_tokens_seen": 100280,
      "step": 155
    },
    {
      "epoch": 2.807017543859649,
      "grad_norm": 1.6800447702407837,
      "learning_rate": 4.5279281826580056e-05,
      "loss": 6.33,
      "num_input_tokens_seen": 103512,
      "step": 160
    },
    {
      "epoch": 2.8947368421052633,
      "grad_norm": 1.4865342378616333,
      "learning_rate": 4.482217759716946e-05,
      "loss": 6.4127,
      "num_input_tokens_seen": 106168,
      "step": 165
    },
    {
      "epoch": 2.982456140350877,
      "grad_norm": 1.5618782043457031,
      "learning_rate": 4.434649008416565e-05,
      "loss": 6.0778,
      "num_input_tokens_seen": 109624,
      "step": 170
    },
    {
      "epoch": 3.0526315789473686,
      "eval_loss": 6.102657794952393,
      "eval_runtime": 0.6108,
      "eval_samples_per_second": 40.931,
      "eval_steps_per_second": 11.461,
      "num_input_tokens_seen": 111928,
      "step": 174
    },
    {
      "epoch": 3.0701754385964914,
      "grad_norm": 1.8464562892913818,
      "learning_rate": 4.385266524442241e-05,
      "loss": 6.0082,
      "num_input_tokens_seen": 112472,
      "step": 175
    },
    {
      "epoch": 3.1578947368421053,
      "grad_norm": 1.7316503524780273,
      "learning_rate": 4.334116603853007e-05,
      "loss": 6.3538,
      "num_input_tokens_seen": 115576,
      "step": 180
    },
    {
      "epoch": 3.245614035087719,
      "grad_norm": 2.099435329437256,
      "learning_rate": 4.2812471996790206e-05,
      "loss": 6.1122,
      "num_input_tokens_seen": 119000,
      "step": 185
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 1.4580914974212646,
      "learning_rate": 4.226707876965611e-05,
      "loss": 6.0652,
      "num_input_tokens_seen": 122360,
      "step": 190
    },
    {
      "epoch": 3.4210526315789473,
      "grad_norm": 1.372891902923584,
      "learning_rate": 4.1705497663060767e-05,
      "loss": 5.6347,
      "num_input_tokens_seen": 125496,
      "step": 195
    },
    {
      "epoch": 3.5087719298245617,
      "grad_norm": 1.5532852411270142,
      "learning_rate": 4.1128255159067665e-05,
      "loss": 6.4246,
      "num_input_tokens_seen": 128760,
      "step": 200
    },
    {
      "epoch": 3.56140350877193,
      "eval_loss": 5.588784217834473,
      "eval_runtime": 0.6092,
      "eval_samples_per_second": 41.039,
      "eval_steps_per_second": 11.491,
      "num_input_tokens_seen": 131160,
      "step": 203
    },
    {
      "epoch": 3.5964912280701755,
      "grad_norm": 1.3230892419815063,
      "learning_rate": 4.053589242229412e-05,
      "loss": 5.8833,
      "num_input_tokens_seen": 132248,
      "step": 205
    },
    {
      "epoch": 3.6842105263157894,
      "grad_norm": 1.3423362970352173,
      "learning_rate": 3.9928964792569655e-05,
      "loss": 5.4105,
      "num_input_tokens_seen": 135160,
      "step": 210
    },
    {
      "epoch": 3.7719298245614032,
      "grad_norm": 1.3119643926620483,
      "learning_rate": 3.930804126430513e-05,
      "loss": 5.8935,
      "num_input_tokens_seen": 138200,
      "step": 215
    },
    {
      "epoch": 3.8596491228070176,
      "grad_norm": 2.161855697631836,
      "learning_rate": 3.867370395306068e-05,
      "loss": 5.5327,
      "num_input_tokens_seen": 141752,
      "step": 220
    },
    {
      "epoch": 3.9473684210526314,
      "grad_norm": 1.5290467739105225,
      "learning_rate": 3.8026547549812665e-05,
      "loss": 6.2326,
      "num_input_tokens_seen": 146008,
      "step": 225
    },
    {
      "epoch": 4.035087719298246,
      "grad_norm": 1.4180949926376343,
      "learning_rate": 3.736717876343106e-05,
      "loss": 5.7896,
      "num_input_tokens_seen": 148648,
      "step": 230
    },
    {
      "epoch": 4.0701754385964914,
      "eval_loss": 5.226391792297363,
      "eval_runtime": 0.6095,
      "eval_samples_per_second": 41.015,
      "eval_steps_per_second": 11.484,
      "num_input_tokens_seen": 150056,
      "step": 232
    },
    {
      "epoch": 4.12280701754386,
      "grad_norm": 1.4968349933624268,
      "learning_rate": 3.66962157518902e-05,
      "loss": 5.3999,
      "num_input_tokens_seen": 151656,
      "step": 235
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 1.1194733381271362,
      "learning_rate": 3.601428754274584e-05,
      "loss": 5.3437,
      "num_input_tokens_seen": 154280,
      "step": 240
    },
    {
      "epoch": 4.298245614035087,
      "grad_norm": 1.4553688764572144,
      "learning_rate": 3.532203344342212e-05,
      "loss": 5.4678,
      "num_input_tokens_seen": 157160,
      "step": 245
    },
    {
      "epoch": 4.385964912280702,
      "grad_norm": 1.4682071208953857,
      "learning_rate": 3.4620102441861143e-05,
      "loss": 5.4829,
      "num_input_tokens_seen": 160072,
      "step": 250
    },
    {
      "epoch": 4.473684210526316,
      "grad_norm": 1.5301012992858887,
      "learning_rate": 3.390915259809696e-05,
      "loss": 5.1392,
      "num_input_tokens_seen": 163752,
      "step": 255
    },
    {
      "epoch": 4.56140350877193,
      "grad_norm": 1.37416410446167,
      "learning_rate": 3.318985042732461e-05,
      "loss": 4.6737,
      "num_input_tokens_seen": 166600,
      "step": 260
    },
    {
      "epoch": 4.578947368421053,
      "eval_loss": 4.920877933502197,
      "eval_runtime": 0.6082,
      "eval_samples_per_second": 41.106,
      "eval_steps_per_second": 11.51,
      "num_input_tokens_seen": 167208,
      "step": 261
    },
    {
      "epoch": 4.649122807017544,
      "grad_norm": 1.3864611387252808,
      "learning_rate": 3.246287027504237e-05,
      "loss": 5.0123,
      "num_input_tokens_seen": 169704,
      "step": 265
    },
    {
      "epoch": 4.7368421052631575,
      "grad_norm": 1.261285424232483,
      "learning_rate": 3.172889368485311e-05,
      "loss": 4.5466,
      "num_input_tokens_seen": 173000,
      "step": 270
    },
    {
      "epoch": 4.824561403508772,
      "grad_norm": 2.1658103466033936,
      "learning_rate": 3.0988608759517475e-05,
      "loss": 5.6943,
      "num_input_tokens_seen": 177128,
      "step": 275
    },
    {
      "epoch": 4.912280701754386,
      "grad_norm": 1.3422341346740723,
      "learning_rate": 3.0242709515857758e-05,
      "loss": 4.4906,
      "num_input_tokens_seen": 180680,
      "step": 280
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.7265498638153076,
      "learning_rate": 2.949189523411747e-05,
      "loss": 4.466,
      "num_input_tokens_seen": 183280,
      "step": 285
    },
    {
      "epoch": 5.087719298245614,
      "grad_norm": 1.6141693592071533,
      "learning_rate": 2.8736869802386364e-05,
      "loss": 4.9442,
      "num_input_tokens_seen": 186160,
      "step": 290
    },
    {
      "epoch": 5.087719298245614,
      "eval_loss": 4.636929512023926,
      "eval_runtime": 0.6133,
      "eval_samples_per_second": 40.763,
      "eval_steps_per_second": 11.414,
      "num_input_tokens_seen": 186160,
      "step": 290
    },
    {
      "epoch": 5.175438596491228,
      "grad_norm": 1.650472640991211,
      "learning_rate": 2.797834105670559e-05,
      "loss": 4.6986,
      "num_input_tokens_seen": 189584,
      "step": 295
    },
    {
      "epoch": 5.2631578947368425,
      "grad_norm": 1.516791582107544,
      "learning_rate": 2.7217020117471793e-05,
      "loss": 4.6943,
      "num_input_tokens_seen": 193296,
      "step": 300
    },
    {
      "epoch": 5.350877192982456,
      "grad_norm": 1.355210542678833,
      "learning_rate": 2.6453620722761896e-05,
      "loss": 4.8374,
      "num_input_tokens_seen": 197328,
      "step": 305
    },
    {
      "epoch": 5.43859649122807,
      "grad_norm": 1.489431619644165,
      "learning_rate": 2.5688858559204053e-05,
      "loss": 4.4153,
      "num_input_tokens_seen": 200560,
      "step": 310
    },
    {
      "epoch": 5.526315789473684,
      "grad_norm": 1.0992668867111206,
      "learning_rate": 2.492345059102164e-05,
      "loss": 4.2959,
      "num_input_tokens_seen": 203632,
      "step": 315
    },
    {
      "epoch": 5.5964912280701755,
      "eval_loss": 4.427004337310791,
      "eval_runtime": 0.6091,
      "eval_samples_per_second": 41.044,
      "eval_steps_per_second": 11.492,
      "num_input_tokens_seen": 206000,
      "step": 319
    },
    {
      "epoch": 5.614035087719298,
      "grad_norm": 1.5276824235916138,
      "learning_rate": 2.4158114387879616e-05,
      "loss": 4.7125,
      "num_input_tokens_seen": 206480,
      "step": 320
    },
    {
      "epoch": 5.701754385964913,
      "grad_norm": 1.5830577611923218,
      "learning_rate": 2.3393567452163252e-05,
      "loss": 4.4515,
      "num_input_tokens_seen": 209232,
      "step": 325
    },
    {
      "epoch": 5.7894736842105265,
      "grad_norm": 1.2052812576293945,
      "learning_rate": 2.2630526546319914e-05,
      "loss": 4.7611,
      "num_input_tokens_seen": 213168,
      "step": 330
    },
    {
      "epoch": 5.87719298245614,
      "grad_norm": 1.3054100275039673,
      "learning_rate": 2.186970702089457e-05,
      "loss": 4.8574,
      "num_input_tokens_seen": 216752,
      "step": 335
    },
    {
      "epoch": 5.964912280701754,
      "grad_norm": 1.9110170602798462,
      "learning_rate": 2.111182214388893e-05,
      "loss": 4.0179,
      "num_input_tokens_seen": 219536,
      "step": 340
    },
    {
      "epoch": 6.052631578947368,
      "grad_norm": 1.5975571870803833,
      "learning_rate": 2.0357582432072957e-05,
      "loss": 4.0552,
      "num_input_tokens_seen": 222272,
      "step": 345
    },
    {
      "epoch": 6.105263157894737,
      "eval_loss": 4.272974014282227,
      "eval_runtime": 0.6081,
      "eval_samples_per_second": 41.114,
      "eval_steps_per_second": 11.512,
      "num_input_tokens_seen": 224064,
      "step": 348
    },
    {
      "epoch": 6.140350877192983,
      "grad_norm": 1.9832843542099,
      "learning_rate": 1.9607694984875754e-05,
      "loss": 4.9111,
      "num_input_tokens_seen": 225664,
      "step": 350
    },
    {
      "epoch": 6.228070175438597,
      "grad_norm": 1.5513900518417358,
      "learning_rate": 1.8862862821480025e-05,
      "loss": 4.4043,
      "num_input_tokens_seen": 229408,
      "step": 355
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 1.2998956441879272,
      "learning_rate": 1.8123784221741964e-05,
      "loss": 4.8502,
      "num_input_tokens_seen": 232832,
      "step": 360
    },
    {
      "epoch": 6.4035087719298245,
      "grad_norm": 1.7501558065414429,
      "learning_rate": 1.73911520715541e-05,
      "loss": 4.1639,
      "num_input_tokens_seen": 236288,
      "step": 365
    },
    {
      "epoch": 6.491228070175438,
      "grad_norm": 1.292130708694458,
      "learning_rate": 1.666565321326512e-05,
      "loss": 3.9357,
      "num_input_tokens_seen": 239296,
      "step": 370
    },
    {
      "epoch": 6.578947368421053,
      "grad_norm": 1.1817779541015625,
      "learning_rate": 1.5947967801765345e-05,
      "loss": 4.3442,
      "num_input_tokens_seen": 242848,
      "step": 375
    },
    {
      "epoch": 6.614035087719298,
      "eval_loss": 4.141702175140381,
      "eval_runtime": 0.6064,
      "eval_samples_per_second": 41.225,
      "eval_steps_per_second": 11.543,
      "num_input_tokens_seen": 243840,
      "step": 377
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 1.2892791032791138,
      "learning_rate": 1.5238768666841907e-05,
      "loss": 4.0103,
      "num_input_tokens_seen": 245344,
      "step": 380
    },
    {
      "epoch": 6.754385964912281,
      "grad_norm": 1.3167837858200073,
      "learning_rate": 1.4538720682400969e-05,
      "loss": 4.0767,
      "num_input_tokens_seen": 248256,
      "step": 385
    },
    {
      "epoch": 6.842105263157895,
      "grad_norm": 1.1590781211853027,
      "learning_rate": 1.3848480143148839e-05,
      "loss": 4.2847,
      "num_input_tokens_seen": 250976,
      "step": 390
    },
    {
      "epoch": 6.9298245614035086,
      "grad_norm": 1.3808413743972778,
      "learning_rate": 1.3168694149315796e-05,
      "loss": 4.0075,
      "num_input_tokens_seen": 254944,
      "step": 395
    },
    {
      "epoch": 7.017543859649122,
      "grad_norm": 1.1876074075698853,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 4.0259,
      "num_input_tokens_seen": 257888,
      "step": 400
    },
    {
      "epoch": 7.105263157894737,
      "grad_norm": 1.1949429512023926,
      "learning_rate": 1.1843024595699805e-05,
      "loss": 3.8654,
      "num_input_tokens_seen": 261088,
      "step": 405
    },
    {
      "epoch": 7.12280701754386,
      "eval_loss": 4.0376129150390625,
      "eval_runtime": 0.6104,
      "eval_samples_per_second": 40.957,
      "eval_steps_per_second": 11.468,
      "num_input_tokens_seen": 261504,
      "step": 406
    },
    {
      "epoch": 7.192982456140351,
      "grad_norm": 1.077356219291687,
      "learning_rate": 1.1198383850594758e-05,
      "loss": 3.8608,
      "num_input_tokens_seen": 263840,
      "step": 410
    },
    {
      "epoch": 7.280701754385965,
      "grad_norm": 1.5111844539642334,
      "learning_rate": 1.0566682115126344e-05,
      "loss": 4.3957,
      "num_input_tokens_seen": 267424,
      "step": 415
    },
    {
      "epoch": 7.368421052631579,
      "grad_norm": 1.4843523502349854,
      "learning_rate": 9.948511609419675e-06,
      "loss": 4.5217,
      "num_input_tokens_seen": 270720,
      "step": 420
    },
    {
      "epoch": 7.456140350877193,
      "grad_norm": 1.4406427145004272,
      "learning_rate": 9.344451868077353e-06,
      "loss": 3.904,
      "num_input_tokens_seen": 273504,
      "step": 425
    },
    {
      "epoch": 7.543859649122807,
      "grad_norm": 1.4402191638946533,
      "learning_rate": 8.755069196866014e-06,
      "loss": 4.3844,
      "num_input_tokens_seen": 276608,
      "step": 430
    },
    {
      "epoch": 7.631578947368421,
      "grad_norm": 1.8447635173797607,
      "learning_rate": 8.180916141804906e-06,
      "loss": 4.2193,
      "num_input_tokens_seen": 280352,
      "step": 435
    },
    {
      "epoch": 7.631578947368421,
      "eval_loss": 3.954362154006958,
      "eval_runtime": 0.6092,
      "eval_samples_per_second": 41.035,
      "eval_steps_per_second": 11.49,
      "num_input_tokens_seen": 280352,
      "step": 435
    },
    {
      "epoch": 7.719298245614035,
      "grad_norm": 1.0336785316467285,
      "learning_rate": 7.622530971154199e-06,
      "loss": 4.0324,
      "num_input_tokens_seen": 283808,
      "step": 440
    },
    {
      "epoch": 7.807017543859649,
      "grad_norm": 1.221795678138733,
      "learning_rate": 7.080437170788723e-06,
      "loss": 3.9307,
      "num_input_tokens_seen": 286688,
      "step": 445
    },
    {
      "epoch": 7.894736842105263,
      "grad_norm": 1.4437804222106934,
      "learning_rate": 6.555142953430158e-06,
      "loss": 3.933,
      "num_input_tokens_seen": 290240,
      "step": 450
    },
    {
      "epoch": 7.982456140350877,
      "grad_norm": 1.7613487243652344,
      "learning_rate": 6.0471407821978135e-06,
      "loss": 3.8559,
      "num_input_tokens_seen": 293568,
      "step": 455
    },
    {
      "epoch": 8.070175438596491,
      "grad_norm": 1.53519606590271,
      "learning_rate": 5.556906908924655e-06,
      "loss": 3.6867,
      "num_input_tokens_seen": 296720,
      "step": 460
    },
    {
      "epoch": 8.140350877192983,
      "eval_loss": 3.900949239730835,
      "eval_runtime": 0.6112,
      "eval_samples_per_second": 40.901,
      "eval_steps_per_second": 11.452,
      "num_input_tokens_seen": 299344,
      "step": 464
    },
    {
      "epoch": 8.157894736842104,
      "grad_norm": 1.4721977710723877,
      "learning_rate": 5.084900927671393e-06,
      "loss": 4.4772,
      "num_input_tokens_seen": 300112,
      "step": 465
    },
    {
      "epoch": 8.24561403508772,
      "grad_norm": 1.5723037719726562,
      "learning_rate": 4.631565343857239e-06,
      "loss": 4.0608,
      "num_input_tokens_seen": 303664,
      "step": 470
    },
    {
      "epoch": 8.333333333333334,
      "grad_norm": 1.189079761505127,
      "learning_rate": 4.19732515941125e-06,
      "loss": 3.7438,
      "num_input_tokens_seen": 306480,
      "step": 475
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 1.3771969079971313,
      "learning_rate": 3.7825874743331907e-06,
      "loss": 4.0063,
      "num_input_tokens_seen": 309744,
      "step": 480
    },
    {
      "epoch": 8.508771929824562,
      "grad_norm": 1.3309853076934814,
      "learning_rate": 3.3877411050374424e-06,
      "loss": 3.9324,
      "num_input_tokens_seen": 313360,
      "step": 485
    },
    {
      "epoch": 8.596491228070175,
      "grad_norm": 1.665574550628662,
      "learning_rate": 3.013156219837776e-06,
      "loss": 3.9532,
      "num_input_tokens_seen": 316784,
      "step": 490
    },
    {
      "epoch": 8.649122807017545,
      "eval_loss": 3.8729708194732666,
      "eval_runtime": 0.6067,
      "eval_samples_per_second": 41.205,
      "eval_steps_per_second": 11.537,
      "num_input_tokens_seen": 318672,
      "step": 493
    },
    {
      "epoch": 8.68421052631579,
      "grad_norm": 1.3207027912139893,
      "learning_rate": 2.659183991914696e-06,
      "loss": 3.6781,
      "num_input_tokens_seen": 319760,
      "step": 495
    },
    {
      "epoch": 8.771929824561404,
      "grad_norm": 0.9984751343727112,
      "learning_rate": 2.326156270090735e-06,
      "loss": 3.7426,
      "num_input_tokens_seen": 322512,
      "step": 500
    },
    {
      "epoch": 8.859649122807017,
      "grad_norm": 1.2388505935668945,
      "learning_rate": 2.0143852677223075e-06,
      "loss": 3.9789,
      "num_input_tokens_seen": 325936,
      "step": 505
    },
    {
      "epoch": 8.947368421052632,
      "grad_norm": 1.271156668663025,
      "learning_rate": 1.7241632699998123e-06,
      "loss": 4.0003,
      "num_input_tokens_seen": 329360,
      "step": 510
    },
    {
      "epoch": 9.035087719298245,
      "grad_norm": 1.4883043766021729,
      "learning_rate": 1.4557623599303903e-06,
      "loss": 4.2719,
      "num_input_tokens_seen": 332168,
      "step": 515
    },
    {
      "epoch": 9.12280701754386,
      "grad_norm": 1.2969716787338257,
      "learning_rate": 1.2094341632602064e-06,
      "loss": 4.1488,
      "num_input_tokens_seen": 336296,
      "step": 520
    },
    {
      "epoch": 9.157894736842104,
      "eval_loss": 3.859020948410034,
      "eval_runtime": 0.6085,
      "eval_samples_per_second": 41.082,
      "eval_steps_per_second": 11.503,
      "num_input_tokens_seen": 337480,
      "step": 522
    },
    {
      "epoch": 9.210526315789474,
      "grad_norm": 1.2461988925933838,
      "learning_rate": 9.85409612575411e-07,
      "loss": 4.0027,
      "num_input_tokens_seen": 339080,
      "step": 525
    },
    {
      "epoch": 9.298245614035087,
      "grad_norm": 1.2477507591247559,
      "learning_rate": 7.838987308029427e-07,
      "loss": 4.0184,
      "num_input_tokens_seen": 342568,
      "step": 530
    },
    {
      "epoch": 9.385964912280702,
      "grad_norm": 1.2659870386123657,
      "learning_rate": 6.050904343141095e-07,
      "loss": 3.9123,
      "num_input_tokens_seen": 345576,
      "step": 535
    },
    {
      "epoch": 9.473684210526315,
      "grad_norm": 1.4222140312194824,
      "learning_rate": 4.491523558155714e-07,
      "loss": 3.7029,
      "num_input_tokens_seen": 348392,
      "step": 540
    },
    {
      "epoch": 9.56140350877193,
      "grad_norm": 1.7531884908676147,
      "learning_rate": 3.162306871937387e-07,
      "loss": 4.0105,
      "num_input_tokens_seen": 351912,
      "step": 545
    },
    {
      "epoch": 9.649122807017545,
      "grad_norm": 1.3468551635742188,
      "learning_rate": 2.064500424599436e-07,
      "loss": 4.0229,
      "num_input_tokens_seen": 355432,
      "step": 550
    },
    {
      "epoch": 9.666666666666666,
      "eval_loss": 3.8482749462127686,
      "eval_runtime": 0.6125,
      "eval_samples_per_second": 40.816,
      "eval_steps_per_second": 11.429,
      "num_input_tokens_seen": 356456,
      "step": 551
    },
    {
      "epoch": 9.736842105263158,
      "grad_norm": 1.8819421529769897,
      "learning_rate": 1.1991334092484318e-07,
      "loss": 3.8035,
      "num_input_tokens_seen": 358760,
      "step": 555
    },
    {
      "epoch": 9.824561403508772,
      "grad_norm": 1.1540199518203735,
      "learning_rate": 5.6701710711626334e-08,
      "loss": 4.1963,
      "num_input_tokens_seen": 362088,
      "step": 560
    },
    {
      "epoch": 9.912280701754385,
      "grad_norm": 1.252097249031067,
      "learning_rate": 1.6874412698408836e-08,
      "loss": 3.7447,
      "num_input_tokens_seen": 365544,
      "step": 565
    },
    {
      "epoch": 10.0,
      "grad_norm": 2.58130145072937,
      "learning_rate": 4.687849611939576e-10,
      "loss": 3.8419,
      "num_input_tokens_seen": 367864,
      "step": 570
    },
    {
      "epoch": 10.0,
      "num_input_tokens_seen": 367864,
      "step": 570,
      "total_flos": 1.6564749657243648e+16,
      "train_loss": 6.090754040500574,
      "train_runtime": 121.9228,
      "train_samples_per_second": 18.454,
      "train_steps_per_second": 4.675
    }
  ],
  "logging_steps": 5,
  "max_steps": 570,
  "num_input_tokens_seen": 367864,
  "num_train_epochs": 10,
  "save_steps": 29,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6564749657243648e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}