{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3027681660899654,
  "eval_steps": 500,
  "global_step": 350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00865051903114187,
      "grad_norm": 0.6758226156234741,
      "learning_rate": 5.142857142857142e-06,
      "loss": 2.4878,
      "mean_token_accuracy": 0.5207233637571335,
      "num_tokens": 20480.0,
      "step": 10
    },
    {
      "epoch": 0.01730103806228374,
      "grad_norm": 0.527189314365387,
      "learning_rate": 1.0857142857142858e-05,
      "loss": 2.2677,
      "mean_token_accuracy": 0.5652981504797936,
      "num_tokens": 40960.0,
      "step": 20
    },
    {
      "epoch": 0.025951557093425604,
      "grad_norm": 0.5320698022842407,
      "learning_rate": 1.6571428571428574e-05,
      "loss": 2.3177,
      "mean_token_accuracy": 0.5567937418818474,
      "num_tokens": 61440.0,
      "step": 30
    },
    {
      "epoch": 0.03460207612456748,
      "grad_norm": 0.4314405918121338,
      "learning_rate": 1.9999371690018227e-05,
      "loss": 2.1426,
      "mean_token_accuracy": 0.571554248034954,
      "num_tokens": 81920.0,
      "step": 40
    },
    {
      "epoch": 0.04325259515570934,
      "grad_norm": 0.413466215133667,
      "learning_rate": 1.9992304109437157e-05,
      "loss": 2.2219,
      "mean_token_accuracy": 0.5544477015733719,
      "num_tokens": 102400.0,
      "step": 50
    },
    {
      "epoch": 0.05190311418685121,
      "grad_norm": 0.514323353767395,
      "learning_rate": 1.997738912978706e-05,
      "loss": 2.1555,
      "mean_token_accuracy": 0.5584555208683014,
      "num_tokens": 122880.0,
      "step": 60
    },
    {
      "epoch": 0.06055363321799308,
      "grad_norm": 0.4057506322860718,
      "learning_rate": 1.9954638464462176e-05,
      "loss": 2.1002,
      "mean_token_accuracy": 0.5628543496131897,
      "num_tokens": 143360.0,
      "step": 70
    },
    {
      "epoch": 0.06920415224913495,
      "grad_norm": 0.4184742271900177,
      "learning_rate": 1.9924069980567823e-05,
      "loss": 2.0149,
      "mean_token_accuracy": 0.5812316745519638,
      "num_tokens": 163840.0,
      "step": 80
    },
    {
      "epoch": 0.07785467128027682,
      "grad_norm": 0.4239538013935089,
      "learning_rate": 1.9885707684888565e-05,
      "loss": 1.9184,
      "mean_token_accuracy": 0.6034213066101074,
      "num_tokens": 184320.0,
      "step": 90
    },
    {
      "epoch": 0.08650519031141868,
      "grad_norm": 0.42795529961586,
      "learning_rate": 1.9839581705034626e-05,
      "loss": 1.8282,
      "mean_token_accuracy": 0.6114858329296112,
      "num_tokens": 204800.0,
      "step": 100
    },
    {
      "epoch": 0.09515570934256055,
      "grad_norm": 0.4808609187602997,
      "learning_rate": 1.9785728265781327e-05,
      "loss": 1.7533,
      "mean_token_accuracy": 0.6275659844279289,
      "num_tokens": 225280.0,
      "step": 110
    },
    {
      "epoch": 0.10380622837370242,
      "grad_norm": 0.4543810188770294,
      "learning_rate": 1.972418966062018e-05,
      "loss": 1.8477,
      "mean_token_accuracy": 0.6150537610054017,
      "num_tokens": 245760.0,
      "step": 120
    },
    {
      "epoch": 0.11245674740484429,
      "grad_norm": 0.40035656094551086,
      "learning_rate": 1.965501421854394e-05,
      "loss": 1.8999,
      "mean_token_accuracy": 0.6046432048082352,
      "num_tokens": 266240.0,
      "step": 130
    },
    {
      "epoch": 0.12110726643598616,
      "grad_norm": 0.5222700238227844,
      "learning_rate": 1.9578256266091692e-05,
      "loss": 1.8368,
      "mean_token_accuracy": 0.6162267863750458,
      "num_tokens": 286720.0,
      "step": 140
    },
    {
      "epoch": 0.12975778546712802,
      "grad_norm": 0.47384974360466003,
      "learning_rate": 1.9493976084683814e-05,
      "loss": 1.6773,
      "mean_token_accuracy": 0.6407624676823616,
      "num_tokens": 307200.0,
      "step": 150
    },
    {
      "epoch": 0.1384083044982699,
      "grad_norm": 0.47149407863616943,
      "learning_rate": 1.9402239863280322e-05,
      "loss": 1.8808,
      "mean_token_accuracy": 0.6105571806430816,
      "num_tokens": 327680.0,
      "step": 160
    },
    {
      "epoch": 0.14705882352941177,
      "grad_norm": 0.5852150321006775,
      "learning_rate": 1.930311964639973e-05,
      "loss": 1.7557,
      "mean_token_accuracy": 0.6328934475779533,
      "num_tokens": 348160.0,
      "step": 170
    },
    {
      "epoch": 0.15570934256055363,
      "grad_norm": 0.5378768444061279,
      "learning_rate": 1.9196693277539306e-05,
      "loss": 1.9526,
      "mean_token_accuracy": 0.5930107563734055,
      "num_tokens": 368640.0,
      "step": 180
    },
    {
      "epoch": 0.1643598615916955,
      "grad_norm": 0.7191518545150757,
      "learning_rate": 1.9083044338041136e-05,
      "loss": 1.6977,
      "mean_token_accuracy": 0.6364125147461891,
      "num_tokens": 389120.0,
      "step": 190
    },
    {
      "epoch": 0.17301038062283736,
      "grad_norm": 0.5665961503982544,
      "learning_rate": 1.8962262081451965e-05,
      "loss": 1.7096,
      "mean_token_accuracy": 0.6426197499036789,
      "num_tokens": 409600.0,
      "step": 200
    },
    {
      "epoch": 0.18166089965397925,
      "grad_norm": 0.5746972560882568,
      "learning_rate": 1.8834441363428464e-05,
      "loss": 1.8621,
      "mean_token_accuracy": 0.6132453620433808,
      "num_tokens": 430080.0,
      "step": 210
    },
    {
      "epoch": 0.1903114186851211,
      "grad_norm": 0.5433031916618347,
      "learning_rate": 1.8699682567242865e-05,
      "loss": 1.725,
      "mean_token_accuracy": 0.639393937587738,
      "num_tokens": 450560.0,
      "step": 220
    },
    {
      "epoch": 0.19896193771626297,
      "grad_norm": 0.5260516405105591,
      "learning_rate": 1.8558091524947527e-05,
      "loss": 1.8052,
      "mean_token_accuracy": 0.62776148468256,
      "num_tokens": 471040.0,
      "step": 230
    },
    {
      "epoch": 0.20761245674740483,
      "grad_norm": 0.5413312315940857,
      "learning_rate": 1.8409779434260345e-05,
      "loss": 1.4382,
      "mean_token_accuracy": 0.6926686212420463,
      "num_tokens": 491520.0,
      "step": 240
    },
    {
      "epoch": 0.21626297577854672,
      "grad_norm": 0.6882454752922058,
      "learning_rate": 1.8254862771236258e-05,
      "loss": 1.8495,
      "mean_token_accuracy": 0.6179374352097511,
      "num_tokens": 512000.0,
      "step": 250
    },
    {
      "epoch": 0.22491349480968859,
      "grad_norm": 0.5259960889816284,
      "learning_rate": 1.8093463198793433e-05,
      "loss": 1.7922,
      "mean_token_accuracy": 0.6273704767227173,
      "num_tokens": 532480.0,
      "step": 260
    },
    {
      "epoch": 0.23356401384083045,
      "grad_norm": 0.5192792415618896,
      "learning_rate": 1.7925707471165993e-05,
      "loss": 1.6386,
      "mean_token_accuracy": 0.6586021527647972,
      "num_tokens": 552960.0,
      "step": 270
    },
    {
      "epoch": 0.2422145328719723,
      "grad_norm": 0.6467787623405457,
      "learning_rate": 1.775172733435831e-05,
      "loss": 1.7193,
      "mean_token_accuracy": 0.6362658873200416,
      "num_tokens": 573440.0,
      "step": 280
    },
    {
      "epoch": 0.2508650519031142,
      "grad_norm": 0.5666069388389587,
      "learning_rate": 1.7571659422679003e-05,
      "loss": 1.5196,
      "mean_token_accuracy": 0.6816715553402901,
      "num_tokens": 593920.0,
      "step": 290
    },
    {
      "epoch": 0.25951557093425603,
      "grad_norm": 0.6515355110168457,
      "learning_rate": 1.7385645151436e-05,
      "loss": 1.6763,
      "mean_token_accuracy": 0.6472140833735466,
      "num_tokens": 614400.0,
      "step": 300
    },
    {
      "epoch": 0.2681660899653979,
      "grad_norm": 0.7086573243141174,
      "learning_rate": 1.7193830605876806e-05,
      "loss": 1.7996,
      "mean_token_accuracy": 0.6148582622408867,
      "num_tokens": 634880.0,
      "step": 310
    },
    {
      "epoch": 0.2768166089965398,
      "grad_norm": 0.6187496185302734,
      "learning_rate": 1.699636642646129e-05,
      "loss": 1.6142,
      "mean_token_accuracy": 0.6498533740639687,
      "num_tokens": 655360.0,
      "step": 320
    },
    {
      "epoch": 0.28546712802768165,
      "grad_norm": 0.5988145470619202,
      "learning_rate": 1.6793407690557075e-05,
      "loss": 1.834,
      "mean_token_accuracy": 0.6208211183547974,
      "num_tokens": 675840.0,
      "step": 330
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 0.6012206077575684,
      "learning_rate": 1.658511379065039e-05,
      "loss": 1.6939,
      "mean_token_accuracy": 0.6402737081050873,
      "num_tokens": 696320.0,
      "step": 340
    },
    {
      "epoch": 0.3027681660899654,
      "grad_norm": 0.5834046602249146,
      "learning_rate": 1.63716483091681e-05,
      "loss": 1.8679,
      "mean_token_accuracy": 0.608113394677639,
      "num_tokens": 716800.0,
      "step": 350
    }
  ],
  "logging_steps": 10,
  "max_steps": 1156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.22451948208128e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}