| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 5.0, | |
| "global_step": 2230, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.02, | |
| "learning_rate": 2.2499999999999996e-06, | |
| "loss": 11.7449, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.04, | |
| "learning_rate": 9.75e-06, | |
| "loss": 9.9528, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.07, | |
| "learning_rate": 1.725e-05, | |
| "loss": 7.8835, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.09, | |
| "learning_rate": 2.475e-05, | |
| "loss": 6.9463, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.11, | |
| "learning_rate": 3.225e-05, | |
| "loss": 6.3116, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.13, | |
| "learning_rate": 3.975e-05, | |
| "loss": 5.8418, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "learning_rate": 4.7249999999999997e-05, | |
| "loss": 5.6196, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.18, | |
| "learning_rate": 5.4749999999999996e-05, | |
| "loss": 5.4065, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "learning_rate": 6.225e-05, | |
| "loss": 5.3217, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "learning_rate": 6.975e-05, | |
| "loss": 5.2094, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "learning_rate": 7.725e-05, | |
| "loss": 5.2388, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "learning_rate": 8.474999999999999e-05, | |
| "loss": 5.1522, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "learning_rate": 9.224999999999999e-05, | |
| "loss": 5.1679, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "learning_rate": 9.975e-05, | |
| "loss": 5.112, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.34, | |
| "learning_rate": 0.00010724999999999999, | |
| "loss": 5.1008, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "learning_rate": 0.00011475, | |
| "loss": 5.1024, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "learning_rate": 0.00012225, | |
| "loss": 5.0238, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "learning_rate": 0.00012974999999999998, | |
| "loss": 5.061, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.43, | |
| "learning_rate": 0.00013725, | |
| "loss": 5.0293, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.45, | |
| "learning_rate": 0.00014475, | |
| "loss": 5.0315, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.47, | |
| "learning_rate": 0.00015224999999999996, | |
| "loss": 5.1264, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.49, | |
| "learning_rate": 0.00015974999999999998, | |
| "loss": 4.9663, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "learning_rate": 0.00016724999999999997, | |
| "loss": 4.9525, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.54, | |
| "learning_rate": 0.00017475, | |
| "loss": 4.9757, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "learning_rate": 0.00018224999999999998, | |
| "loss": 4.9203, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.58, | |
| "learning_rate": 0.00018974999999999998, | |
| "loss": 4.908, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.61, | |
| "learning_rate": 0.00019724999999999997, | |
| "loss": 4.845, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.63, | |
| "learning_rate": 0.00020475, | |
| "loss": 4.8367, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.65, | |
| "learning_rate": 0.00021224999999999998, | |
| "loss": 4.8034, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.67, | |
| "learning_rate": 0.00021975, | |
| "loss": 4.8215, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.7, | |
| "learning_rate": 0.00022724999999999997, | |
| "loss": 4.7776, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.72, | |
| "learning_rate": 0.00023474999999999996, | |
| "loss": 4.7166, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.74, | |
| "learning_rate": 0.00024224999999999998, | |
| "loss": 4.7696, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "learning_rate": 0.000249, | |
| "loss": 4.6817, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.78, | |
| "learning_rate": 0.00025649999999999995, | |
| "loss": 4.7147, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.81, | |
| "learning_rate": 0.00026399999999999997, | |
| "loss": 4.6688, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.83, | |
| "learning_rate": 0.0002715, | |
| "loss": 4.5991, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.85, | |
| "learning_rate": 0.000279, | |
| "loss": 4.6401, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.87, | |
| "learning_rate": 0.00028649999999999997, | |
| "loss": 4.6029, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "learning_rate": 0.000294, | |
| "loss": 4.5118, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.9, | |
| "eval_loss": 4.247889041900635, | |
| "eval_runtime": 51.3593, | |
| "eval_samples_per_second": 51.442, | |
| "eval_steps_per_second": 0.818, | |
| "eval_wer": 0.9033518445061484, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "learning_rate": 0.000299672131147541, | |
| "loss": 4.5483, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.94, | |
| "learning_rate": 0.00029803278688524587, | |
| "loss": 4.4005, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "learning_rate": 0.00029639344262295076, | |
| "loss": 4.327, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.99, | |
| "learning_rate": 0.0002947540983606557, | |
| "loss": 4.2717, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 1.01, | |
| "learning_rate": 0.0002931147540983606, | |
| "loss": 4.0139, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 1.03, | |
| "learning_rate": 0.00029147540983606555, | |
| "loss": 3.8304, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 1.05, | |
| "learning_rate": 0.0002898360655737705, | |
| "loss": 3.5825, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 1.08, | |
| "learning_rate": 0.0002881967213114754, | |
| "loss": 3.4968, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 1.1, | |
| "learning_rate": 0.0002865573770491803, | |
| "loss": 3.2832, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 1.12, | |
| "learning_rate": 0.00028491803278688524, | |
| "loss": 3.1368, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.14, | |
| "learning_rate": 0.00028327868852459013, | |
| "loss": 2.899, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 1.17, | |
| "learning_rate": 0.0002816393442622951, | |
| "loss": 2.6293, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 1.19, | |
| "learning_rate": 0.00028, | |
| "loss": 2.7137, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 1.21, | |
| "learning_rate": 0.00027836065573770487, | |
| "loss": 2.4069, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 1.23, | |
| "learning_rate": 0.0002767213114754098, | |
| "loss": 2.3349, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.26, | |
| "learning_rate": 0.0002750819672131147, | |
| "loss": 2.1064, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 1.28, | |
| "learning_rate": 0.00027344262295081966, | |
| "loss": 1.9375, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 1.3, | |
| "learning_rate": 0.00027180327868852455, | |
| "loss": 1.9395, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 1.32, | |
| "learning_rate": 0.0002701639344262295, | |
| "loss": 1.7861, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 1.35, | |
| "learning_rate": 0.0002685245901639344, | |
| "loss": 1.6795, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 1.37, | |
| "learning_rate": 0.0002668852459016393, | |
| "loss": 1.5166, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 1.39, | |
| "learning_rate": 0.00026524590163934424, | |
| "loss": 1.4776, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 1.41, | |
| "learning_rate": 0.00026360655737704913, | |
| "loss": 1.4286, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 1.43, | |
| "learning_rate": 0.0002619672131147541, | |
| "loss": 1.3071, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.46, | |
| "learning_rate": 0.00026032786885245903, | |
| "loss": 1.277, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "learning_rate": 0.0002586885245901639, | |
| "loss": 1.2741, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "learning_rate": 0.0002570491803278688, | |
| "loss": 1.1661, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "learning_rate": 0.00025540983606557377, | |
| "loss": 1.1585, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.55, | |
| "learning_rate": 0.00025377049180327866, | |
| "loss": 1.1015, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "learning_rate": 0.0002521311475409836, | |
| "loss": 1.0403, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "learning_rate": 0.0002504918032786885, | |
| "loss": 1.0001, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.61, | |
| "learning_rate": 0.0002488524590163934, | |
| "loss": 0.9915, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.64, | |
| "learning_rate": 0.00024721311475409835, | |
| "loss": 0.9481, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.66, | |
| "learning_rate": 0.00024557377049180324, | |
| "loss": 0.9522, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.68, | |
| "learning_rate": 0.00024393442622950816, | |
| "loss": 0.9063, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 0.00024229508196721308, | |
| "loss": 0.8868, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 0.000240655737704918, | |
| "loss": 0.818, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 0.00023901639344262293, | |
| "loss": 0.8436, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 0.00023737704918032785, | |
| "loss": 0.7755, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 0.00023573770491803274, | |
| "loss": 0.8016, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "eval_loss": 0.8116478323936462, | |
| "eval_runtime": 61.0806, | |
| "eval_samples_per_second": 43.254, | |
| "eval_steps_per_second": 0.688, | |
| "eval_wer": 0.2206862356207854, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 0.00023409836065573766, | |
| "loss": 0.7623, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 0.00023245901639344258, | |
| "loss": 0.7884, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 0.00023081967213114753, | |
| "loss": 0.7791, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.88, | |
| "learning_rate": 0.00022918032786885245, | |
| "loss": 0.7111, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.91, | |
| "learning_rate": 0.00022754098360655738, | |
| "loss": 0.7063, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.93, | |
| "learning_rate": 0.0002259016393442623, | |
| "loss": 0.7307, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.95, | |
| "learning_rate": 0.0002242622950819672, | |
| "loss": 0.6792, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.97, | |
| "learning_rate": 0.0002226229508196721, | |
| "loss": 0.6496, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "learning_rate": 0.00022098360655737703, | |
| "loss": 0.6929, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 2.02, | |
| "learning_rate": 0.00021934426229508195, | |
| "loss": 0.5503, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 2.04, | |
| "learning_rate": 0.00021770491803278688, | |
| "loss": 0.4833, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 2.06, | |
| "learning_rate": 0.0002160655737704918, | |
| "loss": 0.5107, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 2.09, | |
| "learning_rate": 0.0002144262295081967, | |
| "loss": 0.4637, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 2.11, | |
| "learning_rate": 0.0002127868852459016, | |
| "loss": 0.4764, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 2.13, | |
| "learning_rate": 0.00021114754098360653, | |
| "loss": 0.4332, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 2.15, | |
| "learning_rate": 0.00020950819672131146, | |
| "loss": 0.4599, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 2.17, | |
| "learning_rate": 0.00020786885245901638, | |
| "loss": 0.4582, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 2.2, | |
| "learning_rate": 0.00020622950819672127, | |
| "loss": 0.4112, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 2.22, | |
| "learning_rate": 0.0002045901639344262, | |
| "loss": 0.4441, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 2.24, | |
| "learning_rate": 0.0002029508196721311, | |
| "loss": 0.4575, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 2.26, | |
| "learning_rate": 0.00020131147540983603, | |
| "loss": 0.4177, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 2.29, | |
| "learning_rate": 0.00019967213114754098, | |
| "loss": 0.4323, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 2.31, | |
| "learning_rate": 0.0001980327868852459, | |
| "loss": 0.4295, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 2.33, | |
| "learning_rate": 0.00019639344262295083, | |
| "loss": 0.4595, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "learning_rate": 0.00019475409836065572, | |
| "loss": 0.4338, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "learning_rate": 0.00019311475409836064, | |
| "loss": 0.4104, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "learning_rate": 0.00019147540983606556, | |
| "loss": 0.4216, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 2.42, | |
| "learning_rate": 0.00018983606557377048, | |
| "loss": 0.3729, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "learning_rate": 0.0001881967213114754, | |
| "loss": 0.3998, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 2.47, | |
| "learning_rate": 0.0001865573770491803, | |
| "loss": 0.4082, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "learning_rate": 0.00018491803278688522, | |
| "loss": 0.38, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "learning_rate": 0.00018327868852459014, | |
| "loss": 0.4157, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 2.53, | |
| "learning_rate": 0.00018163934426229506, | |
| "loss": 0.3937, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "learning_rate": 0.00017999999999999998, | |
| "loss": 0.3772, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 2.58, | |
| "learning_rate": 0.0001783606557377049, | |
| "loss": 0.3767, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 2.6, | |
| "learning_rate": 0.0001767213114754098, | |
| "loss": 0.3602, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "learning_rate": 0.00017508196721311472, | |
| "loss": 0.3876, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 0.00017344262295081964, | |
| "loss": 0.3776, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 0.00017180327868852456, | |
| "loss": 0.3617, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "learning_rate": 0.0001701639344262295, | |
| "loss": 0.3627, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "eval_loss": 0.5665903091430664, | |
| "eval_runtime": 61.1317, | |
| "eval_samples_per_second": 43.218, | |
| "eval_steps_per_second": 0.687, | |
| "eval_wer": 0.1506743355811186, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "learning_rate": 0.00016852459016393443, | |
| "loss": 0.3478, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 2.74, | |
| "learning_rate": 0.00016688524590163935, | |
| "loss": 0.3861, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 0.00016524590163934425, | |
| "loss": 0.3283, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 2.78, | |
| "learning_rate": 0.00016360655737704917, | |
| "loss": 0.3455, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "learning_rate": 0.0001619672131147541, | |
| "loss": 0.3457, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 0.000160327868852459, | |
| "loss": 0.3178, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 2.85, | |
| "learning_rate": 0.00015868852459016393, | |
| "loss": 0.3429, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "learning_rate": 0.00015704918032786883, | |
| "loss": 0.3253, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "learning_rate": 0.00015540983606557375, | |
| "loss": 0.3469, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 2.91, | |
| "learning_rate": 0.00015377049180327867, | |
| "loss": 0.334, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 0.0001521311475409836, | |
| "loss": 0.3246, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.96, | |
| "learning_rate": 0.0001504918032786885, | |
| "loss": 0.3243, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "learning_rate": 0.00014885245901639343, | |
| "loss": 0.3219, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 0.00014721311475409836, | |
| "loss": 0.3022, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "learning_rate": 0.00014557377049180328, | |
| "loss": 0.1935, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 0.0001439344262295082, | |
| "loss": 0.2093, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 3.07, | |
| "learning_rate": 0.0001422950819672131, | |
| "loss": 0.1961, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 3.09, | |
| "learning_rate": 0.00014065573770491801, | |
| "loss": 0.1788, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 0.00013901639344262294, | |
| "loss": 0.2245, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "learning_rate": 0.00013737704918032786, | |
| "loss": 0.1815, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 0.00013573770491803278, | |
| "loss": 0.2018, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 3.18, | |
| "learning_rate": 0.0001340983606557377, | |
| "loss": 0.1909, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "learning_rate": 0.00013245901639344262, | |
| "loss": 0.2025, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 0.00013081967213114754, | |
| "loss": 0.1927, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 3.25, | |
| "learning_rate": 0.00012918032786885246, | |
| "loss": 0.1816, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 0.00012754098360655736, | |
| "loss": 0.2217, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 0.00012590163934426228, | |
| "loss": 0.2035, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "learning_rate": 0.0001242622950819672, | |
| "loss": 0.1904, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 0.00012262295081967212, | |
| "loss": 0.1945, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 3.36, | |
| "learning_rate": 0.00012098360655737703, | |
| "loss": 0.1724, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "learning_rate": 0.00011934426229508195, | |
| "loss": 0.1704, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 0.00011770491803278688, | |
| "loss": 0.1745, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 3.43, | |
| "learning_rate": 0.00011606557377049179, | |
| "loss": 0.1756, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 0.00011442622950819671, | |
| "loss": 0.1864, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "learning_rate": 0.00011278688524590164, | |
| "loss": 0.1799, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "learning_rate": 0.00011114754098360654, | |
| "loss": 0.1974, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 0.00010950819672131146, | |
| "loss": 0.1751, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "learning_rate": 0.00010786885245901639, | |
| "loss": 0.1792, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "learning_rate": 0.00010622950819672129, | |
| "loss": 0.1887, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 0.00010459016393442621, | |
| "loss": 0.1763, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "eval_loss": 0.47305428981781006, | |
| "eval_runtime": 60.8349, | |
| "eval_samples_per_second": 43.429, | |
| "eval_steps_per_second": 0.69, | |
| "eval_wer": 0.12973026576755256, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 3.61, | |
| "learning_rate": 0.00010295081967213115, | |
| "loss": 0.1767, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 0.00010131147540983606, | |
| "loss": 0.1769, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "learning_rate": 9.967213114754098e-05, | |
| "loss": 0.1691, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "learning_rate": 9.80327868852459e-05, | |
| "loss": 0.1762, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 9.639344262295081e-05, | |
| "loss": 0.1636, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 3.72, | |
| "learning_rate": 9.475409836065573e-05, | |
| "loss": 0.1834, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 9.311475409836065e-05, | |
| "loss": 0.1781, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 3.77, | |
| "learning_rate": 9.147540983606556e-05, | |
| "loss": 0.1686, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "learning_rate": 8.983606557377048e-05, | |
| "loss": 0.1657, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 8.819672131147541e-05, | |
| "loss": 0.1468, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "learning_rate": 8.655737704918032e-05, | |
| "loss": 0.1686, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "learning_rate": 8.491803278688524e-05, | |
| "loss": 0.1583, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 3.88, | |
| "learning_rate": 8.327868852459016e-05, | |
| "loss": 0.1656, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 3.9, | |
| "learning_rate": 8.163934426229507e-05, | |
| "loss": 0.1595, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 7.999999999999999e-05, | |
| "loss": 0.1563, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "learning_rate": 7.836065573770491e-05, | |
| "loss": 0.1625, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "learning_rate": 7.672131147540982e-05, | |
| "loss": 0.1439, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 3.99, | |
| "learning_rate": 7.508196721311474e-05, | |
| "loss": 0.1593, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 4.01, | |
| "learning_rate": 7.344262295081966e-05, | |
| "loss": 0.1042, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "learning_rate": 7.180327868852459e-05, | |
| "loss": 0.0931, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 4.06, | |
| "learning_rate": 7.01639344262295e-05, | |
| "loss": 0.0909, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 4.08, | |
| "learning_rate": 6.852459016393443e-05, | |
| "loss": 0.0955, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 4.1, | |
| "learning_rate": 6.688524590163934e-05, | |
| "loss": 0.102, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "learning_rate": 6.524590163934426e-05, | |
| "loss": 0.0889, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "learning_rate": 6.360655737704918e-05, | |
| "loss": 0.0968, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 4.17, | |
| "learning_rate": 6.19672131147541e-05, | |
| "loss": 0.0809, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 4.19, | |
| "learning_rate": 6.0327868852459015e-05, | |
| "loss": 0.0869, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 4.22, | |
| "learning_rate": 5.868852459016393e-05, | |
| "loss": 0.0913, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 4.24, | |
| "learning_rate": 5.7049180327868844e-05, | |
| "loss": 0.078, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 4.26, | |
| "learning_rate": 5.5409836065573765e-05, | |
| "loss": 0.0924, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 4.28, | |
| "learning_rate": 5.3770491803278686e-05, | |
| "loss": 0.0879, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 4.3, | |
| "learning_rate": 5.21311475409836e-05, | |
| "loss": 0.0869, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "learning_rate": 5.049180327868852e-05, | |
| "loss": 0.0832, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 4.35, | |
| "learning_rate": 4.8852459016393436e-05, | |
| "loss": 0.0777, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "learning_rate": 4.721311475409836e-05, | |
| "loss": 0.0905, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 4.39, | |
| "learning_rate": 4.557377049180328e-05, | |
| "loss": 0.0818, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 4.42, | |
| "learning_rate": 4.3934426229508194e-05, | |
| "loss": 0.0829, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "learning_rate": 4.229508196721311e-05, | |
| "loss": 0.0865, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 4.46, | |
| "learning_rate": 4.065573770491803e-05, | |
| "loss": 0.0895, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "learning_rate": 3.901639344262295e-05, | |
| "loss": 0.089, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "eval_loss": 0.41652488708496094, | |
| "eval_runtime": 61.3777, | |
| "eval_samples_per_second": 43.045, | |
| "eval_steps_per_second": 0.684, | |
| "eval_wer": 0.11800872669575566, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 4.51, | |
| "learning_rate": 3.7377049180327865e-05, | |
| "loss": 0.071, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 4.53, | |
| "learning_rate": 3.5737704918032786e-05, | |
| "loss": 0.0847, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 4.55, | |
| "learning_rate": 3.40983606557377e-05, | |
| "loss": 0.0898, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 4.57, | |
| "learning_rate": 3.245901639344262e-05, | |
| "loss": 0.0798, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "learning_rate": 3.081967213114754e-05, | |
| "loss": 0.091, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 4.62, | |
| "learning_rate": 2.9180327868852458e-05, | |
| "loss": 0.0867, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 4.64, | |
| "learning_rate": 2.7540983606557373e-05, | |
| "loss": 0.0847, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "learning_rate": 2.5901639344262294e-05, | |
| "loss": 0.0861, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 4.69, | |
| "learning_rate": 2.4262295081967212e-05, | |
| "loss": 0.0858, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 4.71, | |
| "learning_rate": 2.262295081967213e-05, | |
| "loss": 0.0862, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 4.73, | |
| "learning_rate": 2.0983606557377048e-05, | |
| "loss": 0.0781, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "learning_rate": 1.9344262295081962e-05, | |
| "loss": 0.0733, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "learning_rate": 1.7704918032786883e-05, | |
| "loss": 0.0701, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 4.8, | |
| "learning_rate": 1.60655737704918e-05, | |
| "loss": 0.0688, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 4.82, | |
| "learning_rate": 1.442622950819672e-05, | |
| "loss": 0.0793, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "learning_rate": 1.2786885245901637e-05, | |
| "loss": 0.074, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "learning_rate": 1.1147540983606557e-05, | |
| "loss": 0.0728, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 4.89, | |
| "learning_rate": 9.508196721311474e-06, | |
| "loss": 0.0702, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 4.91, | |
| "learning_rate": 7.868852459016392e-06, | |
| "loss": 0.0672, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 4.93, | |
| "learning_rate": 6.229508196721311e-06, | |
| "loss": 0.0799, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "learning_rate": 4.590163934426229e-06, | |
| "loss": 0.0782, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 4.98, | |
| "learning_rate": 2.9508196721311474e-06, | |
| "loss": 0.0856, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "learning_rate": 1.3114754098360655e-06, | |
| "loss": 0.0727, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "step": 2230, | |
| "total_flos": 2.9474537983075942e+19, | |
| "train_loss": 1.5071858802451146, | |
| "train_runtime": 3191.5244, | |
| "train_samples_per_second": 44.709, | |
| "train_steps_per_second": 0.699 | |
| } | |
| ], | |
| "max_steps": 2230, | |
| "num_train_epochs": 5, | |
| "total_flos": 2.9474537983075942e+19, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |