{
  "best_metric": 0.8421279191970825,
  "best_model_checkpoint": "miner_id_24/checkpoint-200",
  "epoch": 0.029961424665742857,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00014980712332871428,
      "grad_norm": 0.6298632621765137,
      "learning_rate": 1e-05,
      "loss": 0.8866,
      "step": 1
    },
    {
      "epoch": 0.00014980712332871428,
      "eval_loss": 1.264488935470581,
      "eval_runtime": 565.4748,
      "eval_samples_per_second": 19.882,
      "eval_steps_per_second": 4.971,
      "step": 1
    },
    {
      "epoch": 0.00029961424665742855,
      "grad_norm": 0.46446141600608826,
      "learning_rate": 2e-05,
      "loss": 0.7634,
      "step": 2
    },
    {
      "epoch": 0.00044942136998614283,
      "grad_norm": 0.8355027437210083,
      "learning_rate": 3e-05,
      "loss": 0.9169,
      "step": 3
    },
    {
      "epoch": 0.0005992284933148571,
      "grad_norm": 0.644906222820282,
      "learning_rate": 4e-05,
      "loss": 0.9282,
      "step": 4
    },
    {
      "epoch": 0.0007490356166435714,
      "grad_norm": 0.49212518334388733,
      "learning_rate": 5e-05,
      "loss": 0.9798,
      "step": 5
    },
    {
      "epoch": 0.0008988427399722857,
      "grad_norm": 0.44296741485595703,
      "learning_rate": 6e-05,
      "loss": 1.0082,
      "step": 6
    },
    {
      "epoch": 0.001048649863301,
      "grad_norm": 0.3797648847103119,
      "learning_rate": 7e-05,
      "loss": 0.9093,
      "step": 7
    },
    {
      "epoch": 0.0011984569866297142,
      "grad_norm": 0.3763546049594879,
      "learning_rate": 8e-05,
      "loss": 1.0401,
      "step": 8
    },
    {
      "epoch": 0.0013482641099584285,
      "grad_norm": 0.3808014988899231,
      "learning_rate": 9e-05,
      "loss": 1.0106,
      "step": 9
    },
    {
      "epoch": 0.0014980712332871428,
      "grad_norm": 0.3745298385620117,
      "learning_rate": 0.0001,
      "loss": 0.9575,
      "step": 10
    },
    {
      "epoch": 0.001647878356615857,
      "grad_norm": 0.39324429631233215,
      "learning_rate": 9.999316524962345e-05,
      "loss": 0.9435,
      "step": 11
    },
    {
      "epoch": 0.0017976854799445713,
      "grad_norm": 0.3526301085948944,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.8208,
      "step": 12
    },
    {
      "epoch": 0.0019474926032732856,
      "grad_norm": 0.37045592069625854,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.9634,
      "step": 13
    },
    {
      "epoch": 0.002097299726602,
      "grad_norm": 0.33772504329681396,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.8449,
      "step": 14
    },
    {
      "epoch": 0.0022471068499307144,
      "grad_norm": 0.43836352229118347,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.6716,
      "step": 15
    },
    {
      "epoch": 0.0023969139732594284,
      "grad_norm": 0.3777485191822052,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.8852,
      "step": 16
    },
    {
      "epoch": 0.002546721096588143,
      "grad_norm": 0.36619219183921814,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.8873,
      "step": 17
    },
    {
      "epoch": 0.002696528219916857,
      "grad_norm": 0.3267233073711395,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.855,
      "step": 18
    },
    {
      "epoch": 0.0028463353432455715,
      "grad_norm": 0.32704541087150574,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.8803,
      "step": 19
    },
    {
      "epoch": 0.0029961424665742855,
      "grad_norm": 0.4222426414489746,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.8979,
      "step": 20
    },
    {
      "epoch": 0.003145949589903,
      "grad_norm": 0.33164480328559875,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.7718,
      "step": 21
    },
    {
      "epoch": 0.003295756713231714,
      "grad_norm": 0.32442235946655273,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.6811,
      "step": 22
    },
    {
      "epoch": 0.0034455638365604286,
      "grad_norm": 0.3956811726093292,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.8274,
      "step": 23
    },
    {
      "epoch": 0.0035953709598891426,
      "grad_norm": 0.4070234000682831,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.8034,
      "step": 24
    },
    {
      "epoch": 0.003745178083217857,
      "grad_norm": 0.30631351470947266,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.7311,
      "step": 25
    },
    {
      "epoch": 0.003894985206546571,
      "grad_norm": 0.34183362126350403,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.8869,
      "step": 26
    },
    {
      "epoch": 0.004044792329875285,
      "grad_norm": 0.29005616903305054,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.8703,
      "step": 27
    },
    {
      "epoch": 0.004194599453204,
      "grad_norm": 0.359518826007843,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.8542,
      "step": 28
    },
    {
      "epoch": 0.004344406576532714,
      "grad_norm": 0.35285744071006775,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.7862,
      "step": 29
    },
    {
      "epoch": 0.004494213699861429,
      "grad_norm": 0.32191500067710876,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.7777,
      "step": 30
    },
    {
      "epoch": 0.004644020823190142,
      "grad_norm": 0.34467917680740356,
      "learning_rate": 9.701596950580806e-05,
      "loss": 0.8035,
      "step": 31
    },
    {
      "epoch": 0.004793827946518857,
      "grad_norm": 0.31413033604621887,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.8019,
      "step": 32
    },
    {
      "epoch": 0.004943635069847571,
      "grad_norm": 0.33082014322280884,
      "learning_rate": 9.642770192448536e-05,
      "loss": 0.7154,
      "step": 33
    },
    {
      "epoch": 0.005093442193176286,
      "grad_norm": 0.34413284063339233,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.6867,
      "step": 34
    },
    {
      "epoch": 0.0052432493165049995,
      "grad_norm": 0.3844047486782074,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.8222,
      "step": 35
    },
    {
      "epoch": 0.005393056439833714,
      "grad_norm": 0.47779640555381775,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.8534,
      "step": 36
    },
    {
      "epoch": 0.0055428635631624285,
      "grad_norm": 2.2068326473236084,
      "learning_rate": 9.509956150664796e-05,
      "loss": 0.8379,
      "step": 37
    },
    {
      "epoch": 0.005692670686491143,
      "grad_norm": 0.41778677701950073,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.9006,
      "step": 38
    },
    {
      "epoch": 0.005842477809819857,
      "grad_norm": 0.4241478145122528,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.9492,
      "step": 39
    },
    {
      "epoch": 0.005992284933148571,
      "grad_norm": 0.4319832921028137,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.0443,
      "step": 40
    },
    {
      "epoch": 0.0061420920564772856,
      "grad_norm": 0.36160099506378174,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.6914,
      "step": 41
    },
    {
      "epoch": 0.006291899179806,
      "grad_norm": 0.4357793629169464,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.872,
      "step": 42
    },
    {
      "epoch": 0.006441706303134714,
      "grad_norm": 0.4501705467700958,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.8288,
      "step": 43
    },
    {
      "epoch": 0.006591513426463428,
      "grad_norm": 0.5566861033439636,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.9786,
      "step": 44
    },
    {
      "epoch": 0.006741320549792143,
      "grad_norm": 0.5371454954147339,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.9145,
      "step": 45
    },
    {
      "epoch": 0.006891127673120857,
      "grad_norm": 0.5671194791793823,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.9264,
      "step": 46
    },
    {
      "epoch": 0.007040934796449571,
      "grad_norm": 0.6908413171768188,
      "learning_rate": 9.093124073433463e-05,
      "loss": 1.0477,
      "step": 47
    },
    {
      "epoch": 0.007190741919778285,
      "grad_norm": 0.7245323061943054,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.9384,
      "step": 48
    },
    {
      "epoch": 0.007340549043107,
      "grad_norm": 0.9785915613174438,
      "learning_rate": 8.995939984474624e-05,
      "loss": 1.132,
      "step": 49
    },
    {
      "epoch": 0.007490356166435714,
      "grad_norm": 1.2804609537124634,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.0875,
      "step": 50
    },
    {
      "epoch": 0.007490356166435714,
      "eval_loss": 0.8688188791275024,
      "eval_runtime": 568.3984,
      "eval_samples_per_second": 19.78,
      "eval_steps_per_second": 4.945,
      "step": 50
    },
    {
      "epoch": 0.007640163289764428,
      "grad_norm": 0.2369249016046524,
      "learning_rate": 8.894386393810563e-05,
      "loss": 0.8366,
      "step": 51
    },
    {
      "epoch": 0.007789970413093142,
      "grad_norm": 0.2243618369102478,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.7335,
      "step": 52
    },
    {
      "epoch": 0.007939777536421857,
      "grad_norm": 0.3180404305458069,
      "learning_rate": 8.788574348801675e-05,
      "loss": 0.6498,
      "step": 53
    },
    {
      "epoch": 0.00808958465975057,
      "grad_norm": 0.3245214521884918,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.8886,
      "step": 54
    },
    {
      "epoch": 0.008239391783079286,
      "grad_norm": 0.24352572858333588,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.7486,
      "step": 55
    },
    {
      "epoch": 0.008389198906408,
      "grad_norm": 0.24336153268814087,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.8348,
      "step": 56
    },
    {
      "epoch": 0.008539006029736713,
      "grad_norm": 0.2256353199481964,
      "learning_rate": 8.564642241456986e-05,
      "loss": 0.8133,
      "step": 57
    },
    {
      "epoch": 0.008688813153065428,
      "grad_norm": 0.2895691692829132,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.874,
      "step": 58
    },
    {
      "epoch": 0.008838620276394142,
      "grad_norm": 0.2610842287540436,
      "learning_rate": 8.44676704559283e-05,
      "loss": 0.8169,
      "step": 59
    },
    {
      "epoch": 0.008988427399722857,
      "grad_norm": 0.2484428733587265,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.8387,
      "step": 60
    },
    {
      "epoch": 0.009138234523051571,
      "grad_norm": 0.23056460916996002,
      "learning_rate": 8.32512286056924e-05,
      "loss": 0.8971,
      "step": 61
    },
    {
      "epoch": 0.009288041646380285,
      "grad_norm": 0.24234654009342194,
      "learning_rate": 8.262928807620843e-05,
      "loss": 0.7921,
      "step": 62
    },
    {
      "epoch": 0.009437848769709,
      "grad_norm": 0.22825555503368378,
      "learning_rate": 8.199842702516583e-05,
      "loss": 0.8535,
      "step": 63
    },
    {
      "epoch": 0.009587655893037714,
      "grad_norm": 0.24779392778873444,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.7923,
      "step": 64
    },
    {
      "epoch": 0.009737463016366429,
      "grad_norm": 0.2588535249233246,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.7389,
      "step": 65
    },
    {
      "epoch": 0.009887270139695143,
      "grad_norm": 0.24879321455955505,
      "learning_rate": 8.005405736415126e-05,
      "loss": 0.7638,
      "step": 66
    },
    {
      "epoch": 0.010037077263023856,
      "grad_norm": 0.2867943048477173,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.8114,
      "step": 67
    },
    {
      "epoch": 0.010186884386352572,
      "grad_norm": 0.2666427493095398,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.6136,
      "step": 68
    },
    {
      "epoch": 0.010336691509681285,
      "grad_norm": 0.2932893633842468,
      "learning_rate": 7.803575286758364e-05,
      "loss": 0.9237,
      "step": 69
    },
    {
      "epoch": 0.010486498633009999,
      "grad_norm": 0.2577749788761139,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.7551,
      "step": 70
    },
    {
      "epoch": 0.010636305756338714,
      "grad_norm": 0.2540102005004883,
      "learning_rate": 7.66515864363997e-05,
      "loss": 0.7356,
      "step": 71
    },
    {
      "epoch": 0.010786112879667428,
      "grad_norm": 0.33452343940734863,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.6979,
      "step": 72
    },
    {
      "epoch": 0.010935920002996143,
      "grad_norm": 0.27727267146110535,
      "learning_rate": 7.52382768867422e-05,
      "loss": 0.7675,
      "step": 73
    },
    {
      "epoch": 0.011085727126324857,
      "grad_norm": 0.2692679464817047,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.7576,
      "step": 74
    },
    {
      "epoch": 0.01123553424965357,
      "grad_norm": 0.2856887876987457,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.8196,
      "step": 75
    },
    {
      "epoch": 0.011385341372982286,
      "grad_norm": 0.2883060574531555,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.8396,
      "step": 76
    },
    {
      "epoch": 0.011535148496311,
      "grad_norm": 0.28473779559135437,
      "learning_rate": 7.233044034264034e-05,
      "loss": 0.8057,
      "step": 77
    },
    {
      "epoch": 0.011684955619639713,
      "grad_norm": 0.2777874171733856,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.6675,
      "step": 78
    },
    {
      "epoch": 0.011834762742968429,
      "grad_norm": 0.31093916296958923,
      "learning_rate": 7.083909302476453e-05,
      "loss": 0.9278,
      "step": 79
    },
    {
      "epoch": 0.011984569866297142,
      "grad_norm": 0.28080371022224426,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.8478,
      "step": 80
    },
    {
      "epoch": 0.012134376989625857,
      "grad_norm": 0.2555106580257416,
      "learning_rate": 6.932495846462261e-05,
      "loss": 0.8204,
      "step": 81
    },
    {
      "epoch": 0.012284184112954571,
      "grad_norm": 0.29992347955703735,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.8931,
      "step": 82
    },
    {
      "epoch": 0.012433991236283285,
      "grad_norm": 0.2982628643512726,
      "learning_rate": 6.778969234612584e-05,
      "loss": 0.7932,
      "step": 83
    },
    {
      "epoch": 0.012583798359612,
      "grad_norm": 0.29373466968536377,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.9268,
      "step": 84
    },
    {
      "epoch": 0.012733605482940714,
      "grad_norm": 0.32724693417549133,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.8588,
      "step": 85
    },
    {
      "epoch": 0.012883412606269427,
      "grad_norm": 0.2967337667942047,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.6942,
      "step": 86
    },
    {
      "epoch": 0.013033219729598143,
      "grad_norm": 0.35658177733421326,
      "learning_rate": 6.466250186922325e-05,
      "loss": 0.7281,
      "step": 87
    },
    {
      "epoch": 0.013183026852926856,
      "grad_norm": 0.33493468165397644,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.8296,
      "step": 88
    },
    {
      "epoch": 0.013332833976255572,
      "grad_norm": 0.3347296118736267,
      "learning_rate": 6.307399704769099e-05,
      "loss": 0.7777,
      "step": 89
    },
    {
      "epoch": 0.013482641099584285,
      "grad_norm": 0.3696501553058624,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.8478,
      "step": 90
    },
    {
      "epoch": 0.013632448222912999,
      "grad_norm": 0.5119266510009766,
      "learning_rate": 6.147119600233758e-05,
      "loss": 0.7589,
      "step": 91
    },
    {
      "epoch": 0.013782255346241714,
      "grad_norm": 0.5026902556419373,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.8701,
      "step": 92
    },
    {
      "epoch": 0.013932062469570428,
      "grad_norm": 0.43266385793685913,
      "learning_rate": 5.985585137257401e-05,
      "loss": 0.8913,
      "step": 93
    },
    {
      "epoch": 0.014081869592899142,
      "grad_norm": 0.4305877983570099,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.6905,
      "step": 94
    },
    {
      "epoch": 0.014231676716227857,
      "grad_norm": 0.43609902262687683,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 0.7148,
      "step": 95
    },
    {
      "epoch": 0.01438148383955657,
      "grad_norm": 0.5080938339233398,
      "learning_rate": 5.74131823855921e-05,
      "loss": 1.0156,
      "step": 96
    },
    {
      "epoch": 0.014531290962885286,
      "grad_norm": 0.5817869901657104,
      "learning_rate": 5.6594608567103456e-05,
      "loss": 1.1493,
      "step": 97
    },
    {
      "epoch": 0.014681098086214,
      "grad_norm": 0.9376845359802246,
      "learning_rate": 5.577423184847932e-05,
      "loss": 1.0068,
      "step": 98
    },
    {
      "epoch": 0.014830905209542713,
      "grad_norm": 0.8120721578598022,
      "learning_rate": 5.495227651252315e-05,
      "loss": 1.0105,
      "step": 99
    },
    {
      "epoch": 0.014980712332871429,
      "grad_norm": 1.0948816537857056,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 1.0315,
      "step": 100
    },
    {
      "epoch": 0.014980712332871429,
      "eval_loss": 0.8477387428283691,
      "eval_runtime": 567.7372,
      "eval_samples_per_second": 19.803,
      "eval_steps_per_second": 4.951,
      "step": 100
    },
    {
      "epoch": 0.015130519456200142,
      "grad_norm": 0.21487335860729218,
      "learning_rate": 5.330452921628497e-05,
      "loss": 0.8855,
      "step": 101
    },
    {
      "epoch": 0.015280326579528856,
      "grad_norm": 0.27073433995246887,
      "learning_rate": 5.247918773366112e-05,
      "loss": 0.78,
      "step": 102
    },
    {
      "epoch": 0.015430133702857571,
      "grad_norm": 0.22832012176513672,
      "learning_rate": 5.165316846586541e-05,
      "loss": 0.5703,
      "step": 103
    },
    {
      "epoch": 0.015579940826186285,
      "grad_norm": 0.561996340751648,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 0.8075,
      "step": 104
    },
    {
      "epoch": 0.015729747949515,
      "grad_norm": 0.21872995793819427,
      "learning_rate": 5e-05,
      "loss": 0.7439,
      "step": 105
    },
    {
      "epoch": 0.015879555072843714,
      "grad_norm": 0.23321197926998138,
      "learning_rate": 4.917330276168208e-05,
      "loss": 0.9032,
      "step": 106
    },
    {
      "epoch": 0.016029362196172427,
      "grad_norm": 0.247480109333992,
      "learning_rate": 4.834683153413459e-05,
      "loss": 1.0497,
      "step": 107
    },
    {
      "epoch": 0.01617916931950114,
      "grad_norm": 0.22745971381664276,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 0.9326,
      "step": 108
    },
    {
      "epoch": 0.016328976442829858,
      "grad_norm": 0.21271130442619324,
      "learning_rate": 4.669547078371504e-05,
      "loss": 0.7486,
      "step": 109
    },
    {
      "epoch": 0.01647878356615857,
      "grad_norm": 0.26367267966270447,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.893,
      "step": 110
    },
    {
      "epoch": 0.016628590689487285,
      "grad_norm": 0.2798305153846741,
      "learning_rate": 4.504772348747687e-05,
      "loss": 0.883,
      "step": 111
    },
    {
      "epoch": 0.016778397812816,
      "grad_norm": 0.2146042287349701,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 0.9419,
      "step": 112
    },
    {
      "epoch": 0.016928204936144713,
      "grad_norm": 0.23173148930072784,
      "learning_rate": 4.3405391432896555e-05,
      "loss": 0.891,
      "step": 113
    },
    {
      "epoch": 0.017078012059473426,
      "grad_norm": 0.24624301493167877,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 0.8058,
      "step": 114
    },
    {
      "epoch": 0.017227819182802143,
      "grad_norm": 0.28666770458221436,
      "learning_rate": 4.17702704859633e-05,
      "loss": 0.886,
      "step": 115
    },
    {
      "epoch": 0.017377626306130857,
      "grad_norm": 0.24581004679203033,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.8011,
      "step": 116
    },
    {
      "epoch": 0.01752743342945957,
      "grad_norm": 0.25446394085884094,
      "learning_rate": 4.0144148627425993e-05,
      "loss": 0.8472,
      "step": 117
    },
    {
      "epoch": 0.017677240552788284,
      "grad_norm": 0.2756370007991791,
      "learning_rate": 3.933501846281267e-05,
      "loss": 0.6757,
      "step": 118
    },
    {
      "epoch": 0.017827047676116998,
      "grad_norm": 0.2860568165779114,
      "learning_rate": 3.852880399766243e-05,
      "loss": 0.8502,
      "step": 119
    },
    {
      "epoch": 0.017976854799445715,
      "grad_norm": 0.3061371445655823,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.7587,
      "step": 120
    },
    {
      "epoch": 0.01812666192277443,
      "grad_norm": 0.26677167415618896,
      "learning_rate": 3.6926002952309016e-05,
      "loss": 0.7358,
      "step": 121
    },
    {
      "epoch": 0.018276469046103142,
      "grad_norm": 0.27612438797950745,
      "learning_rate": 3.612985456190778e-05,
      "loss": 0.8028,
      "step": 122
    },
    {
      "epoch": 0.018426276169431856,
      "grad_norm": 0.31821104884147644,
      "learning_rate": 3.533749813077677e-05,
      "loss": 0.7335,
      "step": 123
    },
    {
      "epoch": 0.01857608329276057,
      "grad_norm": 0.2641538679599762,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.7258,
      "step": 124
    },
    {
      "epoch": 0.018725890416089287,
      "grad_norm": 0.2742711007595062,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 0.8571,
      "step": 125
    },
    {
      "epoch": 0.018875697539418,
      "grad_norm": 0.28631827235221863,
      "learning_rate": 3.298534127791785e-05,
      "loss": 0.8037,
      "step": 126
    },
    {
      "epoch": 0.019025504662746714,
      "grad_norm": 0.2540627121925354,
      "learning_rate": 3.221030765387417e-05,
      "loss": 0.678,
      "step": 127
    },
    {
      "epoch": 0.019175311786075427,
      "grad_norm": 0.2871909439563751,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.7392,
      "step": 128
    },
    {
      "epoch": 0.01932511890940414,
      "grad_norm": 0.28119537234306335,
      "learning_rate": 3.0675041535377405e-05,
      "loss": 0.6991,
      "step": 129
    },
    {
      "epoch": 0.019474926032732858,
      "grad_norm": 0.27180132269859314,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.7025,
      "step": 130
    },
    {
      "epoch": 0.019624733156061572,
      "grad_norm": 0.2700372040271759,
      "learning_rate": 2.916090697523549e-05,
      "loss": 0.8442,
      "step": 131
    },
    {
      "epoch": 0.019774540279390285,
      "grad_norm": 0.2593667507171631,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 0.7101,
      "step": 132
    },
    {
      "epoch": 0.019924347402719,
      "grad_norm": 0.3013896048069,
      "learning_rate": 2.766955965735968e-05,
      "loss": 0.8676,
      "step": 133
    },
    {
      "epoch": 0.020074154526047713,
      "grad_norm": 0.29744523763656616,
      "learning_rate": 2.693294185106562e-05,
      "loss": 0.7809,
      "step": 134
    },
    {
      "epoch": 0.020223961649376426,
      "grad_norm": 0.29425275325775146,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 0.8395,
      "step": 135
    },
    {
      "epoch": 0.020373768772705143,
      "grad_norm": 0.290644109249115,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.8011,
      "step": 136
    },
    {
      "epoch": 0.020523575896033857,
      "grad_norm": 0.29827746748924255,
      "learning_rate": 2.476172311325783e-05,
      "loss": 0.8402,
      "step": 137
    },
    {
      "epoch": 0.02067338301936257,
      "grad_norm": 0.2946103811264038,
      "learning_rate": 2.405152131093926e-05,
      "loss": 0.7247,
      "step": 138
    },
    {
      "epoch": 0.020823190142691284,
      "grad_norm": 0.38548269867897034,
      "learning_rate": 2.3348413563600325e-05,
      "loss": 0.8138,
      "step": 139
    },
    {
      "epoch": 0.020972997266019998,
      "grad_norm": 0.2995039224624634,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.7435,
      "step": 140
    },
    {
      "epoch": 0.021122804389348715,
      "grad_norm": 0.3135714828968048,
      "learning_rate": 2.196424713241637e-05,
      "loss": 0.7181,
      "step": 141
    },
    {
      "epoch": 0.02127261151267743,
      "grad_norm": 0.3725619912147522,
      "learning_rate": 2.128356686585282e-05,
      "loss": 0.7686,
      "step": 142
    },
    {
      "epoch": 0.021422418636006142,
      "grad_norm": 0.41671574115753174,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.7906,
      "step": 143
    },
    {
      "epoch": 0.021572225759334856,
      "grad_norm": 0.4549003839492798,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.9617,
      "step": 144
    },
    {
      "epoch": 0.02172203288266357,
      "grad_norm": 0.459332138299942,
      "learning_rate": 1.928936436551661e-05,
      "loss": 0.8265,
      "step": 145
    },
    {
      "epoch": 0.021871840005992287,
      "grad_norm": 0.47894296050071716,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 0.9195,
      "step": 146
    },
    {
      "epoch": 0.022021647129321,
      "grad_norm": 0.6261174082756042,
      "learning_rate": 1.800157297483417e-05,
      "loss": 1.025,
      "step": 147
    },
    {
      "epoch": 0.022171454252649714,
      "grad_norm": 0.9317310452461243,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.9746,
      "step": 148
    },
    {
      "epoch": 0.022321261375978427,
      "grad_norm": 1.1701384782791138,
      "learning_rate": 1.6748771394307585e-05,
      "loss": 0.8404,
      "step": 149
    },
    {
      "epoch": 0.02247106849930714,
      "grad_norm": 1.098693609237671,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.9547,
      "step": 150
    },
    {
      "epoch": 0.02247106849930714,
      "eval_loss": 0.8422527313232422,
      "eval_runtime": 568.1109,
      "eval_samples_per_second": 19.79,
      "eval_steps_per_second": 4.948,
      "step": 150
    },
    {
      "epoch": 0.022620875622635855,
      "grad_norm": 0.18898192048072815,
      "learning_rate": 1.553232954407171e-05,
      "loss": 0.7215,
      "step": 151
    },
    {
      "epoch": 0.022770682745964572,
      "grad_norm": 0.2388179451227188,
      "learning_rate": 1.4938160786375572e-05,
      "loss": 0.6902,
      "step": 152
    },
    {
      "epoch": 0.022920489869293285,
      "grad_norm": 0.41913092136383057,
      "learning_rate": 1.435357758543015e-05,
      "loss": 0.7113,
      "step": 153
    },
    {
      "epoch": 0.023070296992622,
      "grad_norm": 0.22527259588241577,
      "learning_rate": 1.3778739760445552e-05,
      "loss": 0.6841,
      "step": 154
    },
    {
      "epoch": 0.023220104115950713,
      "grad_norm": 0.18967139720916748,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 0.8281,
      "step": 155
    },
    {
      "epoch": 0.023369911239279426,
      "grad_norm": 0.1980752944946289,
      "learning_rate": 1.2658926150792322e-05,
      "loss": 0.7506,
      "step": 156
    },
    {
      "epoch": 0.023519718362608143,
      "grad_norm": 0.2360668182373047,
      "learning_rate": 1.2114256511983274e-05,
      "loss": 0.9188,
      "step": 157
    },
    {
      "epoch": 0.023669525485936857,
      "grad_norm": 0.21157562732696533,
      "learning_rate": 1.157994445715706e-05,
      "loss": 0.9648,
      "step": 158
    },
    {
      "epoch": 0.02381933260926557,
      "grad_norm": 0.20437665283679962,
      "learning_rate": 1.1056136061894384e-05,
      "loss": 0.8487,
      "step": 159
    },
    {
      "epoch": 0.023969139732594284,
      "grad_norm": 0.20270663499832153,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.7895,
      "step": 160
    },
    {
      "epoch": 0.024118946855922998,
      "grad_norm": 0.2236839085817337,
      "learning_rate": 1.0040600155253765e-05,
      "loss": 0.9018,
      "step": 161
    },
    {
      "epoch": 0.024268753979251715,
      "grad_norm": 0.21588213741779327,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.8231,
      "step": 162
    },
    {
      "epoch": 0.02441856110258043,
      "grad_norm": 0.22752077877521515,
      "learning_rate": 9.068759265665384e-06,
      "loss": 0.8567,
      "step": 163
    },
    {
      "epoch": 0.024568368225909142,
      "grad_norm": 0.2827790379524231,
      "learning_rate": 8.599558442598998e-06,
      "loss": 0.8503,
      "step": 164
    },
    {
      "epoch": 0.024718175349237856,
      "grad_norm": 0.25776752829551697,
      "learning_rate": 8.141676086873572e-06,
      "loss": 0.6566,
      "step": 165
    },
    {
      "epoch": 0.02486798247256657,
      "grad_norm": 0.2566935420036316,
      "learning_rate": 7.695237378953223e-06,
      "loss": 0.8419,
      "step": 166
    },
    {
      "epoch": 0.025017789595895283,
      "grad_norm": 0.30715909600257874,
      "learning_rate": 7.260364370723044e-06,
      "loss": 0.7849,
      "step": 167
    },
    {
      "epoch": 0.025167596719224,
      "grad_norm": 0.24969881772994995,
      "learning_rate": 6.837175952121306e-06,
      "loss": 0.7011,
      "step": 168
    },
    {
      "epoch": 0.025317403842552714,
      "grad_norm": 0.2686540484428406,
      "learning_rate": 6.425787818636131e-06,
      "loss": 0.7597,
      "step": 169
    },
    {
      "epoch": 0.025467210965881427,
      "grad_norm": 0.2643718719482422,
      "learning_rate": 6.026312439675552e-06,
      "loss": 0.7392,
      "step": 170
    },
    {
      "epoch": 0.02561701808921014,
      "grad_norm": 0.2777179181575775,
      "learning_rate": 5.6388590278194096e-06,
      "loss": 0.851,
      "step": 171
    },
    {
      "epoch": 0.025766825212538855,
      "grad_norm": 0.26481401920318604,
      "learning_rate": 5.263533508961827e-06,
      "loss": 0.631,
      "step": 172
    },
    {
      "epoch": 0.025916632335867572,
      "grad_norm": 0.2540353238582611,
      "learning_rate": 4.900438493352055e-06,
      "loss": 0.792,
      "step": 173
    },
    {
      "epoch": 0.026066439459196285,
      "grad_norm": 0.2759273052215576,
      "learning_rate": 4.549673247541875e-06,
      "loss": 0.9144,
      "step": 174
    },
    {
      "epoch": 0.026216246582525,
      "grad_norm": 0.2873280942440033,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 0.7864,
      "step": 175
    },
    {
      "epoch": 0.026366053705853713,
      "grad_norm": 0.2554738521575928,
      "learning_rate": 3.885512251130763e-06,
      "loss": 0.6807,
      "step": 176
    },
    {
      "epoch": 0.026515860829182426,
      "grad_norm": 0.2898644506931305,
      "learning_rate": 3.5722980755146517e-06,
      "loss": 0.7932,
      "step": 177
    },
    {
      "epoch": 0.026665667952511143,
      "grad_norm": 0.2970656156539917,
      "learning_rate": 3.271776770026963e-06,
      "loss": 0.7186,
      "step": 178
    },
    {
      "epoch": 0.026815475075839857,
      "grad_norm": 0.2831547260284424,
      "learning_rate": 2.9840304941919415e-06,
      "loss": 0.8004,
      "step": 179
    },
    {
      "epoch": 0.02696528219916857,
      "grad_norm": 0.29099857807159424,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 0.7029,
      "step": 180
    },
    {
      "epoch": 0.027115089322497284,
      "grad_norm": 0.272522896528244,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.7507,
      "step": 181
    },
    {
      "epoch": 0.027264896445825998,
      "grad_norm": 0.29832085967063904,
      "learning_rate": 2.1982109232821178e-06,
      "loss": 0.9112,
      "step": 182
    },
    {
      "epoch": 0.027414703569154715,
      "grad_norm": 0.2714107632637024,
      "learning_rate": 1.962316193157593e-06,
      "loss": 0.8058,
      "step": 183
    },
    {
      "epoch": 0.02756451069248343,
      "grad_norm": 0.2914239764213562,
      "learning_rate": 1.7395544861325718e-06,
      "loss": 0.7447,
      "step": 184
    },
    {
      "epoch": 0.027714317815812142,
      "grad_norm": 0.296657532453537,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 0.8355,
      "step": 185
    },
    {
      "epoch": 0.027864124939140856,
      "grad_norm": 0.27173134684562683,
      "learning_rate": 1.333670137599713e-06,
      "loss": 0.7745,
      "step": 186
    },
    {
      "epoch": 0.02801393206246957,
      "grad_norm": 0.3543894290924072,
      "learning_rate": 1.1506584608200367e-06,
      "loss": 0.9567,
      "step": 187
    },
    {
      "epoch": 0.028163739185798283,
      "grad_norm": 0.3419502377510071,
      "learning_rate": 9.810017062595322e-07,
      "loss": 0.828,
      "step": 188
    },
    {
      "epoch": 0.028313546309127,
      "grad_norm": 0.35001078248023987,
      "learning_rate": 8.247462563808817e-07,
      "loss": 0.8224,
      "step": 189
    },
    {
      "epoch": 0.028463353432455714,
      "grad_norm": 0.3231000006198883,
      "learning_rate": 6.819348298638839e-07,
      "loss": 0.726,
      "step": 190
    },
    {
      "epoch": 0.028613160555784427,
      "grad_norm": 0.43736642599105835,
      "learning_rate": 5.526064699265753e-07,
      "loss": 0.8406,
      "step": 191
    },
    {
      "epoch": 0.02876296767911314,
      "grad_norm": 0.4133260250091553,
      "learning_rate": 4.367965336512403e-07,
      "loss": 0.8529,
      "step": 192
    },
    {
      "epoch": 0.028912774802441855,
      "grad_norm": 0.3894733190536499,
      "learning_rate": 3.3453668231809286e-07,
      "loss": 0.7316,
      "step": 193
    },
    {
      "epoch": 0.029062581925770572,
      "grad_norm": 0.4218355417251587,
      "learning_rate": 2.458548727494292e-07,
      "loss": 0.8377,
      "step": 194
    },
    {
      "epoch": 0.029212389049099285,
      "grad_norm": 0.4262062907218933,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 1.0527,
      "step": 195
    },
    {
      "epoch": 0.029362196172428,
      "grad_norm": 0.48938897252082825,
      "learning_rate": 1.0931863906127327e-07,
      "loss": 1.0953,
      "step": 196
    },
    {
      "epoch": 0.029512003295756713,
      "grad_norm": 0.5830502510070801,
      "learning_rate": 6.150154258476315e-08,
      "loss": 1.0599,
      "step": 197
    },
    {
      "epoch": 0.029661810419085426,
      "grad_norm": 0.6355454921722412,
      "learning_rate": 2.7337132953697554e-08,
      "loss": 0.9897,
      "step": 198
    },
    {
      "epoch": 0.029811617542414143,
      "grad_norm": 0.7947019338607788,
      "learning_rate": 6.834750376549792e-09,
      "loss": 1.12,
      "step": 199
    },
    {
      "epoch": 0.029961424665742857,
      "grad_norm": 1.0451745986938477,
      "learning_rate": 0.0,
      "loss": 0.9059,
      "step": 200
    },
    {
      "epoch": 0.029961424665742857,
      "eval_loss": 0.8421279191970825,
      "eval_runtime": 568.7717,
      "eval_samples_per_second": 19.767,
      "eval_steps_per_second": 4.942,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2221066769963418e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}