{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.20390824129141885,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006796941376380628,
      "grad_norm": 9.122925758361816,
      "learning_rate": 0.0,
      "loss": 6.6013,
      "step": 1
    },
    {
      "epoch": 0.0013593882752761257,
      "grad_norm": 9.549845695495605,
      "learning_rate": 6.756756756756758e-07,
      "loss": 6.7658,
      "step": 2
    },
    {
      "epoch": 0.0027187765505522514,
      "grad_norm": 8.209335327148438,
      "learning_rate": 2.0270270270270273e-06,
      "loss": 6.5902,
      "step": 4
    },
    {
      "epoch": 0.0040781648258283775,
      "grad_norm": 6.113947868347168,
      "learning_rate": 3.3783783783783788e-06,
      "loss": 6.5818,
      "step": 6
    },
    {
      "epoch": 0.005437553101104503,
      "grad_norm": 6.703476428985596,
      "learning_rate": 4.72972972972973e-06,
      "loss": 6.5232,
      "step": 8
    },
    {
      "epoch": 0.006796941376380629,
      "grad_norm": 13.405858039855957,
      "learning_rate": 6.081081081081082e-06,
      "loss": 6.5521,
      "step": 10
    },
    {
      "epoch": 0.008156329651656755,
      "grad_norm": 11.226860046386719,
      "learning_rate": 7.432432432432433e-06,
      "loss": 6.584,
      "step": 12
    },
    {
      "epoch": 0.009515717926932881,
      "grad_norm": 7.006751537322998,
      "learning_rate": 8.783783783783785e-06,
      "loss": 6.518,
      "step": 14
    },
    {
      "epoch": 0.010875106202209005,
      "grad_norm": 6.206234455108643,
      "learning_rate": 1.0135135135135136e-05,
      "loss": 6.4523,
      "step": 16
    },
    {
      "epoch": 0.012234494477485132,
      "grad_norm": 3.75468111038208,
      "learning_rate": 1.1486486486486488e-05,
      "loss": 6.4488,
      "step": 18
    },
    {
      "epoch": 0.013593882752761258,
      "grad_norm": 3.5509755611419678,
      "learning_rate": 1.2837837837837838e-05,
      "loss": 6.3353,
      "step": 20
    },
    {
      "epoch": 0.014953271028037384,
      "grad_norm": 2.838531494140625,
      "learning_rate": 1.4189189189189189e-05,
      "loss": 6.2737,
      "step": 22
    },
    {
      "epoch": 0.01631265930331351,
      "grad_norm": 2.849353790283203,
      "learning_rate": 1.554054054054054e-05,
      "loss": 6.2386,
      "step": 24
    },
    {
      "epoch": 0.017672047578589634,
      "grad_norm": 3.192340850830078,
      "learning_rate": 1.6891891891891892e-05,
      "loss": 6.1459,
      "step": 26
    },
    {
      "epoch": 0.019031435853865762,
      "grad_norm": 3.079922914505005,
      "learning_rate": 1.8243243243243244e-05,
      "loss": 6.1783,
      "step": 28
    },
    {
      "epoch": 0.020390824129141887,
      "grad_norm": 3.689027786254883,
      "learning_rate": 1.9594594594594595e-05,
      "loss": 5.9851,
      "step": 30
    },
    {
      "epoch": 0.02175021240441801,
      "grad_norm": 2.39050555229187,
      "learning_rate": 2.0945945945945947e-05,
      "loss": 6.0281,
      "step": 32
    },
    {
      "epoch": 0.02310960067969414,
      "grad_norm": 2.3905773162841797,
      "learning_rate": 2.2297297297297298e-05,
      "loss": 6.0399,
      "step": 34
    },
    {
      "epoch": 0.024468988954970263,
      "grad_norm": 2.676403045654297,
      "learning_rate": 2.364864864864865e-05,
      "loss": 6.0026,
      "step": 36
    },
    {
      "epoch": 0.025828377230246388,
      "grad_norm": 2.220277786254883,
      "learning_rate": 2.5e-05,
      "loss": 5.9469,
      "step": 38
    },
    {
      "epoch": 0.027187765505522515,
      "grad_norm": 3.7453274726867676,
      "learning_rate": 2.635135135135135e-05,
      "loss": 5.804,
      "step": 40
    },
    {
      "epoch": 0.02854715378079864,
      "grad_norm": 4.522032737731934,
      "learning_rate": 2.7702702702702704e-05,
      "loss": 5.8153,
      "step": 42
    },
    {
      "epoch": 0.029906542056074768,
      "grad_norm": 3.07928204536438,
      "learning_rate": 2.9054054054054052e-05,
      "loss": 5.8357,
      "step": 44
    },
    {
      "epoch": 0.031265930331350895,
      "grad_norm": 3.2400898933410645,
      "learning_rate": 3.0405405405405407e-05,
      "loss": 5.8705,
      "step": 46
    },
    {
      "epoch": 0.03262531860662702,
      "grad_norm": 5.057046890258789,
      "learning_rate": 3.175675675675676e-05,
      "loss": 5.6707,
      "step": 48
    },
    {
      "epoch": 0.033984706881903144,
      "grad_norm": 4.462399005889893,
      "learning_rate": 3.310810810810811e-05,
      "loss": 5.7012,
      "step": 50
    },
    {
      "epoch": 0.03534409515717927,
      "grad_norm": 3.095761299133301,
      "learning_rate": 3.445945945945946e-05,
      "loss": 5.6685,
      "step": 52
    },
    {
      "epoch": 0.03670348343245539,
      "grad_norm": 3.478303909301758,
      "learning_rate": 3.581081081081081e-05,
      "loss": 5.6353,
      "step": 54
    },
    {
      "epoch": 0.038062871707731524,
      "grad_norm": 4.6464433670043945,
      "learning_rate": 3.7162162162162165e-05,
      "loss": 5.6277,
      "step": 56
    },
    {
      "epoch": 0.03942225998300765,
      "grad_norm": 4.2293572425842285,
      "learning_rate": 3.851351351351351e-05,
      "loss": 5.5346,
      "step": 58
    },
    {
      "epoch": 0.04078164825828377,
      "grad_norm": 4.188422679901123,
      "learning_rate": 3.986486486486487e-05,
      "loss": 5.5544,
      "step": 60
    },
    {
      "epoch": 0.0421410365335599,
      "grad_norm": 3.0673420429229736,
      "learning_rate": 4.1216216216216216e-05,
      "loss": 5.53,
      "step": 62
    },
    {
      "epoch": 0.04350042480883602,
      "grad_norm": 3.3032662868499756,
      "learning_rate": 4.256756756756757e-05,
      "loss": 5.5605,
      "step": 64
    },
    {
      "epoch": 0.044859813084112146,
      "grad_norm": 3.896825075149536,
      "learning_rate": 4.391891891891892e-05,
      "loss": 5.4221,
      "step": 66
    },
    {
      "epoch": 0.04621920135938828,
      "grad_norm": 4.151010990142822,
      "learning_rate": 4.5270270270270274e-05,
      "loss": 5.3967,
      "step": 68
    },
    {
      "epoch": 0.0475785896346644,
      "grad_norm": 3.938117265701294,
      "learning_rate": 4.662162162162162e-05,
      "loss": 5.4716,
      "step": 70
    },
    {
      "epoch": 0.048937977909940526,
      "grad_norm": 3.1217191219329834,
      "learning_rate": 4.797297297297298e-05,
      "loss": 5.4567,
      "step": 72
    },
    {
      "epoch": 0.05029736618521665,
      "grad_norm": 3.293020725250244,
      "learning_rate": 4.9324324324324325e-05,
      "loss": 5.4291,
      "step": 74
    },
    {
      "epoch": 0.051656754460492775,
      "grad_norm": 3.9366047382354736,
      "learning_rate": 5.067567567567568e-05,
      "loss": 5.378,
      "step": 76
    },
    {
      "epoch": 0.053016142735768906,
      "grad_norm": 4.825038909912109,
      "learning_rate": 5.202702702702703e-05,
      "loss": 5.3462,
      "step": 78
    },
    {
      "epoch": 0.05437553101104503,
      "grad_norm": 4.513136386871338,
      "learning_rate": 5.337837837837838e-05,
      "loss": 5.4209,
      "step": 80
    },
    {
      "epoch": 0.055734919286321155,
      "grad_norm": 4.524239540100098,
      "learning_rate": 5.472972972972973e-05,
      "loss": 5.3715,
      "step": 82
    },
    {
      "epoch": 0.05709430756159728,
      "grad_norm": 5.1905317306518555,
      "learning_rate": 5.6081081081081086e-05,
      "loss": 5.2334,
      "step": 84
    },
    {
      "epoch": 0.058453695836873404,
      "grad_norm": 4.657945156097412,
      "learning_rate": 5.7432432432432434e-05,
      "loss": 5.2899,
      "step": 86
    },
    {
      "epoch": 0.059813084112149535,
      "grad_norm": 3.7982685565948486,
      "learning_rate": 5.878378378378379e-05,
      "loss": 5.2191,
      "step": 88
    },
    {
      "epoch": 0.06117247238742566,
      "grad_norm": 3.5835001468658447,
      "learning_rate": 6.013513513513514e-05,
      "loss": 5.1858,
      "step": 90
    },
    {
      "epoch": 0.06253186066270179,
      "grad_norm": 4.594094276428223,
      "learning_rate": 6.14864864864865e-05,
      "loss": 5.2013,
      "step": 92
    },
    {
      "epoch": 0.06389124893797792,
      "grad_norm": 3.8048019409179688,
      "learning_rate": 6.283783783783784e-05,
      "loss": 5.1493,
      "step": 94
    },
    {
      "epoch": 0.06525063721325404,
      "grad_norm": 3.9920341968536377,
      "learning_rate": 6.41891891891892e-05,
      "loss": 5.0612,
      "step": 96
    },
    {
      "epoch": 0.06661002548853016,
      "grad_norm": 3.4856226444244385,
      "learning_rate": 6.554054054054054e-05,
      "loss": 5.1978,
      "step": 98
    },
    {
      "epoch": 0.06796941376380629,
      "grad_norm": 3.485684871673584,
      "learning_rate": 6.68918918918919e-05,
      "loss": 5.2438,
      "step": 100
    },
    {
      "epoch": 0.06932880203908241,
      "grad_norm": 2.92802095413208,
      "learning_rate": 6.824324324324325e-05,
      "loss": 5.0203,
      "step": 102
    },
    {
      "epoch": 0.07068819031435854,
      "grad_norm": 3.472078561782837,
      "learning_rate": 6.95945945945946e-05,
      "loss": 5.1175,
      "step": 104
    },
    {
      "epoch": 0.07204757858963466,
      "grad_norm": 3.5529918670654297,
      "learning_rate": 7.094594594594594e-05,
      "loss": 5.1952,
      "step": 106
    },
    {
      "epoch": 0.07340696686491079,
      "grad_norm": 5.627261638641357,
      "learning_rate": 7.229729729729731e-05,
      "loss": 5.0469,
      "step": 108
    },
    {
      "epoch": 0.07476635514018691,
      "grad_norm": 4.3943305015563965,
      "learning_rate": 7.364864864864865e-05,
      "loss": 5.0147,
      "step": 110
    },
    {
      "epoch": 0.07612574341546305,
      "grad_norm": 2.405991792678833,
      "learning_rate": 7.500000000000001e-05,
      "loss": 5.0281,
      "step": 112
    },
    {
      "epoch": 0.07748513169073917,
      "grad_norm": 3.361250162124634,
      "learning_rate": 7.635135135135135e-05,
      "loss": 4.9389,
      "step": 114
    },
    {
      "epoch": 0.0788445199660153,
      "grad_norm": 3.5558111667633057,
      "learning_rate": 7.77027027027027e-05,
      "loss": 4.9327,
      "step": 116
    },
    {
      "epoch": 0.08020390824129142,
      "grad_norm": 3.6313676834106445,
      "learning_rate": 7.905405405405406e-05,
      "loss": 4.9751,
      "step": 118
    },
    {
      "epoch": 0.08156329651656755,
      "grad_norm": 3.766629219055176,
      "learning_rate": 8.040540540540541e-05,
      "loss": 4.9362,
      "step": 120
    },
    {
      "epoch": 0.08292268479184367,
      "grad_norm": 3.8239798545837402,
      "learning_rate": 8.175675675675675e-05,
      "loss": 4.9906,
      "step": 122
    },
    {
      "epoch": 0.0842820730671198,
      "grad_norm": 3.1650514602661133,
      "learning_rate": 8.310810810810811e-05,
      "loss": 4.8465,
      "step": 124
    },
    {
      "epoch": 0.08564146134239592,
      "grad_norm": 3.0690271854400635,
      "learning_rate": 8.445945945945946e-05,
      "loss": 4.8147,
      "step": 126
    },
    {
      "epoch": 0.08700084961767204,
      "grad_norm": 3.1290276050567627,
      "learning_rate": 8.581081081081082e-05,
      "loss": 4.8495,
      "step": 128
    },
    {
      "epoch": 0.08836023789294817,
      "grad_norm": 3.1333677768707275,
      "learning_rate": 8.716216216216216e-05,
      "loss": 4.8112,
      "step": 130
    },
    {
      "epoch": 0.08971962616822429,
      "grad_norm": 2.8959381580352783,
      "learning_rate": 8.851351351351352e-05,
      "loss": 4.7989,
      "step": 132
    },
    {
      "epoch": 0.09107901444350043,
      "grad_norm": 2.715139389038086,
      "learning_rate": 8.986486486486487e-05,
      "loss": 4.7689,
      "step": 134
    },
    {
      "epoch": 0.09243840271877656,
      "grad_norm": 2.3525729179382324,
      "learning_rate": 9.121621621621623e-05,
      "loss": 4.7503,
      "step": 136
    },
    {
      "epoch": 0.09379779099405268,
      "grad_norm": 2.5053319931030273,
      "learning_rate": 9.256756756756757e-05,
      "loss": 4.8267,
      "step": 138
    },
    {
      "epoch": 0.0951571792693288,
      "grad_norm": 3.2830920219421387,
      "learning_rate": 9.391891891891892e-05,
      "loss": 4.734,
      "step": 140
    },
    {
      "epoch": 0.09651656754460493,
      "grad_norm": 3.367637872695923,
      "learning_rate": 9.527027027027028e-05,
      "loss": 4.6487,
      "step": 142
    },
    {
      "epoch": 0.09787595581988105,
      "grad_norm": 4.157845973968506,
      "learning_rate": 9.662162162162163e-05,
      "loss": 4.7186,
      "step": 144
    },
    {
      "epoch": 0.09923534409515718,
      "grad_norm": 3.549011707305908,
      "learning_rate": 9.797297297297297e-05,
      "loss": 4.7013,
      "step": 146
    },
    {
      "epoch": 0.1005947323704333,
      "grad_norm": 2.438737392425537,
      "learning_rate": 9.932432432432433e-05,
      "loss": 4.6463,
      "step": 148
    },
    {
      "epoch": 0.10195412064570943,
      "grad_norm": 2.62125301361084,
      "learning_rate": 9.999996843793759e-05,
      "loss": 4.6266,
      "step": 150
    },
    {
      "epoch": 0.10331350892098555,
      "grad_norm": 2.5557775497436523,
      "learning_rate": 9.999971594167742e-05,
      "loss": 4.6659,
      "step": 152
    },
    {
      "epoch": 0.10467289719626169,
      "grad_norm": 2.435065746307373,
      "learning_rate": 9.999921095043215e-05,
      "loss": 4.6833,
      "step": 154
    },
    {
      "epoch": 0.10603228547153781,
      "grad_norm": 2.715564012527466,
      "learning_rate": 9.999845346675197e-05,
      "loss": 4.6256,
      "step": 156
    },
    {
      "epoch": 0.10739167374681394,
      "grad_norm": 2.129850149154663,
      "learning_rate": 9.999744349446207e-05,
      "loss": 4.4834,
      "step": 158
    },
    {
      "epoch": 0.10875106202209006,
      "grad_norm": 2.3702259063720703,
      "learning_rate": 9.99961810386628e-05,
      "loss": 4.5664,
      "step": 160
    },
    {
      "epoch": 0.11011045029736619,
      "grad_norm": 1.9370046854019165,
      "learning_rate": 9.999466610572944e-05,
      "loss": 4.5847,
      "step": 162
    },
    {
      "epoch": 0.11146983857264231,
      "grad_norm": 2.4077095985412598,
      "learning_rate": 9.999289870331232e-05,
      "loss": 4.6685,
      "step": 164
    },
    {
      "epoch": 0.11282922684791843,
      "grad_norm": 3.0132172107696533,
      "learning_rate": 9.999087884033666e-05,
      "loss": 4.5605,
      "step": 166
    },
    {
      "epoch": 0.11418861512319456,
      "grad_norm": 3.6081573963165283,
      "learning_rate": 9.998860652700263e-05,
      "loss": 4.4315,
      "step": 168
    },
    {
      "epoch": 0.11554800339847068,
      "grad_norm": 2.687088966369629,
      "learning_rate": 9.998608177478525e-05,
      "loss": 4.5634,
      "step": 170
    },
    {
      "epoch": 0.11690739167374681,
      "grad_norm": 2.3163015842437744,
      "learning_rate": 9.998330459643437e-05,
      "loss": 4.3725,
      "step": 172
    },
    {
      "epoch": 0.11826677994902295,
      "grad_norm": 2.576303720474243,
      "learning_rate": 9.998027500597451e-05,
      "loss": 4.4502,
      "step": 174
    },
    {
      "epoch": 0.11962616822429907,
      "grad_norm": 3.0173189640045166,
      "learning_rate": 9.997699301870488e-05,
      "loss": 4.4904,
      "step": 176
    },
    {
      "epoch": 0.1209855564995752,
      "grad_norm": 1.8845309019088745,
      "learning_rate": 9.99734586511993e-05,
      "loss": 4.4285,
      "step": 178
    },
    {
      "epoch": 0.12234494477485132,
      "grad_norm": 1.8597114086151123,
      "learning_rate": 9.996967192130606e-05,
      "loss": 4.4114,
      "step": 180
    },
    {
      "epoch": 0.12370433305012744,
      "grad_norm": 1.9403643608093262,
      "learning_rate": 9.996563284814788e-05,
      "loss": 4.3586,
      "step": 182
    },
    {
      "epoch": 0.12506372132540358,
      "grad_norm": 2.1628377437591553,
      "learning_rate": 9.99613414521218e-05,
      "loss": 4.4004,
      "step": 184
    },
    {
      "epoch": 0.1264231096006797,
      "grad_norm": 2.213683843612671,
      "learning_rate": 9.995679775489906e-05,
      "loss": 4.4017,
      "step": 186
    },
    {
      "epoch": 0.12778249787595583,
      "grad_norm": 1.9236798286437988,
      "learning_rate": 9.995200177942499e-05,
      "loss": 4.3356,
      "step": 188
    },
    {
      "epoch": 0.12914188615123195,
      "grad_norm": 2.8310718536376953,
      "learning_rate": 9.994695354991892e-05,
      "loss": 4.2476,
      "step": 190
    },
    {
      "epoch": 0.13050127442650808,
      "grad_norm": 2.613215446472168,
      "learning_rate": 9.994165309187406e-05,
      "loss": 4.4249,
      "step": 192
    },
    {
      "epoch": 0.1318606627017842,
      "grad_norm": 3.2933475971221924,
      "learning_rate": 9.993610043205735e-05,
      "loss": 4.359,
      "step": 194
    },
    {
      "epoch": 0.13322005097706033,
      "grad_norm": 2.660553455352783,
      "learning_rate": 9.993029559850932e-05,
      "loss": 4.3591,
      "step": 196
    },
    {
      "epoch": 0.13457943925233645,
      "grad_norm": 2.223825693130493,
      "learning_rate": 9.992423862054397e-05,
      "loss": 4.2638,
      "step": 198
    },
    {
      "epoch": 0.13593882752761258,
      "grad_norm": 1.6391338109970093,
      "learning_rate": 9.991792952874857e-05,
      "loss": 4.2506,
      "step": 200
    },
    {
      "epoch": 0.1372982158028887,
      "grad_norm": 1.568050742149353,
      "learning_rate": 9.991136835498363e-05,
      "loss": 4.1789,
      "step": 202
    },
    {
      "epoch": 0.13865760407816483,
      "grad_norm": 1.8366698026657104,
      "learning_rate": 9.990455513238257e-05,
      "loss": 4.2361,
      "step": 204
    },
    {
      "epoch": 0.14001699235344095,
      "grad_norm": 2.0478951930999756,
      "learning_rate": 9.98974898953517e-05,
      "loss": 4.2613,
      "step": 206
    },
    {
      "epoch": 0.14137638062871707,
      "grad_norm": 1.7681331634521484,
      "learning_rate": 9.989017267956994e-05,
      "loss": 4.2437,
      "step": 208
    },
    {
      "epoch": 0.1427357689039932,
      "grad_norm": 2.2257468700408936,
      "learning_rate": 9.988260352198872e-05,
      "loss": 4.1724,
      "step": 210
    },
    {
      "epoch": 0.14409515717926932,
      "grad_norm": 1.6590179204940796,
      "learning_rate": 9.987478246083175e-05,
      "loss": 4.1619,
      "step": 212
    },
    {
      "epoch": 0.14545454545454545,
      "grad_norm": 2.029710292816162,
      "learning_rate": 9.986670953559482e-05,
      "loss": 4.2611,
      "step": 214
    },
    {
      "epoch": 0.14681393372982157,
      "grad_norm": 1.7355066537857056,
      "learning_rate": 9.985838478704563e-05,
      "loss": 4.222,
      "step": 216
    },
    {
      "epoch": 0.1481733220050977,
      "grad_norm": 1.91265869140625,
      "learning_rate": 9.984980825722356e-05,
      "loss": 4.0887,
      "step": 218
    },
    {
      "epoch": 0.14953271028037382,
      "grad_norm": 2.1522412300109863,
      "learning_rate": 9.984097998943947e-05,
      "loss": 4.1331,
      "step": 220
    },
    {
      "epoch": 0.15089209855564995,
      "grad_norm": 1.7838095426559448,
      "learning_rate": 9.983190002827546e-05,
      "loss": 4.0928,
      "step": 222
    },
    {
      "epoch": 0.1522514868309261,
      "grad_norm": 1.8782153129577637,
      "learning_rate": 9.982256841958472e-05,
      "loss": 4.2071,
      "step": 224
    },
    {
      "epoch": 0.15361087510620222,
      "grad_norm": 2.179396390914917,
      "learning_rate": 9.981298521049118e-05,
      "loss": 4.0642,
      "step": 226
    },
    {
      "epoch": 0.15497026338147835,
      "grad_norm": 2.1441640853881836,
      "learning_rate": 9.980315044938939e-05,
      "loss": 4.0892,
      "step": 228
    },
    {
      "epoch": 0.15632965165675447,
      "grad_norm": 2.6898701190948486,
      "learning_rate": 9.979306418594417e-05,
      "loss": 4.1155,
      "step": 230
    },
    {
      "epoch": 0.1576890399320306,
      "grad_norm": 2.3028266429901123,
      "learning_rate": 9.97827264710904e-05,
      "loss": 4.1381,
      "step": 232
    },
    {
      "epoch": 0.15904842820730672,
      "grad_norm": 1.8704326152801514,
      "learning_rate": 9.977213735703283e-05,
      "loss": 4.1299,
      "step": 234
    },
    {
      "epoch": 0.16040781648258284,
      "grad_norm": 1.5334903001785278,
      "learning_rate": 9.976129689724574e-05,
      "loss": 4.1585,
      "step": 236
    },
    {
      "epoch": 0.16176720475785897,
      "grad_norm": 1.5391136407852173,
      "learning_rate": 9.975020514647267e-05,
      "loss": 4.0774,
      "step": 238
    },
    {
      "epoch": 0.1631265930331351,
      "grad_norm": 1.731969952583313,
      "learning_rate": 9.973886216072614e-05,
      "loss": 4.1801,
      "step": 240
    },
    {
      "epoch": 0.16448598130841122,
      "grad_norm": 1.355950117111206,
      "learning_rate": 9.972726799728744e-05,
      "loss": 4.1208,
      "step": 242
    },
    {
      "epoch": 0.16584536958368734,
      "grad_norm": 1.6355708837509155,
      "learning_rate": 9.971542271470625e-05,
      "loss": 4.0135,
      "step": 244
    },
    {
      "epoch": 0.16720475785896347,
      "grad_norm": 1.612067461013794,
      "learning_rate": 9.970332637280041e-05,
      "loss": 4.008,
      "step": 246
    },
    {
      "epoch": 0.1685641461342396,
      "grad_norm": 1.5609122514724731,
      "learning_rate": 9.969097903265558e-05,
      "loss": 3.9615,
      "step": 248
    },
    {
      "epoch": 0.16992353440951571,
      "grad_norm": 2.1877589225769043,
      "learning_rate": 9.967838075662495e-05,
      "loss": 4.0187,
      "step": 250
    },
    {
      "epoch": 0.17128292268479184,
      "grad_norm": 2.0836243629455566,
      "learning_rate": 9.966553160832889e-05,
      "loss": 4.0108,
      "step": 252
    },
    {
      "epoch": 0.17264231096006796,
      "grad_norm": 1.8262373208999634,
      "learning_rate": 9.96524316526547e-05,
      "loss": 3.9729,
      "step": 254
    },
    {
      "epoch": 0.1740016992353441,
      "grad_norm": 1.4357279539108276,
      "learning_rate": 9.96390809557562e-05,
      "loss": 3.9418,
      "step": 256
    },
    {
      "epoch": 0.1753610875106202,
      "grad_norm": 1.4747521877288818,
      "learning_rate": 9.962547958505346e-05,
      "loss": 4.0073,
      "step": 258
    },
    {
      "epoch": 0.17672047578589634,
      "grad_norm": 1.5109456777572632,
      "learning_rate": 9.961162760923244e-05,
      "loss": 4.0114,
      "step": 260
    },
    {
      "epoch": 0.17807986406117246,
      "grad_norm": 1.6962803602218628,
      "learning_rate": 9.959752509824462e-05,
      "loss": 3.8997,
      "step": 262
    },
    {
      "epoch": 0.17943925233644858,
      "grad_norm": 1.2874037027359009,
      "learning_rate": 9.958317212330665e-05,
      "loss": 3.9746,
      "step": 264
    },
    {
      "epoch": 0.18079864061172474,
      "grad_norm": 1.4089356660842896,
      "learning_rate": 9.956856875690006e-05,
      "loss": 3.8799,
      "step": 266
    },
    {
      "epoch": 0.18215802888700086,
      "grad_norm": 1.4761899709701538,
      "learning_rate": 9.95537150727708e-05,
      "loss": 3.9084,
      "step": 268
    },
    {
      "epoch": 0.18351741716227699,
      "grad_norm": 1.2963216304779053,
      "learning_rate": 9.953861114592889e-05,
      "loss": 3.884,
      "step": 270
    },
    {
      "epoch": 0.1848768054375531,
      "grad_norm": 1.2376818656921387,
      "learning_rate": 9.952325705264806e-05,
      "loss": 3.9434,
      "step": 272
    },
    {
      "epoch": 0.18623619371282923,
      "grad_norm": 1.6393024921417236,
      "learning_rate": 9.950765287046543e-05,
      "loss": 3.9175,
      "step": 274
    },
    {
      "epoch": 0.18759558198810536,
      "grad_norm": 1.2873233556747437,
      "learning_rate": 9.949179867818099e-05,
      "loss": 3.9513,
      "step": 276
    },
    {
      "epoch": 0.18895497026338148,
      "grad_norm": 1.3314156532287598,
      "learning_rate": 9.947569455585726e-05,
      "loss": 3.9345,
      "step": 278
    },
    {
      "epoch": 0.1903143585386576,
      "grad_norm": 1.392342448234558,
      "learning_rate": 9.945934058481892e-05,
      "loss": 3.8092,
      "step": 280
    },
    {
      "epoch": 0.19167374681393373,
      "grad_norm": 1.4349101781845093,
      "learning_rate": 9.944273684765235e-05,
      "loss": 3.8548,
      "step": 282
    },
    {
      "epoch": 0.19303313508920986,
      "grad_norm": 1.2190157175064087,
      "learning_rate": 9.942588342820521e-05,
      "loss": 3.9121,
      "step": 284
    },
    {
      "epoch": 0.19439252336448598,
      "grad_norm": 1.4537711143493652,
      "learning_rate": 9.94087804115861e-05,
      "loss": 3.8502,
      "step": 286
    },
    {
      "epoch": 0.1957519116397621,
      "grad_norm": 1.6733758449554443,
      "learning_rate": 9.939142788416398e-05,
      "loss": 3.8743,
      "step": 288
    },
    {
      "epoch": 0.19711129991503823,
      "grad_norm": 1.4261025190353394,
      "learning_rate": 9.937382593356793e-05,
      "loss": 3.8947,
      "step": 290
    },
    {
      "epoch": 0.19847068819031435,
      "grad_norm": 1.6536645889282227,
      "learning_rate": 9.93559746486865e-05,
      "loss": 3.9158,
      "step": 292
    },
    {
      "epoch": 0.19983007646559048,
      "grad_norm": 1.71151864528656,
      "learning_rate": 9.933787411966742e-05,
      "loss": 3.8466,
      "step": 294
    },
    {
      "epoch": 0.2011894647408666,
      "grad_norm": 1.8195589780807495,
      "learning_rate": 9.931952443791703e-05,
      "loss": 3.8113,
      "step": 296
    },
    {
      "epoch": 0.20254885301614273,
      "grad_norm": 1.5555843114852905,
      "learning_rate": 9.930092569609996e-05,
      "loss": 3.8505,
      "step": 298
    },
    {
      "epoch": 0.20390824129141885,
      "grad_norm": 1.402797818183899,
      "learning_rate": 9.928207798813849e-05,
      "loss": 3.8856,
      "step": 300
    }
  ],
  "logging_steps": 2,
  "max_steps": 2944,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.377550336196608e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}