{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9999059177721329,
  "eval_steps": 20,
  "global_step": 5314,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0037632891146862357,
      "grad_norm": 0.15685315430164337,
      "learning_rate": 0.0002,
      "loss": 0.4618,
      "step": 20
    },
    {
      "epoch": 0.0075265782293724715,
      "grad_norm": 0.10632659494876862,
      "learning_rate": 0.0002,
      "loss": 0.2985,
      "step": 40
    },
    {
      "epoch": 0.011289867344058707,
      "grad_norm": 0.12228264659643173,
      "learning_rate": 0.0002,
      "loss": 0.2689,
      "step": 60
    },
    {
      "epoch": 0.015053156458744943,
      "grad_norm": 0.09272768348455429,
      "learning_rate": 0.0002,
      "loss": 0.2249,
      "step": 80
    },
    {
      "epoch": 0.01881644557343118,
      "grad_norm": 0.11586301028728485,
      "learning_rate": 0.0002,
      "loss": 0.23,
      "step": 100
    },
    {
      "epoch": 0.022579734688117414,
      "grad_norm": 0.08657937496900558,
      "learning_rate": 0.0002,
      "loss": 0.2189,
      "step": 120
    },
    {
      "epoch": 0.02634302380280365,
      "grad_norm": 0.08374184370040894,
      "learning_rate": 0.0002,
      "loss": 0.1987,
      "step": 140
    },
    {
      "epoch": 0.030106312917489886,
      "grad_norm": 0.08528616279363632,
      "learning_rate": 0.0002,
      "loss": 0.2065,
      "step": 160
    },
    {
      "epoch": 0.03386960203217612,
      "grad_norm": 0.17550894618034363,
      "learning_rate": 0.0002,
      "loss": 0.1904,
      "step": 180
    },
    {
      "epoch": 0.03763289114686236,
      "grad_norm": 0.09562012553215027,
      "learning_rate": 0.0002,
      "loss": 0.1824,
      "step": 200
    },
    {
      "epoch": 0.04139618026154859,
      "grad_norm": 0.12333519756793976,
      "learning_rate": 0.0002,
      "loss": 0.1702,
      "step": 220
    },
    {
      "epoch": 0.04515946937623483,
      "grad_norm": 0.11404936760663986,
      "learning_rate": 0.0002,
      "loss": 0.18,
      "step": 240
    },
    {
      "epoch": 0.048922758490921064,
      "grad_norm": 0.08656694740056992,
      "learning_rate": 0.0002,
      "loss": 0.1749,
      "step": 260
    },
    {
      "epoch": 0.0526860476056073,
      "grad_norm": 0.09797225147485733,
      "learning_rate": 0.0002,
      "loss": 0.1731,
      "step": 280
    },
    {
      "epoch": 0.056449336720293536,
      "grad_norm": 0.09765412658452988,
      "learning_rate": 0.0002,
      "loss": 0.1578,
      "step": 300
    },
    {
      "epoch": 0.06021262583497977,
      "grad_norm": 0.07540671527385712,
      "learning_rate": 0.0002,
      "loss": 0.1693,
      "step": 320
    },
    {
      "epoch": 0.06397591494966601,
      "grad_norm": 0.08590289205312729,
      "learning_rate": 0.0002,
      "loss": 0.1607,
      "step": 340
    },
    {
      "epoch": 0.06773920406435224,
      "grad_norm": 0.09767664223909378,
      "learning_rate": 0.0002,
      "loss": 0.1419,
      "step": 360
    },
    {
      "epoch": 0.07150249317903848,
      "grad_norm": 0.10479151457548141,
      "learning_rate": 0.0002,
      "loss": 0.1629,
      "step": 380
    },
    {
      "epoch": 0.07526578229372471,
      "grad_norm": 0.08791118115186691,
      "learning_rate": 0.0002,
      "loss": 0.1484,
      "step": 400
    },
    {
      "epoch": 0.07902907140841095,
      "grad_norm": 0.10221686214208603,
      "learning_rate": 0.0002,
      "loss": 0.1499,
      "step": 420
    },
    {
      "epoch": 0.08279236052309719,
      "grad_norm": 0.09131903946399689,
      "learning_rate": 0.0002,
      "loss": 0.1485,
      "step": 440
    },
    {
      "epoch": 0.08655564963778342,
      "grad_norm": 0.10372031480073929,
      "learning_rate": 0.0002,
      "loss": 0.1441,
      "step": 460
    },
    {
      "epoch": 0.09031893875246966,
      "grad_norm": 0.09649350494146347,
      "learning_rate": 0.0002,
      "loss": 0.147,
      "step": 480
    },
    {
      "epoch": 0.0940822278671559,
      "grad_norm": 0.09961670637130737,
      "learning_rate": 0.0002,
      "loss": 0.1465,
      "step": 500
    },
    {
      "epoch": 0.09784551698184213,
      "grad_norm": 0.08490657806396484,
      "learning_rate": 0.0002,
      "loss": 0.132,
      "step": 520
    },
    {
      "epoch": 0.10160880609652836,
      "grad_norm": 0.08765380829572678,
      "learning_rate": 0.0002,
      "loss": 0.1283,
      "step": 540
    },
    {
      "epoch": 0.1053720952112146,
      "grad_norm": 0.09319768846035004,
      "learning_rate": 0.0002,
      "loss": 0.1382,
      "step": 560
    },
    {
      "epoch": 0.10913538432590084,
      "grad_norm": 0.08941628783941269,
      "learning_rate": 0.0002,
      "loss": 0.1335,
      "step": 580
    },
    {
      "epoch": 0.11289867344058707,
      "grad_norm": 0.0971933901309967,
      "learning_rate": 0.0002,
      "loss": 0.137,
      "step": 600
    },
    {
      "epoch": 0.11666196255527331,
      "grad_norm": 0.07488075643777847,
      "learning_rate": 0.0002,
      "loss": 0.1283,
      "step": 620
    },
    {
      "epoch": 0.12042525166995954,
      "grad_norm": 0.08711710572242737,
      "learning_rate": 0.0002,
      "loss": 0.1292,
      "step": 640
    },
    {
      "epoch": 0.12418854078464578,
      "grad_norm": 0.08043856918811798,
      "learning_rate": 0.0002,
      "loss": 0.1269,
      "step": 660
    },
    {
      "epoch": 0.12795182989933201,
      "grad_norm": 0.07097792625427246,
      "learning_rate": 0.0002,
      "loss": 0.1364,
      "step": 680
    },
    {
      "epoch": 0.13171511901401825,
      "grad_norm": 0.0742156058549881,
      "learning_rate": 0.0002,
      "loss": 0.1277,
      "step": 700
    },
    {
      "epoch": 0.1354784081287045,
      "grad_norm": 0.09956187009811401,
      "learning_rate": 0.0002,
      "loss": 0.1314,
      "step": 720
    },
    {
      "epoch": 0.13924169724339072,
      "grad_norm": 0.08661571145057678,
      "learning_rate": 0.0002,
      "loss": 0.124,
      "step": 740
    },
    {
      "epoch": 0.14300498635807696,
      "grad_norm": 0.08070897310972214,
      "learning_rate": 0.0002,
      "loss": 0.1147,
      "step": 760
    },
    {
      "epoch": 0.1467682754727632,
      "grad_norm": 0.09516704082489014,
      "learning_rate": 0.0002,
      "loss": 0.1168,
      "step": 780
    },
    {
      "epoch": 0.15053156458744943,
      "grad_norm": 0.09076276421546936,
      "learning_rate": 0.0002,
      "loss": 0.1178,
      "step": 800
    },
    {
      "epoch": 0.15429485370213566,
      "grad_norm": 0.0896017774939537,
      "learning_rate": 0.0002,
      "loss": 0.1192,
      "step": 820
    },
    {
      "epoch": 0.1580581428168219,
      "grad_norm": 0.07477965205907822,
      "learning_rate": 0.0002,
      "loss": 0.1127,
      "step": 840
    },
    {
      "epoch": 0.16182143193150814,
      "grad_norm": 0.08405464887619019,
      "learning_rate": 0.0002,
      "loss": 0.119,
      "step": 860
    },
    {
      "epoch": 0.16558472104619437,
      "grad_norm": 0.07539790868759155,
      "learning_rate": 0.0002,
      "loss": 0.1213,
      "step": 880
    },
    {
      "epoch": 0.1693480101608806,
      "grad_norm": 0.08806908130645752,
      "learning_rate": 0.0002,
      "loss": 0.1192,
      "step": 900
    },
    {
      "epoch": 0.17311129927556684,
      "grad_norm": 0.08064749836921692,
      "learning_rate": 0.0002,
      "loss": 0.1141,
      "step": 920
    },
    {
      "epoch": 0.17687458839025308,
      "grad_norm": 0.09680119901895523,
      "learning_rate": 0.0002,
      "loss": 0.1042,
      "step": 940
    },
    {
      "epoch": 0.18063787750493931,
      "grad_norm": 0.09092500060796738,
      "learning_rate": 0.0002,
      "loss": 0.1067,
      "step": 960
    },
    {
      "epoch": 0.18440116661962555,
      "grad_norm": 0.07870171219110489,
      "learning_rate": 0.0002,
      "loss": 0.1101,
      "step": 980
    },
    {
      "epoch": 0.1881644557343118,
      "grad_norm": 0.06842092424631119,
      "learning_rate": 0.0002,
      "loss": 0.1045,
      "step": 1000
    },
    {
      "epoch": 0.19192774484899802,
      "grad_norm": 0.08229291439056396,
      "learning_rate": 0.0002,
      "loss": 0.114,
      "step": 1020
    },
    {
      "epoch": 0.19569103396368426,
      "grad_norm": 0.07617371529340744,
      "learning_rate": 0.0002,
      "loss": 0.1077,
      "step": 1040
    },
    {
      "epoch": 0.1994543230783705,
      "grad_norm": 0.09529408067464828,
      "learning_rate": 0.0002,
      "loss": 0.101,
      "step": 1060
    },
    {
      "epoch": 0.20321761219305673,
      "grad_norm": 0.07952335476875305,
      "learning_rate": 0.0002,
      "loss": 0.1065,
      "step": 1080
    },
    {
      "epoch": 0.20698090130774296,
      "grad_norm": 0.07625720649957657,
      "learning_rate": 0.0002,
      "loss": 0.0987,
      "step": 1100
    },
    {
      "epoch": 0.2107441904224292,
      "grad_norm": 0.07906854152679443,
      "learning_rate": 0.0002,
      "loss": 0.0986,
      "step": 1120
    },
    {
      "epoch": 0.21450747953711544,
      "grad_norm": 0.08430849015712738,
      "learning_rate": 0.0002,
      "loss": 0.1085,
      "step": 1140
    },
    {
      "epoch": 0.21827076865180167,
      "grad_norm": 0.0861297994852066,
      "learning_rate": 0.0002,
      "loss": 0.1066,
      "step": 1160
    },
    {
      "epoch": 0.2220340577664879,
      "grad_norm": 0.07576191425323486,
      "learning_rate": 0.0002,
      "loss": 0.1051,
      "step": 1180
    },
    {
      "epoch": 0.22579734688117414,
      "grad_norm": 0.08132428675889969,
      "learning_rate": 0.0002,
      "loss": 0.1036,
      "step": 1200
    },
    {
      "epoch": 0.22956063599586038,
      "grad_norm": 0.06960251182317734,
      "learning_rate": 0.0002,
      "loss": 0.1039,
      "step": 1220
    },
    {
      "epoch": 0.23332392511054661,
      "grad_norm": 0.08230841159820557,
      "learning_rate": 0.0002,
      "loss": 0.1046,
      "step": 1240
    },
    {
      "epoch": 0.23708721422523285,
      "grad_norm": 0.07119760662317276,
      "learning_rate": 0.0002,
      "loss": 0.1064,
      "step": 1260
    },
    {
      "epoch": 0.2408505033399191,
      "grad_norm": 0.06965576857328415,
      "learning_rate": 0.0002,
      "loss": 0.0964,
      "step": 1280
    },
    {
      "epoch": 0.24461379245460532,
      "grad_norm": 0.07533243298530579,
      "learning_rate": 0.0002,
      "loss": 0.0973,
      "step": 1300
    },
    {
      "epoch": 0.24837708156929156,
      "grad_norm": 0.07530753314495087,
      "learning_rate": 0.0002,
      "loss": 0.1002,
      "step": 1320
    },
    {
      "epoch": 0.2521403706839778,
      "grad_norm": 0.0701604038476944,
      "learning_rate": 0.0002,
      "loss": 0.1014,
      "step": 1340
    },
    {
      "epoch": 0.25590365979866403,
      "grad_norm": 0.08768032491207123,
      "learning_rate": 0.0002,
      "loss": 0.099,
      "step": 1360
    },
    {
      "epoch": 0.2596669489133503,
      "grad_norm": 0.0789860337972641,
      "learning_rate": 0.0002,
      "loss": 0.0947,
      "step": 1380
    },
    {
      "epoch": 0.2634302380280365,
      "grad_norm": 0.09132009744644165,
      "learning_rate": 0.0002,
      "loss": 0.0975,
      "step": 1400
    },
    {
      "epoch": 0.26719352714272276,
      "grad_norm": 0.07386859506368637,
      "learning_rate": 0.0002,
      "loss": 0.0997,
      "step": 1420
    },
    {
      "epoch": 0.270956816257409,
      "grad_norm": 0.07243089377880096,
      "learning_rate": 0.0002,
      "loss": 0.0956,
      "step": 1440
    },
    {
      "epoch": 0.27472010537209524,
      "grad_norm": 0.0814971774816513,
      "learning_rate": 0.0002,
      "loss": 0.0869,
      "step": 1460
    },
    {
      "epoch": 0.27848339448678144,
      "grad_norm": 0.07246191054582596,
      "learning_rate": 0.0002,
      "loss": 0.0958,
      "step": 1480
    },
    {
      "epoch": 0.2822466836014677,
      "grad_norm": 0.08997531235218048,
      "learning_rate": 0.0002,
      "loss": 0.0875,
      "step": 1500
    },
    {
      "epoch": 0.2860099727161539,
      "grad_norm": 0.08498572558164597,
      "learning_rate": 0.0002,
      "loss": 0.0872,
      "step": 1520
    },
    {
      "epoch": 0.2897732618308402,
      "grad_norm": 0.08210768550634384,
      "learning_rate": 0.0002,
      "loss": 0.085,
      "step": 1540
    },
    {
      "epoch": 0.2935365509455264,
      "grad_norm": 0.09801402688026428,
      "learning_rate": 0.0002,
      "loss": 0.0937,
      "step": 1560
    },
    {
      "epoch": 0.29729984006021265,
      "grad_norm": 0.07310175150632858,
      "learning_rate": 0.0002,
      "loss": 0.0898,
      "step": 1580
    },
    {
      "epoch": 0.30106312917489886,
      "grad_norm": 0.0679851844906807,
      "learning_rate": 0.0002,
      "loss": 0.0853,
      "step": 1600
    },
    {
      "epoch": 0.3048264182895851,
      "grad_norm": 0.0764717161655426,
      "learning_rate": 0.0002,
      "loss": 0.0966,
      "step": 1620
    },
    {
      "epoch": 0.30858970740427133,
      "grad_norm": 0.0813748836517334,
      "learning_rate": 0.0002,
      "loss": 0.0914,
      "step": 1640
    },
    {
      "epoch": 0.3123529965189576,
      "grad_norm": 0.07560984790325165,
      "learning_rate": 0.0002,
      "loss": 0.0889,
      "step": 1660
    },
    {
      "epoch": 0.3161162856336438,
      "grad_norm": 0.07131631672382355,
      "learning_rate": 0.0002,
      "loss": 0.0862,
      "step": 1680
    },
    {
      "epoch": 0.31987957474833006,
      "grad_norm": 0.07922184467315674,
      "learning_rate": 0.0002,
      "loss": 0.0908,
      "step": 1700
    },
    {
      "epoch": 0.3236428638630163,
      "grad_norm": 0.09043222665786743,
      "learning_rate": 0.0002,
      "loss": 0.0887,
      "step": 1720
    },
    {
      "epoch": 0.32740615297770254,
      "grad_norm": 0.08574160188436508,
      "learning_rate": 0.0002,
      "loss": 0.0895,
      "step": 1740
    },
    {
      "epoch": 0.33116944209238874,
      "grad_norm": 0.0885438472032547,
      "learning_rate": 0.0002,
      "loss": 0.0886,
      "step": 1760
    },
    {
      "epoch": 0.334932731207075,
      "grad_norm": 0.08546678721904755,
      "learning_rate": 0.0002,
      "loss": 0.0871,
      "step": 1780
    },
    {
      "epoch": 0.3386960203217612,
      "grad_norm": 0.0922817662358284,
      "learning_rate": 0.0002,
      "loss": 0.0877,
      "step": 1800
    },
    {
      "epoch": 0.3424593094364475,
      "grad_norm": 0.08879639208316803,
      "learning_rate": 0.0002,
      "loss": 0.0877,
      "step": 1820
    },
    {
      "epoch": 0.3462225985511337,
      "grad_norm": 0.09202056378126144,
      "learning_rate": 0.0002,
      "loss": 0.0827,
      "step": 1840
    },
    {
      "epoch": 0.34998588766581995,
      "grad_norm": 0.0998295396566391,
      "learning_rate": 0.0002,
      "loss": 0.0835,
      "step": 1860
    },
    {
      "epoch": 0.35374917678050616,
      "grad_norm": 0.09490591287612915,
      "learning_rate": 0.0002,
      "loss": 0.0862,
      "step": 1880
    },
    {
      "epoch": 0.3575124658951924,
      "grad_norm": 0.08920489251613617,
      "learning_rate": 0.0002,
      "loss": 0.0859,
      "step": 1900
    },
    {
      "epoch": 0.36127575500987863,
      "grad_norm": 0.0803561583161354,
      "learning_rate": 0.0002,
      "loss": 0.081,
      "step": 1920
    },
    {
      "epoch": 0.3650390441245649,
      "grad_norm": 0.08496900647878647,
      "learning_rate": 0.0002,
      "loss": 0.0835,
      "step": 1940
    },
    {
      "epoch": 0.3688023332392511,
      "grad_norm": 0.08658849447965622,
      "learning_rate": 0.0002,
      "loss": 0.0883,
      "step": 1960
    },
    {
      "epoch": 0.37256562235393736,
      "grad_norm": 0.08497461676597595,
      "learning_rate": 0.0002,
      "loss": 0.0811,
      "step": 1980
    },
    {
      "epoch": 0.3763289114686236,
      "grad_norm": 0.06599749624729156,
      "learning_rate": 0.0002,
      "loss": 0.0714,
      "step": 2000
    },
    {
      "epoch": 0.38009220058330984,
      "grad_norm": 0.07988911867141724,
      "learning_rate": 0.0002,
      "loss": 0.0874,
      "step": 2020
    },
    {
      "epoch": 0.38385548969799604,
      "grad_norm": 0.07323348522186279,
      "learning_rate": 0.0002,
      "loss": 0.0809,
      "step": 2040
    },
    {
      "epoch": 0.3876187788126823,
      "grad_norm": 0.07455869019031525,
      "learning_rate": 0.0002,
      "loss": 0.0819,
      "step": 2060
    },
    {
      "epoch": 0.3913820679273685,
      "grad_norm": 0.08383121341466904,
      "learning_rate": 0.0002,
      "loss": 0.0811,
      "step": 2080
    },
    {
      "epoch": 0.3951453570420548,
      "grad_norm": 0.07616332918405533,
      "learning_rate": 0.0002,
      "loss": 0.0802,
      "step": 2100
    },
    {
      "epoch": 0.398908646156741,
      "grad_norm": 0.08373293280601501,
      "learning_rate": 0.0002,
      "loss": 0.0878,
      "step": 2120
    },
    {
      "epoch": 0.40267193527142725,
      "grad_norm": 0.07459353655576706,
      "learning_rate": 0.0002,
      "loss": 0.0806,
      "step": 2140
    },
    {
      "epoch": 0.40643522438611346,
      "grad_norm": 0.08267400413751602,
      "learning_rate": 0.0002,
      "loss": 0.0799,
      "step": 2160
    },
    {
      "epoch": 0.4101985135007997,
      "grad_norm": 0.07844484597444534,
      "learning_rate": 0.0002,
      "loss": 0.0843,
      "step": 2180
    },
    {
      "epoch": 0.41396180261548593,
      "grad_norm": 0.07998470962047577,
      "learning_rate": 0.0002,
      "loss": 0.0821,
      "step": 2200
    },
    {
      "epoch": 0.4177250917301722,
      "grad_norm": 0.08860679715871811,
      "learning_rate": 0.0002,
      "loss": 0.0819,
      "step": 2220
    },
    {
      "epoch": 0.4214883808448584,
      "grad_norm": 0.07845838367938995,
      "learning_rate": 0.0002,
      "loss": 0.0802,
      "step": 2240
    },
    {
      "epoch": 0.42525166995954466,
      "grad_norm": 0.08801402896642685,
      "learning_rate": 0.0002,
      "loss": 0.0768,
      "step": 2260
    },
    {
      "epoch": 0.4290149590742309,
      "grad_norm": 0.06149598956108093,
      "learning_rate": 0.0002,
      "loss": 0.0806,
      "step": 2280
    },
    {
      "epoch": 0.43277824818891714,
      "grad_norm": 0.07813749462366104,
      "learning_rate": 0.0002,
      "loss": 0.0775,
      "step": 2300
    },
    {
      "epoch": 0.43654153730360334,
      "grad_norm": 0.08169027417898178,
      "learning_rate": 0.0002,
      "loss": 0.0774,
      "step": 2320
    },
    {
      "epoch": 0.4403048264182896,
      "grad_norm": 0.06748262792825699,
      "learning_rate": 0.0002,
      "loss": 0.0721,
      "step": 2340
    },
    {
      "epoch": 0.4440681155329758,
      "grad_norm": 0.07497742772102356,
      "learning_rate": 0.0002,
      "loss": 0.0687,
      "step": 2360
    },
    {
      "epoch": 0.4478314046476621,
      "grad_norm": 0.08851341158151627,
      "learning_rate": 0.0002,
      "loss": 0.0809,
      "step": 2380
    },
    {
      "epoch": 0.4515946937623483,
      "grad_norm": 0.06805267184972763,
      "learning_rate": 0.0002,
      "loss": 0.0783,
      "step": 2400
    },
    {
      "epoch": 0.45535798287703455,
      "grad_norm": 0.07998234033584595,
      "learning_rate": 0.0002,
      "loss": 0.0816,
      "step": 2420
    },
    {
      "epoch": 0.45912127199172076,
      "grad_norm": 0.07358496636152267,
      "learning_rate": 0.0002,
      "loss": 0.0715,
      "step": 2440
    },
    {
      "epoch": 0.462884561106407,
      "grad_norm": 0.08105491101741791,
      "learning_rate": 0.0002,
      "loss": 0.076,
      "step": 2460
    },
    {
      "epoch": 0.46664785022109323,
      "grad_norm": 0.08625109493732452,
      "learning_rate": 0.0002,
      "loss": 0.0731,
      "step": 2480
    },
    {
      "epoch": 0.4704111393357795,
      "grad_norm": 0.07986035943031311,
      "learning_rate": 0.0002,
      "loss": 0.0726,
      "step": 2500
    },
    {
      "epoch": 0.4741744284504657,
      "grad_norm": 0.07247906923294067,
      "learning_rate": 0.0002,
      "loss": 0.0763,
      "step": 2520
    },
    {
      "epoch": 0.47793771756515196,
      "grad_norm": 0.08120191842317581,
      "learning_rate": 0.0002,
      "loss": 0.0764,
      "step": 2540
    },
    {
      "epoch": 0.4817010066798382,
      "grad_norm": 0.09049087017774582,
      "learning_rate": 0.0002,
      "loss": 0.0799,
      "step": 2560
    },
    {
      "epoch": 0.48546429579452444,
      "grad_norm": 0.06859074532985687,
      "learning_rate": 0.0002,
      "loss": 0.0733,
      "step": 2580
    },
    {
      "epoch": 0.48922758490921064,
      "grad_norm": 0.07872766256332397,
      "learning_rate": 0.0002,
      "loss": 0.0724,
      "step": 2600
    },
    {
      "epoch": 0.4929908740238969,
      "grad_norm": 0.0663181021809578,
      "learning_rate": 0.0002,
      "loss": 0.0744,
      "step": 2620
    },
    {
      "epoch": 0.4967541631385831,
      "grad_norm": 0.07880811393260956,
      "learning_rate": 0.0002,
      "loss": 0.0759,
      "step": 2640
    },
    {
      "epoch": 0.5005174522532694,
      "grad_norm": 0.12468737363815308,
      "learning_rate": 0.0002,
      "loss": 0.0747,
      "step": 2660
    },
    {
      "epoch": 0.5042807413679556,
      "grad_norm": 0.08328507095575333,
      "learning_rate": 0.0002,
      "loss": 0.0754,
      "step": 2680
    },
    {
      "epoch": 0.5080440304826418,
      "grad_norm": 0.06665025651454926,
      "learning_rate": 0.0002,
      "loss": 0.0721,
      "step": 2700
    },
    {
      "epoch": 0.5118073195973281,
      "grad_norm": 0.07980209589004517,
      "learning_rate": 0.0002,
      "loss": 0.0733,
      "step": 2720
    },
    {
      "epoch": 0.5155706087120143,
      "grad_norm": 0.07951670140028,
      "learning_rate": 0.0002,
      "loss": 0.0792,
      "step": 2740
    },
    {
      "epoch": 0.5193338978267006,
      "grad_norm": 0.07515596598386765,
      "learning_rate": 0.0002,
      "loss": 0.0724,
      "step": 2760
    },
    {
      "epoch": 0.5230971869413867,
      "grad_norm": 0.08162270486354828,
      "learning_rate": 0.0002,
      "loss": 0.0714,
      "step": 2780
    },
    {
      "epoch": 0.526860476056073,
      "grad_norm": 0.08002326637506485,
      "learning_rate": 0.0002,
      "loss": 0.077,
      "step": 2800
    },
    {
      "epoch": 0.5306237651707593,
      "grad_norm": 0.07449716329574585,
      "learning_rate": 0.0002,
      "loss": 0.0707,
      "step": 2820
    },
    {
      "epoch": 0.5343870542854455,
      "grad_norm": 0.07990019023418427,
      "learning_rate": 0.0002,
      "loss": 0.071,
      "step": 2840
    },
    {
      "epoch": 0.5381503434001317,
      "grad_norm": 0.08538304269313812,
      "learning_rate": 0.0002,
      "loss": 0.0707,
      "step": 2860
    },
    {
      "epoch": 0.541913632514818,
      "grad_norm": 0.073255255818367,
      "learning_rate": 0.0002,
      "loss": 0.0699,
      "step": 2880
    },
    {
      "epoch": 0.5456769216295042,
      "grad_norm": 0.09101062268018723,
      "learning_rate": 0.0002,
      "loss": 0.0708,
      "step": 2900
    },
    {
      "epoch": 0.5494402107441905,
      "grad_norm": 0.0757337138056755,
      "learning_rate": 0.0002,
      "loss": 0.0702,
      "step": 2920
    },
    {
      "epoch": 0.5532034998588766,
      "grad_norm": 0.06553716212511063,
      "learning_rate": 0.0002,
      "loss": 0.0698,
      "step": 2940
    },
    {
      "epoch": 0.5569667889735629,
      "grad_norm": 0.08524072915315628,
      "learning_rate": 0.0002,
      "loss": 0.0738,
      "step": 2960
    },
    {
      "epoch": 0.5607300780882492,
      "grad_norm": 0.07361309975385666,
      "learning_rate": 0.0002,
      "loss": 0.0695,
      "step": 2980
    },
    {
      "epoch": 0.5644933672029354,
      "grad_norm": 0.07155182212591171,
      "learning_rate": 0.0002,
      "loss": 0.0667,
      "step": 3000
    },
    {
      "epoch": 0.5682566563176216,
      "grad_norm": 0.08088400214910507,
      "learning_rate": 0.0002,
      "loss": 0.0695,
      "step": 3020
    },
    {
      "epoch": 0.5720199454323078,
      "grad_norm": 0.08842818439006805,
      "learning_rate": 0.0002,
      "loss": 0.0656,
      "step": 3040
    },
    {
      "epoch": 0.5757832345469941,
      "grad_norm": 0.07135680317878723,
      "learning_rate": 0.0002,
      "loss": 0.0673,
      "step": 3060
    },
    {
      "epoch": 0.5795465236616804,
      "grad_norm": 0.08081484586000443,
      "learning_rate": 0.0002,
      "loss": 0.0709,
      "step": 3080
    },
    {
      "epoch": 0.5833098127763665,
      "grad_norm": 0.06999066472053528,
      "learning_rate": 0.0002,
      "loss": 0.0684,
      "step": 3100
    },
    {
      "epoch": 0.5870731018910528,
      "grad_norm": 0.06368447095155716,
      "learning_rate": 0.0002,
      "loss": 0.0691,
      "step": 3120
    },
    {
      "epoch": 0.590836391005739,
      "grad_norm": 0.08371565490961075,
      "learning_rate": 0.0002,
      "loss": 0.0641,
      "step": 3140
    },
    {
      "epoch": 0.5945996801204253,
      "grad_norm": 0.0675625279545784,
      "learning_rate": 0.0002,
      "loss": 0.069,
      "step": 3160
    },
    {
      "epoch": 0.5983629692351115,
      "grad_norm": 0.08121863007545471,
      "learning_rate": 0.0002,
      "loss": 0.065,
      "step": 3180
    },
    {
      "epoch": 0.6021262583497977,
      "grad_norm": 0.06689095497131348,
      "learning_rate": 0.0002,
      "loss": 0.0675,
      "step": 3200
    },
    {
      "epoch": 0.605889547464484,
      "grad_norm": 0.06634503602981567,
      "learning_rate": 0.0002,
      "loss": 0.064,
      "step": 3220
    },
    {
      "epoch": 0.6096528365791702,
      "grad_norm": 0.07062681019306183,
      "learning_rate": 0.0002,
      "loss": 0.0698,
      "step": 3240
    },
    {
      "epoch": 0.6134161256938564,
      "grad_norm": 0.07908321917057037,
      "learning_rate": 0.0002,
      "loss": 0.0674,
      "step": 3260
    },
    {
      "epoch": 0.6171794148085427,
      "grad_norm": 0.08132966607809067,
      "learning_rate": 0.0002,
      "loss": 0.0694,
      "step": 3280
    },
    {
      "epoch": 0.6209427039232289,
      "grad_norm": 0.081205353140831,
      "learning_rate": 0.0002,
      "loss": 0.0627,
      "step": 3300
    },
    {
      "epoch": 0.6247059930379152,
      "grad_norm": 0.07323930412530899,
      "learning_rate": 0.0002,
      "loss": 0.0645,
      "step": 3320
    },
    {
      "epoch": 0.6284692821526013,
      "grad_norm": 0.07979920506477356,
      "learning_rate": 0.0002,
      "loss": 0.0601,
      "step": 3340
    },
    {
      "epoch": 0.6322325712672876,
      "grad_norm": 0.06519381701946259,
      "learning_rate": 0.0002,
      "loss": 0.0677,
      "step": 3360
    },
    {
      "epoch": 0.6359958603819739,
      "grad_norm": 0.07861756533384323,
      "learning_rate": 0.0002,
      "loss": 0.0648,
      "step": 3380
    },
    {
      "epoch": 0.6397591494966601,
      "grad_norm": 0.09498826414346695,
      "learning_rate": 0.0002,
      "loss": 0.0644,
      "step": 3400
    },
    {
      "epoch": 0.6435224386113463,
      "grad_norm": 0.07069331407546997,
      "learning_rate": 0.0002,
      "loss": 0.0683,
      "step": 3420
    },
    {
      "epoch": 0.6472857277260325,
      "grad_norm": 0.07212232798337936,
      "learning_rate": 0.0002,
      "loss": 0.061,
      "step": 3440
    },
    {
      "epoch": 0.6510490168407188,
      "grad_norm": 0.06527985632419586,
      "learning_rate": 0.0002,
      "loss": 0.063,
      "step": 3460
    },
    {
      "epoch": 0.6548123059554051,
      "grad_norm": 0.09721993654966354,
      "learning_rate": 0.0002,
      "loss": 0.0699,
      "step": 3480
    },
    {
      "epoch": 0.6585755950700912,
      "grad_norm": 0.06381040811538696,
      "learning_rate": 0.0002,
      "loss": 0.0604,
      "step": 3500
    },
    {
      "epoch": 0.6623388841847775,
      "grad_norm": 0.07853078097105026,
      "learning_rate": 0.0002,
      "loss": 0.0649,
      "step": 3520
    },
    {
      "epoch": 0.6661021732994638,
      "grad_norm": 0.09212848544120789,
      "learning_rate": 0.0002,
      "loss": 0.0588,
      "step": 3540
    },
    {
      "epoch": 0.66986546241415,
      "grad_norm": 0.07097720354795456,
      "learning_rate": 0.0002,
      "loss": 0.0635,
      "step": 3560
    },
    {
      "epoch": 0.6736287515288362,
      "grad_norm": 0.08024760335683823,
      "learning_rate": 0.0002,
      "loss": 0.068,
      "step": 3580
    },
    {
      "epoch": 0.6773920406435224,
      "grad_norm": 0.07371170073747635,
      "learning_rate": 0.0002,
      "loss": 0.0652,
      "step": 3600
    },
    {
      "epoch": 0.6811553297582087,
      "grad_norm": 0.0894666537642479,
      "learning_rate": 0.0002,
      "loss": 0.0659,
      "step": 3620
    },
    {
      "epoch": 0.684918618872895,
      "grad_norm": 0.07533634454011917,
      "learning_rate": 0.0002,
      "loss": 0.0656,
      "step": 3640
    },
    {
      "epoch": 0.6886819079875811,
      "grad_norm": 0.09198255091905594,
      "learning_rate": 0.0002,
      "loss": 0.0645,
      "step": 3660
    },
    {
      "epoch": 0.6924451971022674,
      "grad_norm": 0.07213396579027176,
      "learning_rate": 0.0002,
      "loss": 0.0634,
      "step": 3680
    },
    {
      "epoch": 0.6962084862169536,
      "grad_norm": 0.06533551216125488,
      "learning_rate": 0.0002,
      "loss": 0.0666,
      "step": 3700
    },
    {
      "epoch": 0.6999717753316399,
      "grad_norm": 0.09535999596118927,
      "learning_rate": 0.0002,
      "loss": 0.0651,
      "step": 3720
    },
    {
      "epoch": 0.703735064446326,
      "grad_norm": 0.07631547003984451,
      "learning_rate": 0.0002,
      "loss": 0.0678,
      "step": 3740
    },
    {
      "epoch": 0.7074983535610123,
      "grad_norm": 0.09607011079788208,
      "learning_rate": 0.0002,
      "loss": 0.0681,
      "step": 3760
    },
    {
      "epoch": 0.7112616426756986,
      "grad_norm": 0.07209835946559906,
      "learning_rate": 0.0002,
      "loss": 0.068,
      "step": 3780
    },
    {
      "epoch": 0.7150249317903848,
      "grad_norm": 0.09137856960296631,
      "learning_rate": 0.0002,
      "loss": 0.0598,
      "step": 3800
    },
    {
      "epoch": 0.718788220905071,
      "grad_norm": 0.09964293241500854,
      "learning_rate": 0.0002,
      "loss": 0.0656,
      "step": 3820
    },
    {
      "epoch": 0.7225515100197573,
      "grad_norm": 0.07041902840137482,
      "learning_rate": 0.0002,
      "loss": 0.0617,
      "step": 3840
    },
    {
      "epoch": 0.7263147991344435,
      "grad_norm": 0.05418103560805321,
      "learning_rate": 0.0002,
      "loss": 0.0686,
      "step": 3860
    },
    {
      "epoch": 0.7300780882491298,
      "grad_norm": 0.07180003076791763,
      "learning_rate": 0.0002,
      "loss": 0.0627,
      "step": 3880
    },
    {
      "epoch": 0.7338413773638159,
      "grad_norm": 0.07342009246349335,
      "learning_rate": 0.0002,
      "loss": 0.0651,
      "step": 3900
    },
    {
      "epoch": 0.7376046664785022,
      "grad_norm": 0.08151030540466309,
      "learning_rate": 0.0002,
      "loss": 0.0645,
      "step": 3920
    },
    {
      "epoch": 0.7413679555931885,
      "grad_norm": 0.09977607429027557,
      "learning_rate": 0.0002,
      "loss": 0.0606,
      "step": 3940
    },
    {
      "epoch": 0.7451312447078747,
      "grad_norm": 0.06991346180438995,
      "learning_rate": 0.0002,
      "loss": 0.064,
      "step": 3960
    },
    {
      "epoch": 0.7488945338225609,
      "grad_norm": 0.06797617673873901,
      "learning_rate": 0.0002,
      "loss": 0.0595,
      "step": 3980
    },
    {
      "epoch": 0.7526578229372471,
      "grad_norm": 0.06056559830904007,
      "learning_rate": 0.0002,
      "loss": 0.0595,
      "step": 4000
    },
    {
      "epoch": 0.7564211120519334,
      "grad_norm": 0.06775388866662979,
      "learning_rate": 0.0002,
      "loss": 0.0624,
      "step": 4020
    },
    {
      "epoch": 0.7601844011666197,
      "grad_norm": 0.08091945201158524,
      "learning_rate": 0.0002,
      "loss": 0.0657,
      "step": 4040
    },
    {
      "epoch": 0.7639476902813058,
      "grad_norm": 0.1335289478302002,
      "learning_rate": 0.0002,
      "loss": 0.061,
      "step": 4060
    },
    {
      "epoch": 0.7677109793959921,
      "grad_norm": 0.07901336252689362,
      "learning_rate": 0.0002,
      "loss": 0.0635,
      "step": 4080
    },
    {
      "epoch": 0.7714742685106784,
      "grad_norm": 0.07874254137277603,
      "learning_rate": 0.0002,
      "loss": 0.066,
      "step": 4100
    },
    {
      "epoch": 0.7752375576253646,
      "grad_norm": 0.07547064125537872,
      "learning_rate": 0.0002,
      "loss": 0.061,
      "step": 4120
    },
    {
      "epoch": 0.7790008467400508,
      "grad_norm": 0.06392517685890198,
      "learning_rate": 0.0002,
      "loss": 0.0595,
      "step": 4140
    },
    {
      "epoch": 0.782764135854737,
      "grad_norm": 0.08115773648023605,
      "learning_rate": 0.0002,
      "loss": 0.0633,
      "step": 4160
    },
    {
      "epoch": 0.7865274249694233,
      "grad_norm": 0.07906658202409744,
      "learning_rate": 0.0002,
      "loss": 0.06,
      "step": 4180
    },
    {
      "epoch": 0.7902907140841096,
      "grad_norm": 0.8131846189498901,
      "learning_rate": 0.0002,
      "loss": 0.0642,
      "step": 4200
    },
    {
      "epoch": 0.7940540031987957,
      "grad_norm": 0.08816728740930557,
      "learning_rate": 0.0002,
      "loss": 0.0683,
      "step": 4220
    },
    {
      "epoch": 0.797817292313482,
      "grad_norm": 0.08981813490390778,
      "learning_rate": 0.0002,
      "loss": 0.0654,
      "step": 4240
    },
    {
      "epoch": 0.8015805814281682,
      "grad_norm": 0.08015542477369308,
      "learning_rate": 0.0002,
      "loss": 0.0623,
      "step": 4260
    },
    {
      "epoch": 0.8053438705428545,
      "grad_norm": 0.08604927361011505,
      "learning_rate": 0.0002,
      "loss": 0.0616,
      "step": 4280
    },
    {
      "epoch": 0.8091071596575407,
      "grad_norm": 0.08916622400283813,
      "learning_rate": 0.0002,
      "loss": 0.0651,
      "step": 4300
    },
    {
      "epoch": 0.8128704487722269,
      "grad_norm": 0.06978412717580795,
      "learning_rate": 0.0002,
      "loss": 0.0577,
      "step": 4320
    },
    {
      "epoch": 0.8166337378869132,
      "grad_norm": 0.089443139731884,
      "learning_rate": 0.0002,
      "loss": 0.0651,
      "step": 4340
    },
    {
      "epoch": 0.8203970270015994,
      "grad_norm": 0.08108926564455032,
      "learning_rate": 0.0002,
      "loss": 0.0584,
      "step": 4360
    },
    {
      "epoch": 0.8241603161162856,
      "grad_norm": 0.0913953110575676,
      "learning_rate": 0.0002,
      "loss": 0.0611,
      "step": 4380
    },
    {
      "epoch": 0.8279236052309719,
      "grad_norm": 0.07268033176660538,
      "learning_rate": 0.0002,
      "loss": 0.0601,
      "step": 4400
    },
    {
      "epoch": 0.8316868943456581,
      "grad_norm": 0.07602666318416595,
      "learning_rate": 0.0002,
      "loss": 0.0642,
      "step": 4420
    },
    {
      "epoch": 0.8354501834603444,
      "grad_norm": 0.06899157911539078,
      "learning_rate": 0.0002,
      "loss": 0.0603,
      "step": 4440
    },
    {
      "epoch": 0.8392134725750305,
      "grad_norm": 0.07310888916254044,
      "learning_rate": 0.0002,
      "loss": 0.0614,
      "step": 4460
    },
    {
      "epoch": 0.8429767616897168,
      "grad_norm": 0.086586132645607,
      "learning_rate": 0.0002,
      "loss": 0.0636,
      "step": 4480
    },
    {
      "epoch": 0.8467400508044031,
      "grad_norm": 0.04938528314232826,
      "learning_rate": 0.0002,
      "loss": 0.0577,
      "step": 4500
    },
    {
      "epoch": 0.8505033399190893,
      "grad_norm": 0.07192698121070862,
      "learning_rate": 0.0002,
      "loss": 0.0574,
      "step": 4520
    },
    {
      "epoch": 0.8542666290337755,
      "grad_norm": 0.06893257051706314,
      "learning_rate": 0.0002,
      "loss": 0.0604,
      "step": 4540
    },
    {
      "epoch": 0.8580299181484617,
      "grad_norm": 0.06691521406173706,
      "learning_rate": 0.0002,
      "loss": 0.0649,
      "step": 4560
    },
    {
      "epoch": 0.861793207263148,
      "grad_norm": 0.09420821070671082,
      "learning_rate": 0.0002,
      "loss": 0.0598,
      "step": 4580
    },
    {
      "epoch": 0.8655564963778343,
      "grad_norm": 0.07227174192667007,
      "learning_rate": 0.0002,
      "loss": 0.062,
      "step": 4600
    },
    {
      "epoch": 0.8693197854925204,
      "grad_norm": 0.07998435199260712,
      "learning_rate": 0.0002,
      "loss": 0.0585,
      "step": 4620
    },
    {
      "epoch": 0.8730830746072067,
      "grad_norm": 0.07850491255521774,
      "learning_rate": 0.0002,
      "loss": 0.0586,
      "step": 4640
    },
    {
      "epoch": 0.876846363721893,
      "grad_norm": 0.09450593590736389,
      "learning_rate": 0.0002,
      "loss": 0.0625,
      "step": 4660
    },
    {
      "epoch": 0.8806096528365792,
      "grad_norm": 0.08073689788579941,
      "learning_rate": 0.0002,
      "loss": 0.062,
      "step": 4680
    },
    {
      "epoch": 0.8843729419512654,
      "grad_norm": 0.06979519873857498,
      "learning_rate": 0.0002,
      "loss": 0.0584,
      "step": 4700
    },
    {
      "epoch": 0.8881362310659516,
      "grad_norm": 0.06407754868268967,
      "learning_rate": 0.0002,
      "loss": 0.0626,
      "step": 4720
    },
    {
      "epoch": 0.8918995201806379,
      "grad_norm": 0.2963426411151886,
      "learning_rate": 0.0002,
      "loss": 0.0625,
      "step": 4740
    },
    {
      "epoch": 0.8956628092953242,
      "grad_norm": 0.08902490139007568,
      "learning_rate": 0.0002,
      "loss": 0.0621,
      "step": 4760
    },
    {
      "epoch": 0.8994260984100103,
      "grad_norm": 0.08447249978780746,
      "learning_rate": 0.0002,
      "loss": 0.0638,
      "step": 4780
    },
    {
      "epoch": 0.9031893875246966,
      "grad_norm": 0.07621373981237411,
      "learning_rate": 0.0002,
      "loss": 0.0618,
      "step": 4800
    },
    {
      "epoch": 0.9069526766393828,
      "grad_norm": 0.07609863579273224,
      "learning_rate": 0.0002,
      "loss": 0.0627,
      "step": 4820
    },
    {
      "epoch": 0.9107159657540691,
      "grad_norm": 0.0785209983587265,
      "learning_rate": 0.0002,
      "loss": 0.0606,
      "step": 4840
    },
    {
      "epoch": 0.9144792548687553,
      "grad_norm": 0.08515089750289917,
      "learning_rate": 0.0002,
      "loss": 0.0577,
      "step": 4860
    },
    {
      "epoch": 0.9182425439834415,
      "grad_norm": 0.0810500979423523,
      "learning_rate": 0.0002,
      "loss": 0.0578,
      "step": 4880
    },
    {
      "epoch": 0.9220058330981278,
      "grad_norm": 0.08074364811182022,
      "learning_rate": 0.0002,
      "loss": 0.0556,
      "step": 4900
    },
    {
      "epoch": 0.925769122212814,
      "grad_norm": 0.06328209489583969,
      "learning_rate": 0.0002,
      "loss": 0.0555,
      "step": 4920
    },
    {
      "epoch": 0.9295324113275002,
      "grad_norm": 0.06556906551122665,
      "learning_rate": 0.0002,
      "loss": 0.056,
      "step": 4940
    },
    {
      "epoch": 0.9332957004421865,
      "grad_norm": 0.0747644379734993,
      "learning_rate": 0.0002,
      "loss": 0.0608,
      "step": 4960
    },
    {
      "epoch": 0.9370589895568727,
      "grad_norm": 0.07274675369262695,
      "learning_rate": 0.0002,
      "loss": 0.057,
      "step": 4980
    },
    {
      "epoch": 0.940822278671559,
      "grad_norm": 0.061261050403118134,
      "learning_rate": 0.0002,
      "loss": 0.0587,
      "step": 5000
    },
    {
      "epoch": 0.9445855677862451,
      "grad_norm": 0.08277834206819534,
      "learning_rate": 0.0002,
      "loss": 0.0608,
      "step": 5020
    },
    {
      "epoch": 0.9483488569009314,
      "grad_norm": 0.08663126826286316,
      "learning_rate": 0.0002,
      "loss": 0.0595,
      "step": 5040
    },
    {
      "epoch": 0.9521121460156177,
      "grad_norm": 0.06938447803258896,
      "learning_rate": 0.0002,
      "loss": 0.0567,
      "step": 5060
    },
    {
      "epoch": 0.9558754351303039,
      "grad_norm": 0.07865908741950989,
      "learning_rate": 0.0002,
      "loss": 0.0588,
      "step": 5080
    },
    {
      "epoch": 0.9596387242449901,
      "grad_norm": 0.061116304248571396,
      "learning_rate": 0.0002,
      "loss": 0.0574,
      "step": 5100
    },
    {
      "epoch": 0.9634020133596763,
      "grad_norm": 0.05689473822712898,
      "learning_rate": 0.0002,
      "loss": 0.0533,
      "step": 5120
    },
    {
      "epoch": 0.9671653024743626,
      "grad_norm": 0.07368452847003937,
      "learning_rate": 0.0002,
      "loss": 0.0591,
      "step": 5140
    },
    {
      "epoch": 0.9709285915890489,
      "grad_norm": 0.06758634001016617,
      "learning_rate": 0.0002,
      "loss": 0.0572,
      "step": 5160
    },
    {
      "epoch": 0.974691880703735,
      "grad_norm": 0.07348816096782684,
      "learning_rate": 0.0002,
      "loss": 0.0577,
      "step": 5180
    },
    {
      "epoch": 0.9784551698184213,
      "grad_norm": 0.07613357156515121,
      "learning_rate": 0.0002,
      "loss": 0.0566,
      "step": 5200
    },
    {
      "epoch": 0.9822184589331076,
      "grad_norm": 0.062316812574863434,
      "learning_rate": 0.0002,
      "loss": 0.0621,
      "step": 5220
    },
    {
      "epoch": 0.9859817480477938,
      "grad_norm": 0.08600709587335587,
      "learning_rate": 0.0002,
      "loss": 0.0576,
      "step": 5240
    },
    {
      "epoch": 0.98974503716248,
      "grad_norm": 0.06687742471694946,
      "learning_rate": 0.0002,
      "loss": 0.0571,
      "step": 5260
    },
    {
      "epoch": 0.9935083262771662,
      "grad_norm": 0.07486743479967117,
      "learning_rate": 0.0002,
      "loss": 0.0594,
      "step": 5280
    },
    {
      "epoch": 0.9972716153918525,
      "grad_norm": 0.08373595029115677,
      "learning_rate": 0.0002,
      "loss": 0.0579,
      "step": 5300
    },
    {
      "epoch": 0.9999059177721329,
      "eval_loss": 0.20169740915298462,
      "eval_runtime": 422.4603,
      "eval_samples_per_second": 4.1,
      "eval_steps_per_second": 0.514,
      "step": 5314
    }
  ],
  "logging_steps": 20,
  "max_steps": 14000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 77,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.423221588013613e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}