error577 committed on
Commit bbb6e3f · verified · 1 Parent(s): eda59ee

Training in progress, step 300, checkpoint

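This is the kind of commit `transformers.Trainer` pushes automatically when checkpointing to the Hub: the `last-checkpoint/` folder and the "Training in progress, step N, checkpoint" message match the `hub_strategy="checkpoint"` behavior. A minimal sketch of a training setup consistent with the values in `trainer_state.json` below; the output directory and save cadence are assumptions, only the commented values are grounded in this commit:

```python
from transformers import TrainingArguments

# Hypothetical reconstruction; model and dataset are not recorded in this commit.
args = TrainingArguments(
    output_dir="outputs",           # assumption
    push_to_hub=True,
    hub_strategy="checkpoint",      # mirrors the latest checkpoint into last-checkpoint/
    save_steps=50,                  # assumption: save cadence matching eval_steps
    eval_strategy="steps",          # named evaluation_strategy on older transformers
    eval_steps=50,                  # "eval_steps": 50 in trainer_state.json
    logging_steps=1,                # "logging_steps": 1
    per_device_train_batch_size=1,  # "train_batch_size": 1
    max_steps=300,                  # run ends at step 300 ("should_training_stop": true)
)
```

With this strategy, a run can later be resumed by downloading the repository and pointing `trainer.train(resume_from_checkpoint=...)` at the `last-checkpoint/` folder.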
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff9fbb30301057510fccd713a7615f48acf1a5bdbe950c8b2d2b4d4e8e7b395e
+oid sha256:9c9e4dd816674f656017c8a2ed41209cd2c297c41f55fae0279c467dc4d8041e
 size 479005064
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dcec3307de05b8592716fe765819c8f7dfe1a4aa8ab3f4487628bfb3f17edab
-size 243802484
+oid sha256:3eac352bf4a411d3fd08e9ea4170ef4fe6642e77dd87eadf5a63f336287b9b85
+size 243802996
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dfe1aa31b8376b0bb046d66ecc354088893e149cb258e8ce5829141b4b8eafc8
+oid sha256:3341cd00b63d6a7a00c7b3a77f57159c6ca21f09bf4c7cab59461e7b2bf3b187
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:477e1ab9e7e387f392e0bb68fb7cd86779a760a788b2ed973ec470f1c83dd5f7
+oid sha256:0d93420319c4318ff13366855f16b6ec61d99b866bdf2a20293a1621b040b36f
 size 1064
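The four binary files above are stored through Git LFS, so the diff shows only their pointer files changing: a new sha256 oid for each, plus a slightly different byte size for optimizer.pt. A minimal sketch for checking a downloaded file against the oid in its pointer (the local path is assumed):

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file in chunks and return its sha256 hex digest (the LFS oid)."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new adapter_model.safetensors pointer above
expected = "9c9e4dd816674f656017c8a2ed41209cd2c297c41f55fae0279c467dc4d8041e"
print(lfs_oid("last-checkpoint/adapter_model.safetensors") == expected)
```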
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
-"epoch": 0.012489150050893287,
+"epoch": 0.014986980061071943,
 "eval_steps": 50,
-"global_step": 250,
+"global_step": 300,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -1805,6 +1805,364 @@
 "eval_samples_per_second": 7.598,
 "eval_steps_per_second": 7.598,
 "step": 250
+},
+{
+"epoch": 0.01253910665109686,
+"grad_norm": 1.913027048110962,
+"learning_rate": 2.0641226378639715e-05,
+"loss": 0.9081,
+"step": 251
+},
+{
+"epoch": 0.012589063251300433,
+"grad_norm": 1.6886483430862427,
+"learning_rate": 1.9826210008049785e-05,
+"loss": 0.8158,
+"step": 252
+},
+{
+"epoch": 0.012639019851504006,
+"grad_norm": 1.481825590133667,
+"learning_rate": 1.902647010887655e-05,
+"loss": 0.7513,
+"step": 253
+},
+{
+"epoch": 0.012688976451707579,
+"grad_norm": 1.1585659980773926,
+"learning_rate": 1.8242100534143062e-05,
+"loss": 0.5836,
+"step": 254
+},
+{
+"epoch": 0.012738933051911152,
+"grad_norm": 1.1122362613677979,
+"learning_rate": 1.7473193333096575e-05,
+"loss": 0.3605,
+"step": 255
+},
+{
+"epoch": 0.012788889652114725,
+"grad_norm": 1.0502725839614868,
+"learning_rate": 1.671983874040631e-05,
+"loss": 0.3302,
+"step": 256
+},
+{
+"epoch": 0.0128388462523183,
+"grad_norm": 0.9571714997291565,
+"learning_rate": 1.598212516557394e-05,
+"loss": 0.3259,
+"step": 257
+},
+{
+"epoch": 0.012888802852521872,
+"grad_norm": 0.7586105465888977,
+"learning_rate": 1.526013918255836e-05,
+"loss": 0.3028,
+"step": 258
+},
+{
+"epoch": 0.012938759452725445,
+"grad_norm": 0.6112863421440125,
+"learning_rate": 1.4553965519615723e-05,
+"loss": 0.3124,
+"step": 259
+},
+{
+"epoch": 0.012988716052929018,
+"grad_norm": 0.619231641292572,
+"learning_rate": 1.3863687049356464e-05,
+"loss": 0.3073,
+"step": 260
+},
+{
+"epoch": 0.01303867265313259,
+"grad_norm": 0.5576855540275574,
+"learning_rate": 1.3189384779019535e-05,
+"loss": 0.32,
+"step": 261
+},
+{
+"epoch": 0.013088629253336164,
+"grad_norm": 9.921064376831055,
+"learning_rate": 1.25311378409661e-05,
+"loss": 0.437,
+"step": 262
+},
+{
+"epoch": 0.013138585853539737,
+"grad_norm": 0.4192136824131012,
+"learning_rate": 1.1889023483392879e-05,
+"loss": 0.2452,
+"step": 263
+},
+{
+"epoch": 0.013188542453743311,
+"grad_norm": 0.47411757707595825,
+"learning_rate": 1.1263117061266675e-05,
+"loss": 0.26,
+"step": 264
+},
+{
+"epoch": 0.013238499053946884,
+"grad_norm": 0.475243479013443,
+"learning_rate": 1.0653492027481286e-05,
+"loss": 0.2812,
+"step": 265
+},
+{
+"epoch": 0.013288455654150457,
+"grad_norm": 0.41210004687309265,
+"learning_rate": 1.0060219924237379e-05,
+"loss": 0.2719,
+"step": 266
+},
+{
+"epoch": 0.01333841225435403,
+"grad_norm": 0.39359334111213684,
+"learning_rate": 9.48337037464666e-06,
+"loss": 0.2401,
+"step": 267
+},
+{
+"epoch": 0.013388368854557603,
+"grad_norm": 0.48420053720474243,
+"learning_rate": 8.923011074561404e-06,
+"loss": 0.2553,
+"step": 268
+},
+{
+"epoch": 0.013438325454761176,
+"grad_norm": 0.4084879159927368,
+"learning_rate": 8.379207784630004e-06,
+"loss": 0.2873,
+"step": 269
+},
+{
+"epoch": 0.013488282054964749,
+"grad_norm": 0.3936237692832947,
+"learning_rate": 7.852024322579648e-06,
+"loss": 0.25,
+"step": 270
+},
+{
+"epoch": 0.013538238655168322,
+"grad_norm": 0.3661133944988251,
+"learning_rate": 7.34152255572697e-06,
+"loss": 0.2493,
+"step": 271
+},
+{
+"epoch": 0.013588195255371896,
+"grad_norm": 0.6841079592704773,
+"learning_rate": 6.847762393717782e-06,
+"loss": 0.5608,
+"step": 272
+},
+{
+"epoch": 0.01363815185557547,
+"grad_norm": 1.142737865447998,
+"learning_rate": 6.370801781496326e-06,
+"loss": 0.8011,
+"step": 273
+},
+{
+"epoch": 0.013688108455779042,
+"grad_norm": 1.2466866970062256,
+"learning_rate": 5.910696692505201e-06,
+"loss": 0.651,
+"step": 274
+},
+{
+"epoch": 0.013738065055982615,
+"grad_norm": 1.065708041191101,
+"learning_rate": 5.467501122116563e-06,
+"loss": 0.6143,
+"step": 275
+},
+{
+"epoch": 0.013788021656186188,
+"grad_norm": 0.997288167476654,
+"learning_rate": 5.0412670812956465e-06,
+"loss": 0.5756,
+"step": 276
+},
+{
+"epoch": 0.013837978256389761,
+"grad_norm": 1.138244390487671,
+"learning_rate": 4.6320445904969475e-06,
+"loss": 0.6112,
+"step": 277
+},
+{
+"epoch": 0.013887934856593334,
+"grad_norm": 1.1244843006134033,
+"learning_rate": 4.239881673794165e-06,
+"loss": 0.656,
+"step": 278
+},
+{
+"epoch": 0.013937891456796908,
+"grad_norm": 1.0220907926559448,
+"learning_rate": 3.864824353244367e-06,
+"loss": 0.5637,
+"step": 279
+},
+{
+"epoch": 0.013987848057000481,
+"grad_norm": 0.9114366173744202,
+"learning_rate": 3.506916643487001e-06,
+"loss": 0.6459,
+"step": 280
+},
+{
+"epoch": 0.014037804657204054,
+"grad_norm": 1.131927728652954,
+"learning_rate": 3.166200546578718e-06,
+"loss": 0.4715,
+"step": 281
+},
+{
+"epoch": 0.014087761257407627,
+"grad_norm": 1.0005278587341309,
+"learning_rate": 2.8427160470641253e-06,
+"loss": 0.4945,
+"step": 282
+},
+{
+"epoch": 0.0141377178576112,
+"grad_norm": 1.0136245489120483,
+"learning_rate": 2.5365011072835117e-06,
+"loss": 0.4782,
+"step": 283
+},
+{
+"epoch": 0.014187674457814773,
+"grad_norm": 1.0716521739959717,
+"learning_rate": 2.2475916629177415e-06,
+"loss": 0.7236,
+"step": 284
+},
+{
+"epoch": 0.014237631058018346,
+"grad_norm": 1.3851267099380493,
+"learning_rate": 1.9760216187710787e-06,
+"loss": 1.4572,
+"step": 285
+},
+{
+"epoch": 0.014287587658221919,
+"grad_norm": 1.6684229373931885,
+"learning_rate": 1.7218228447922867e-06,
+"loss": 1.241,
+"step": 286
+},
+{
+"epoch": 0.014337544258425493,
+"grad_norm": 1.824377417564392,
+"learning_rate": 1.4850251723345196e-06,
+"loss": 1.2239,
+"step": 287
+},
+{
+"epoch": 0.014387500858629066,
+"grad_norm": 1.8666472434997559,
+"learning_rate": 1.2656563906545902e-06,
+"loss": 1.0805,
+"step": 288
+},
+{
+"epoch": 0.01443745745883264,
+"grad_norm": 1.4249438047409058,
+"learning_rate": 1.0637422436516274e-06,
+"loss": 1.2095,
+"step": 289
+},
+{
+"epoch": 0.014487414059036212,
+"grad_norm": 1.5718159675598145,
+"learning_rate": 8.793064268460604e-07,
+"loss": 1.1586,
+"step": 290
+},
+{
+"epoch": 0.014537370659239785,
+"grad_norm": 1.645261526107788,
+"learning_rate": 7.123705845987093e-07,
+"loss": 1.36,
+"step": 291
+},
+{
+"epoch": 0.014587327259443358,
+"grad_norm": 1.381696343421936,
+"learning_rate": 5.629543075708176e-07,
+"loss": 0.9215,
+"step": 292
+},
+{
+"epoch": 0.014637283859646931,
+"grad_norm": 1.696902871131897,
+"learning_rate": 4.310751304249738e-07,
+"loss": 1.2197,
+"step": 293
+},
+{
+"epoch": 0.014687240459850506,
+"grad_norm": 1.790809988975525,
+"learning_rate": 3.167485297673411e-07,
+"loss": 1.1144,
+"step": 294
+},
+{
+"epoch": 0.014737197060054079,
+"grad_norm": 1.3841776847839355,
+"learning_rate": 2.1998792233142714e-07,
+"loss": 1.1018,
+"step": 295
+},
+{
+"epoch": 0.014787153660257651,
+"grad_norm": 1.6458978652954102,
+"learning_rate": 1.4080466340349316e-07,
+"loss": 1.0377,
+"step": 296
+},
+{
+"epoch": 0.014837110260461224,
+"grad_norm": 1.813490629196167,
+"learning_rate": 7.92080454900701e-08,
+"loss": 1.1065,
+"step": 297
+},
+{
+"epoch": 0.014887066860664797,
+"grad_norm": 1.5537409782409668,
+"learning_rate": 3.5205297227380855e-08,
+"loss": 0.9795,
+"step": 298
+},
+{
+"epoch": 0.01493702346086837,
+"grad_norm": 1.6343427896499634,
+"learning_rate": 8.801582533035644e-09,
+"loss": 1.0971,
+"step": 299
+},
+{
+"epoch": 0.014986980061071943,
+"grad_norm": 2.027508497238159,
+"learning_rate": 0.0,
+"loss": 0.9164,
+"step": 300
+},
+{
+"epoch": 0.014986980061071943,
+"eval_loss": 0.5926434993743896,
+"eval_runtime": 84.603,
+"eval_samples_per_second": 7.588,
+"eval_steps_per_second": 7.588,
+"step": 300
 }
 ],
 "logging_steps": 1,
@@ -1819,12 +2177,12 @@
 "should_evaluate": false,
 "should_log": false,
 "should_save": true,
-"should_training_stop": false
+"should_training_stop": true
 },
 "attributes": {}
 }
 },
-"total_flos": 3.5576606625890304e+16,
+"total_flos": 4.26901495653335e+16,
 "train_batch_size": 1,
 "trial_name": null,
 "trial_params": null