dimasik1987 committed
Commit b68e938 · verified · 1 Parent(s): 046c6ae

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:554ac76717b212531d487f826d6f56be0885baa17fbb23deb0b021b0b65b2f13
+oid sha256:4a76e3b7376f20d6fcdd5aabb63c0ab16c043b32e2b7dbf9950ba36d3020f24b
 size 203456160
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d32415e80e85feda35ed2a908559f19ac29ad6a54e828bab7c25f6a83b3ea21e
+oid sha256:2c33babc82d5c51590dec3f18ac7d982254164ed69bb16834f9adefb17093787
 size 407121750
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0714cccf9e05fe593f5ea870602d0318c86b16af3be10896beafa4c7a48d7aa5
+oid sha256:1e39d0af2cf4b87774b10590120a870543f9b08e53cedf7ba105ba8ca2c73da7
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6676fe28230ae15b45fb334c871c6fdf1a7984a935952b9f8650896c37a8c106
+oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
 size 1064
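
Each of the files above is a Git LFS pointer, so the diff only swaps the blob's SHA-256 object ID while the reported size stays the same. A minimal sketch of how one might verify that a downloaded blob matches its pointer; the local path is an assumption, and only the expected digest comes from the diff above:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its SHA-256 hex digest, the `oid` used in LFS pointers."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local checkout path; expected digest taken from the new adapter_model pointer.
expected = "4a76e3b7376f20d6fcdd5aabb63c0ab16c043b32e2b7dbf9950ba36d3020f24b"
assert lfs_sha256("last-checkpoint/adapter_model.safetensors") == expected
```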
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0007001526332740537,
+  "epoch": 0.0014003052665481075,
   "eval_steps": 5,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -230,6 +230,221 @@
       "eval_samples_per_second": 3.001,
       "eval_steps_per_second": 1.5,
       "step": 25
+    },
+    {
+      "epoch": 0.0007281587386050159,
+      "grad_norm": 1.333717942237854,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 1.1736,
+      "step": 26
+    },
+    {
+      "epoch": 0.000756164843935978,
+      "grad_norm": 1.4034525156021118,
+      "learning_rate": 6.167226819279528e-05,
+      "loss": 1.2473,
+      "step": 27
+    },
+    {
+      "epoch": 0.0007841709492669402,
+      "grad_norm": 1.6855180263519287,
+      "learning_rate": 5.782172325201155e-05,
+      "loss": 1.5994,
+      "step": 28
+    },
+    {
+      "epoch": 0.0008121770545979023,
+      "grad_norm": 1.4234453439712524,
+      "learning_rate": 5.392295478639225e-05,
+      "loss": 1.1151,
+      "step": 29
+    },
+    {
+      "epoch": 0.0008401831599288645,
+      "grad_norm": 1.4440644979476929,
+      "learning_rate": 5e-05,
+      "loss": 1.389,
+      "step": 30
+    },
+    {
+      "epoch": 0.0008401831599288645,
+      "eval_loss": 1.3428627252578735,
+      "eval_runtime": 5009.2202,
+      "eval_samples_per_second": 3.001,
+      "eval_steps_per_second": 1.501,
+      "step": 30
+    },
+    {
+      "epoch": 0.0008681892652598267,
+      "grad_norm": 1.7354429960250854,
+      "learning_rate": 4.607704521360776e-05,
+      "loss": 1.1572,
+      "step": 31
+    },
+    {
+      "epoch": 0.0008961953705907888,
+      "grad_norm": 3.8051087856292725,
+      "learning_rate": 4.2178276747988446e-05,
+      "loss": 1.4318,
+      "step": 32
+    },
+    {
+      "epoch": 0.0009242014759217509,
+      "grad_norm": 1.5283364057540894,
+      "learning_rate": 3.832773180720475e-05,
+      "loss": 1.3123,
+      "step": 33
+    },
+    {
+      "epoch": 0.0009522075812527131,
+      "grad_norm": 1.352604866027832,
+      "learning_rate": 3.4549150281252636e-05,
+      "loss": 1.3531,
+      "step": 34
+    },
+    {
+      "epoch": 0.0009802136865836753,
+      "grad_norm": 1.7521588802337646,
+      "learning_rate": 3.086582838174551e-05,
+      "loss": 1.3434,
+      "step": 35
+    },
+    {
+      "epoch": 0.0009802136865836753,
+      "eval_loss": 1.3325791358947754,
+      "eval_runtime": 5008.929,
+      "eval_samples_per_second": 3.002,
+      "eval_steps_per_second": 1.501,
+      "step": 35
+    },
+    {
+      "epoch": 0.0010082197919146375,
+      "grad_norm": 1.2757823467254639,
+      "learning_rate": 2.7300475013022663e-05,
+      "loss": 1.3256,
+      "step": 36
+    },
+    {
+      "epoch": 0.0010362258972455996,
+      "grad_norm": 1.1284574270248413,
+      "learning_rate": 2.3875071764202563e-05,
+      "loss": 1.2034,
+      "step": 37
+    },
+    {
+      "epoch": 0.0010642320025765616,
+      "grad_norm": 1.2620298862457275,
+      "learning_rate": 2.061073738537635e-05,
+      "loss": 1.6568,
+      "step": 38
+    },
+    {
+      "epoch": 0.0010922381079075238,
+      "grad_norm": 1.244226336479187,
+      "learning_rate": 1.7527597583490822e-05,
+      "loss": 1.2673,
+      "step": 39
+    },
+    {
+      "epoch": 0.001120244213238486,
+      "grad_norm": 1.1295228004455566,
+      "learning_rate": 1.4644660940672627e-05,
+      "loss": 1.4755,
+      "step": 40
+    },
+    {
+      "epoch": 0.001120244213238486,
+      "eval_loss": 1.3263696432113647,
+      "eval_runtime": 5009.0341,
+      "eval_samples_per_second": 3.002,
+      "eval_steps_per_second": 1.501,
+      "step": 40
+    },
+    {
+      "epoch": 0.0011482503185694481,
+      "grad_norm": 1.3037229776382446,
+      "learning_rate": 1.1979701719998453e-05,
+      "loss": 1.4539,
+      "step": 41
+    },
+    {
+      "epoch": 0.0011762564239004103,
+      "grad_norm": 1.3781661987304688,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 1.0606,
+      "step": 42
+    },
+    {
+      "epoch": 0.0012042625292313725,
+      "grad_norm": 1.5477420091629028,
+      "learning_rate": 7.367991782295391e-06,
+      "loss": 1.6065,
+      "step": 43
+    },
+    {
+      "epoch": 0.0012322686345623347,
+      "grad_norm": 1.5209083557128906,
+      "learning_rate": 5.449673790581611e-06,
+      "loss": 1.4001,
+      "step": 44
+    },
+    {
+      "epoch": 0.0012602747398932968,
+      "grad_norm": 1.1803650856018066,
+      "learning_rate": 3.8060233744356633e-06,
+      "loss": 1.399,
+      "step": 45
+    },
+    {
+      "epoch": 0.0012602747398932968,
+      "eval_loss": 1.3230071067810059,
+      "eval_runtime": 5009.7608,
+      "eval_samples_per_second": 3.001,
+      "eval_steps_per_second": 1.501,
+      "step": 45
+    },
+    {
+      "epoch": 0.0012882808452242588,
+      "grad_norm": 1.2966963052749634,
+      "learning_rate": 2.4471741852423237e-06,
+      "loss": 1.2219,
+      "step": 46
+    },
+    {
+      "epoch": 0.001316286950555221,
+      "grad_norm": 1.1639353036880493,
+      "learning_rate": 1.3815039801161721e-06,
+      "loss": 0.9402,
+      "step": 47
+    },
+    {
+      "epoch": 0.0013442930558861831,
+      "grad_norm": 1.3903875350952148,
+      "learning_rate": 6.15582970243117e-07,
+      "loss": 1.2695,
+      "step": 48
+    },
+    {
+      "epoch": 0.0013722991612171453,
+      "grad_norm": 1.8993217945098877,
+      "learning_rate": 1.5413331334360182e-07,
+      "loss": 1.5358,
+      "step": 49
+    },
+    {
+      "epoch": 0.0014003052665481075,
+      "grad_norm": 0.992319643497467,
+      "learning_rate": 0.0,
+      "loss": 0.9032,
+      "step": 50
+    },
+    {
+      "epoch": 0.0014003052665481075,
+      "eval_loss": 1.3224403858184814,
+      "eval_runtime": 5007.1747,
+      "eval_samples_per_second": 3.003,
+      "eval_steps_per_second": 1.501,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -244,12 +459,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 8294216761344000.0,
+  "total_flos": 1.6588433522688e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null