shibajustfor committed
Commit e5417ca · verified · 1 Parent(s): cbd5780

Training in progress, step 100, checkpoint

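The files under last-checkpoint/ are the usual Hugging Face Trainer resume state saved at step 100 (adapter weights, optimizer, RNG state, scheduler, trainer state). As a rough sketch only: the hyperparameters visible in trainer_state.json below (eval_steps=50, logging_steps=10, learning_rate=0.0002, train_batch_size=2) would correspond to TrainingArguments roughly like the following; save_steps=100 is inferred from the commit message, and output_dir, model, and datasets are placeholders that are not part of this commit.

from transformers import TrainingArguments

# Sketch only: values are read off trainer_state.json in this checkpoint;
# save_steps=100 is an inference from "step 100, checkpoint".
args = TrainingArguments(
    output_dir="outputs",            # placeholder, not in this commit
    per_device_train_batch_size=2,   # "train_batch_size": 2
    learning_rate=2e-4,              # "learning_rate": 0.0002
    logging_steps=10,                # "logging_steps": 10
    eval_strategy="steps",           # "evaluation_strategy" on older transformers versions
    eval_steps=50,                   # "eval_steps": 50
    save_steps=100,                  # inferred: checkpoint written at step 100
)

# Resuming from this checkpoint would then look like (model/data omitted here):
# trainer = Trainer(model=..., args=args, train_dataset=..., eval_dataset=...)
# trainer.train(resume_from_checkpoint="last-checkpoint")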
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb22093dabe281b9cbc4eb22aab4a8cf629244ccccad329a0521f9dd964d963e
+ oid sha256:a3f7ee8792dd9cf0c928273870ab52cb64aff116c6a850cc17fb0f801a208234
  size 12609416
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:06c0bd56e48ea56e727bbc5767ef94a1a0aed786d16b0e478fa4fa52591ec340
+ oid sha256:ffeafbaeb8b27e715a256d509047223bfb10dee18e019cc1736c070185ab83db
  size 6615034
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d70227acf03aa0e9f6c4188369d5f8fc3ccfbaa5e5e7dc786509f886f3f7220
+ oid sha256:d13e4ef893c536a59c5ca202a0f867f0e94596caf4c68d31ee7476794a1d4ac8
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9b80fcc7599efca0c6313d990c467c2eb3001742b23ddaadc22e3499c12cea79
+ oid sha256:81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda
  size 1064
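All four files above are stored as Git LFS pointers: between the step-50 and step-100 checkpoints only the sha256 oid changes, while the byte sizes stay identical. A small stdlib-only sketch (file paths are illustrative) for checking a downloaded blob against such a pointer:

import hashlib
from pathlib import Path

def matches_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded file against a Git LFS pointer (version/oid/size lines)."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    data = Path(blob_path).read_bytes()
    return (
        hashlib.sha256(data).hexdigest() == expected_oid
        and len(data) == int(fields["size"])
    )

# Illustrative usage; the pointer contents are exactly what the diffs above show:
# matches_lfs_pointer("adapter_model.safetensors.ptr", "adapter_model.safetensors")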
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.006539580812869895,
+ "epoch": 0.01307916162573979,
  "eval_steps": 50,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -58,6 +58,49 @@
  "eval_samples_per_second": 85.648,
  "eval_steps_per_second": 42.824,
  "step": 50
+ },
+ {
+ "epoch": 0.007847496975443874,
+ "grad_norm": 18.687952041625977,
+ "learning_rate": 0.0002,
+ "loss": 12.9873,
+ "step": 60
+ },
+ {
+ "epoch": 0.009155413138017853,
+ "grad_norm": 8.790769577026367,
+ "learning_rate": 0.0002,
+ "loss": 12.423,
+ "step": 70
+ },
+ {
+ "epoch": 0.010463329300591831,
+ "grad_norm": 8.85031509399414,
+ "learning_rate": 0.0002,
+ "loss": 12.0676,
+ "step": 80
+ },
+ {
+ "epoch": 0.011771245463165811,
+ "grad_norm": 12.773844718933105,
+ "learning_rate": 0.0002,
+ "loss": 12.8019,
+ "step": 90
+ },
+ {
+ "epoch": 0.01307916162573979,
+ "grad_norm": 11.019274711608887,
+ "learning_rate": 0.0002,
+ "loss": 12.2702,
+ "step": 100
+ },
+ {
+ "epoch": 0.01307916162573979,
+ "eval_loss": 3.001326560974121,
+ "eval_runtime": 37.3516,
+ "eval_samples_per_second": 86.208,
+ "eval_steps_per_second": 43.104,
+ "step": 100
  }
  ],
  "logging_steps": 10,
@@ -77,7 +120,7 @@
  "attributes": {}
  }
  },
- "total_flos": 386608712908800.0,
+ "total_flos": 775094167142400.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null