shibajustfor committed
Commit ea64d72 · verified · 1 Parent(s): 3a95015

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9297ffddbd1a28198089268a1ffedecfc9487af51640095ef4fdd325f16eeb84
+oid sha256:07e62ac60d9843fe1f10a62bfdeab4d7b3b595d3803d1b2c746ad5f9b0a77ec8
 size 125248064
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19ee64d67fdd9152201dc6423349a9c1f35d14ac73cea6d7b65236053c678c6f
+oid sha256:cc3b912d5cdd4e89fe9a82bfd072cb9c15c71075f6f1ad5fd633ecf1d242aa87
 size 64219860
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a7f1807a79f764cdbf1638aa0e0db1154794499b1015a5180c25c186b8d6d94
+oid sha256:55f5b8fc91a819fc39bd479452495c8f8f43e1c210ee91583af5c241cfc6079a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda
+oid sha256:d31dc31a119769737d72f3df4c8cdf99522596cafc12bf2eea05a4ff374f599c
 size 1064
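
Note: each file above is tracked with Git LFS, so the diff only touches the three-line pointer (spec version, sha256 oid, byte size) rather than the binary itself. As a minimal sketch, assuming the actual checkpoint files have already been pulled into last-checkpoint/, the new oids can be checked against the local artifacts like this (stdlib only; paths and this verification step are illustrative, not part of the commit):

import hashlib
from pathlib import Path

# Expected oids taken from the "+" pointer lines above.
EXPECTED = {
    "last-checkpoint/adapter_model.safetensors": "07e62ac60d9843fe1f10a62bfdeab4d7b3b595d3803d1b2c746ad5f9b0a77ec8",
    "last-checkpoint/optimizer.pt": "cc3b912d5cdd4e89fe9a82bfd072cb9c15c71075f6f1ad5fd633ecf1d242aa87",
    "last-checkpoint/rng_state.pth": "55f5b8fc91a819fc39bd479452495c8f8f43e1c210ee91583af5c241cfc6079a",
    "last-checkpoint/scheduler.pt": "d31dc31a119769737d72f3df4c8cdf99522596cafc12bf2eea05a4ff374f599c",
}

def sha256_of(path: Path) -> str:
    # Hash in 1 MiB chunks so large checkpoint files are not loaded into memory at once.
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

for name, oid in EXPECTED.items():
    p = Path(name)
    status = "OK" if p.exists() and sha256_of(p) == oid else "MISMATCH/MISSING"
    print(f"{name}: {status}")
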
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.05350454788657036,
+  "epoch": 0.08025682182985554,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 21.164,
       "eval_steps_per_second": 10.596,
       "step": 100
+    },
+    {
+      "epoch": 0.05885500267522739,
+      "grad_norm": 0.5722771883010864,
+      "learning_rate": 0.0002,
+      "loss": 0.9267,
+      "step": 110
+    },
+    {
+      "epoch": 0.06420545746388442,
+      "grad_norm": 0.5415870547294617,
+      "learning_rate": 0.0002,
+      "loss": 0.9077,
+      "step": 120
+    },
+    {
+      "epoch": 0.06955591225254147,
+      "grad_norm": 0.5300649404525757,
+      "learning_rate": 0.0002,
+      "loss": 0.8924,
+      "step": 130
+    },
+    {
+      "epoch": 0.0749063670411985,
+      "grad_norm": 0.5958105325698853,
+      "learning_rate": 0.0002,
+      "loss": 0.9126,
+      "step": 140
+    },
+    {
+      "epoch": 0.08025682182985554,
+      "grad_norm": 0.5649930238723755,
+      "learning_rate": 0.0002,
+      "loss": 0.8239,
+      "step": 150
+    },
+    {
+      "epoch": 0.08025682182985554,
+      "eval_loss": 0.8727919459342957,
+      "eval_runtime": 37.1914,
+      "eval_samples_per_second": 21.161,
+      "eval_steps_per_second": 10.594,
+      "step": 150
     }
   ],
   "logging_steps": 10,
@@ -120,7 +163,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.1662041726976e+16,
+  "total_flos": 4.7493062590464e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null