Upload checkpoint 700

Files changed:
- README.md (+3 -3)
- adapter_model.safetensors (+1 -1)
- optimizer.pt (+1 -1)
- scheduler.pt (+1 -1)
- trainer_state.json (+703 -3)

README.md CHANGED
@@ -2,7 +2,7 @@
 base_model: Qwen/Qwen2.5-3B-Instruct
 library_name: peft
 ---
-# Gradience T1 3B (Step
+# Gradience T1 3B (Step 700 Checkpoint)
 
 > [!NOTE]
 > Training in progress...
@@ -38,10 +38,10 @@ library_name: peft
 </head>
 <body>
 <div style="width: 100%; background-color: #e0e0e0; border-radius: 25px; overflow: hidden; margin: 20px 0;">
-<div style="height: 30px; width:
+<div style="height: 30px; width: 7.12%; background-color: #76c7c0; text-align: center; line-height: 30px; color: white; border-radius: 25px 0 0 25px;">
 <!-- 3.75% -->
 </div>
 </div>
-<p style="font-family: Arial, sans-serif; font-size: 16px;">Progress:
+<p style="font-family: Arial, sans-serif; font-size: 16px;">Progress: 700 out of 9838 steps</p>
 </body>
 </html>
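
The new width value is consistent with the step counter in the same file. A minimal sketch of the presumed derivation (a plain percentage rounded to two decimals — the rounding rule is an assumption, not stated in the repo):

```python
# Hypothetical reconstruction of the progress-bar width calculation.
step, total_steps = 700, 9838               # from "Progress: 700 out of 9838 steps"
width = round(100 * step / total_steps, 2)  # 7.12 -- matches the committed width
print(f"{width}%")
```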

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0b5e41255c1d4bf0f9456736397070a1d6da928c6b983a2fa6036e435f3ba138
 size 119801528

optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b0e826b6d29db32cb20a8d6d552c0cd3fe94ae03e9488dd1fcc85f7c2c6c4f27
 size 61392692

scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:89004edc796db902f1191936d15dd603ea7441e94ff73ffee86a12a07c5c308d
 size 1064
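
Each of the three binaries above is stored as a Git LFS pointer: a three-line text file recording the spec version, the payload's sha256 oid, and its byte size. A minimal sketch for checking a downloaded file against its pointer (the local path is hypothetical):

```python
import hashlib
import os

path = "checkpoint-700/adapter_model.safetensors"  # hypothetical local path

# Recompute the two fields the pointer records: sha256 oid and byte size.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("oid sha256:" + h.hexdigest())  # expect 0b5e4125...5f3ba138
print("size", os.path.getsize(path))  # expect 119801528
```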

trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.1423053466151657,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4208,6 +4208,706 @@
       "learning_rate": 0.0001879182345164243,
       "loss": 1.2306,
       "step": 600
+    },
+    {
+      "epoch": 0.1221793047367351,
+      "grad_norm": 0.1361241340637207,
+      "learning_rate": 0.00018789789484389302,
+      "loss": 1.1563,
+      "step": 601
+    },
+    {
+      "epoch": 0.12238259808904249,
+      "grad_norm": 0.11735684424638748,
+      "learning_rate": 0.00018787755517136174,
+      "loss": 1.104,
+      "step": 602
+    },
+    {
+      "epoch": 0.12258589144134986,
+      "grad_norm": 0.11648523807525635,
+      "learning_rate": 0.0001878572154988305,
+      "loss": 1.0008,
+      "step": 603
+    },
+    {
+      "epoch": 0.12278918479365725,
+      "grad_norm": 0.12473436444997787,
+      "learning_rate": 0.00018783687582629922,
+      "loss": 1.0741,
+      "step": 604
+    },
+    {
+      "epoch": 0.12299247814596463,
+      "grad_norm": 0.11664781719446182,
+      "learning_rate": 0.00018781653615376794,
+      "loss": 1.1155,
+      "step": 605
+    },
+    {
+      "epoch": 0.123195771498272,
+      "grad_norm": 0.12415888160467148,
+      "learning_rate": 0.00018779619648123666,
+      "loss": 1.158,
+      "step": 606
+    },
+    {
+      "epoch": 0.12339906485057939,
+      "grad_norm": 0.1223251074552536,
+      "learning_rate": 0.0001877758568087054,
+      "loss": 1.1045,
+      "step": 607
+    },
+    {
+      "epoch": 0.12360235820288677,
+      "grad_norm": 0.12289747595787048,
+      "learning_rate": 0.00018775551713617411,
+      "loss": 1.0768,
+      "step": 608
+    },
+    {
+      "epoch": 0.12380565155519414,
+      "grad_norm": 0.1316901594400406,
+      "learning_rate": 0.00018773517746364284,
+      "loss": 1.2156,
+      "step": 609
+    },
+    {
+      "epoch": 0.12400894490750153,
+      "grad_norm": 0.12060056626796722,
+      "learning_rate": 0.00018771483779111156,
+      "loss": 1.0221,
+      "step": 610
+    },
+    {
+      "epoch": 0.1242122382598089,
+      "grad_norm": 0.1384373903274536,
+      "learning_rate": 0.00018769449811858031,
+      "loss": 1.1059,
+      "step": 611
+    },
+    {
+      "epoch": 0.12441553161211628,
+      "grad_norm": 0.12399812787771225,
+      "learning_rate": 0.00018767415844604904,
+      "loss": 1.0193,
+      "step": 612
+    },
+    {
+      "epoch": 0.12461882496442367,
+      "grad_norm": 0.13406959176063538,
+      "learning_rate": 0.00018765381877351776,
+      "loss": 1.1572,
+      "step": 613
+    },
+    {
+      "epoch": 0.12482211831673104,
+      "grad_norm": 0.12881499528884888,
+      "learning_rate": 0.0001876334791009865,
+      "loss": 1.1914,
+      "step": 614
+    },
+    {
+      "epoch": 0.1250254116690384,
+      "grad_norm": 0.11472728103399277,
+      "learning_rate": 0.0001876131394284552,
+      "loss": 1.0822,
+      "step": 615
+    },
+    {
+      "epoch": 0.1252287050213458,
+      "grad_norm": 0.1251503825187683,
+      "learning_rate": 0.00018759279975592394,
+      "loss": 1.1783,
+      "step": 616
+    },
+    {
+      "epoch": 0.12543199837365318,
+      "grad_norm": 0.1414482593536377,
+      "learning_rate": 0.00018757246008339266,
+      "loss": 1.1925,
+      "step": 617
+    },
+    {
+      "epoch": 0.12563529172596055,
+      "grad_norm": 0.122686967253685,
+      "learning_rate": 0.00018755212041086139,
+      "loss": 1.091,
+      "step": 618
+    },
+    {
+      "epoch": 0.12583858507826795,
+      "grad_norm": 0.12301596254110336,
+      "learning_rate": 0.00018753178073833014,
+      "loss": 1.108,
+      "step": 619
+    },
+    {
+      "epoch": 0.12604187843057532,
+      "grad_norm": 0.1191742941737175,
+      "learning_rate": 0.00018751144106579886,
+      "loss": 1.0413,
+      "step": 620
+    },
+    {
+      "epoch": 0.1262451717828827,
+      "grad_norm": 0.0971694141626358,
+      "learning_rate": 0.00018749110139326759,
+      "loss": 0.8473,
+      "step": 621
+    },
+    {
+      "epoch": 0.1264484651351901,
+      "grad_norm": 0.12381591647863388,
+      "learning_rate": 0.0001874707617207363,
+      "loss": 1.1503,
+      "step": 622
+    },
+    {
+      "epoch": 0.12665175848749746,
+      "grad_norm": 0.13411198556423187,
+      "learning_rate": 0.00018745042204820504,
+      "loss": 1.164,
+      "step": 623
+    },
+    {
+      "epoch": 0.12685505183980483,
+      "grad_norm": 0.12838509678840637,
+      "learning_rate": 0.00018743008237567376,
+      "loss": 1.1768,
+      "step": 624
+    },
+    {
+      "epoch": 0.1270583451921122,
+      "grad_norm": 0.11623813211917877,
+      "learning_rate": 0.00018740974270314248,
+      "loss": 1.1611,
+      "step": 625
+    },
+    {
+      "epoch": 0.1272616385444196,
+      "grad_norm": 0.11001920700073242,
+      "learning_rate": 0.0001873894030306112,
+      "loss": 1.0182,
+      "step": 626
+    },
+    {
+      "epoch": 0.12746493189672697,
+      "grad_norm": 0.11987441778182983,
+      "learning_rate": 0.00018736906335807996,
+      "loss": 1.0509,
+      "step": 627
+    },
+    {
+      "epoch": 0.12766822524903434,
+      "grad_norm": 0.13036808371543884,
+      "learning_rate": 0.00018734872368554868,
+      "loss": 1.2035,
+      "step": 628
+    },
+    {
+      "epoch": 0.12787151860134174,
+      "grad_norm": 0.12546774744987488,
+      "learning_rate": 0.0001873283840130174,
+      "loss": 1.1434,
+      "step": 629
+    },
+    {
+      "epoch": 0.12807481195364911,
+      "grad_norm": 0.1025729849934578,
+      "learning_rate": 0.00018730804434048613,
+      "loss": 0.9868,
+      "step": 630
+    },
+    {
+      "epoch": 0.12827810530595649,
+      "grad_norm": 0.1013616994023323,
+      "learning_rate": 0.00018728770466795483,
+      "loss": 0.9281,
+      "step": 631
+    },
+    {
+      "epoch": 0.12848139865826388,
+      "grad_norm": 0.11066362261772156,
+      "learning_rate": 0.00018726736499542358,
+      "loss": 1.0345,
+      "step": 632
+    },
+    {
+      "epoch": 0.12868469201057126,
+      "grad_norm": 0.1280633807182312,
+      "learning_rate": 0.0001872470253228923,
+      "loss": 1.2335,
+      "step": 633
+    },
+    {
+      "epoch": 0.12888798536287863,
+      "grad_norm": 0.11954978853464127,
+      "learning_rate": 0.00018722668565036103,
+      "loss": 1.0298,
+      "step": 634
+    },
+    {
+      "epoch": 0.12909127871518603,
+      "grad_norm": 0.11124943196773529,
+      "learning_rate": 0.00018720634597782976,
+      "loss": 1.0896,
+      "step": 635
+    },
+    {
+      "epoch": 0.1292945720674934,
+      "grad_norm": 0.12496782839298248,
+      "learning_rate": 0.0001871860063052985,
+      "loss": 1.0897,
+      "step": 636
+    },
+    {
+      "epoch": 0.12949786541980077,
+      "grad_norm": 0.1257556527853012,
+      "learning_rate": 0.00018716566663276723,
+      "loss": 1.0148,
+      "step": 637
+    },
+    {
+      "epoch": 0.12970115877210814,
+      "grad_norm": 0.11928705126047134,
+      "learning_rate": 0.00018714532696023596,
+      "loss": 1.1415,
+      "step": 638
+    },
+    {
+      "epoch": 0.12990445212441554,
+      "grad_norm": 0.1109057068824768,
+      "learning_rate": 0.00018712498728770468,
+      "loss": 1.063,
+      "step": 639
+    },
+    {
+      "epoch": 0.1301077454767229,
+      "grad_norm": 0.13905195891857147,
+      "learning_rate": 0.0001871046476151734,
+      "loss": 1.2346,
+      "step": 640
+    },
+    {
+      "epoch": 0.13031103882903028,
+      "grad_norm": 0.12306763231754303,
+      "learning_rate": 0.00018708430794264213,
+      "loss": 1.0504,
+      "step": 641
+    },
+    {
+      "epoch": 0.13051433218133768,
+      "grad_norm": 0.1077868863940239,
+      "learning_rate": 0.00018706396827011085,
+      "loss": 0.9143,
+      "step": 642
+    },
+    {
+      "epoch": 0.13071762553364505,
+      "grad_norm": 0.1328214555978775,
+      "learning_rate": 0.00018704362859757958,
+      "loss": 1.1223,
+      "step": 643
+    },
+    {
+      "epoch": 0.13092091888595242,
+      "grad_norm": 0.12459075450897217,
+      "learning_rate": 0.00018702328892504833,
+      "loss": 1.1896,
+      "step": 644
+    },
+    {
+      "epoch": 0.13112421223825982,
+      "grad_norm": 0.11860411614179611,
+      "learning_rate": 0.00018700294925251705,
+      "loss": 1.0472,
+      "step": 645
+    },
+    {
+      "epoch": 0.1313275055905672,
+      "grad_norm": 0.11825944483280182,
+      "learning_rate": 0.00018698260957998578,
+      "loss": 1.2391,
+      "step": 646
+    },
+    {
+      "epoch": 0.13153079894287456,
+      "grad_norm": 0.12103937566280365,
+      "learning_rate": 0.0001869622699074545,
+      "loss": 1.0087,
+      "step": 647
+    },
+    {
+      "epoch": 0.13173409229518196,
+      "grad_norm": 0.12289803475141525,
+      "learning_rate": 0.00018694193023492323,
+      "loss": 1.0867,
+      "step": 648
+    },
+    {
+      "epoch": 0.13193738564748933,
+      "grad_norm": 0.12652850151062012,
+      "learning_rate": 0.00018692159056239195,
+      "loss": 1.2047,
+      "step": 649
+    },
+    {
+      "epoch": 0.1321406789997967,
+      "grad_norm": 0.12258271127939224,
+      "learning_rate": 0.00018690125088986068,
+      "loss": 0.9806,
+      "step": 650
+    },
+    {
+      "epoch": 0.1323439723521041,
+      "grad_norm": 0.1285620778799057,
+      "learning_rate": 0.0001868809112173294,
+      "loss": 0.9993,
+      "step": 651
+    },
+    {
+      "epoch": 0.13254726570441147,
+      "grad_norm": 0.11906328797340393,
+      "learning_rate": 0.00018686057154479815,
+      "loss": 1.1029,
+      "step": 652
+    },
+    {
+      "epoch": 0.13275055905671884,
+      "grad_norm": 0.13393160700798035,
+      "learning_rate": 0.00018684023187226688,
+      "loss": 1.1263,
+      "step": 653
+    },
+    {
+      "epoch": 0.1329538524090262,
+      "grad_norm": 0.13850244879722595,
+      "learning_rate": 0.0001868198921997356,
+      "loss": 1.0878,
+      "step": 654
+    },
+    {
+      "epoch": 0.1331571457613336,
+      "grad_norm": 0.13923142850399017,
+      "learning_rate": 0.00018679955252720433,
+      "loss": 1.1637,
+      "step": 655
+    },
+    {
+      "epoch": 0.13336043911364098,
+      "grad_norm": 0.11642129719257355,
+      "learning_rate": 0.00018677921285467305,
+      "loss": 1.1134,
+      "step": 656
+    },
+    {
+      "epoch": 0.13356373246594835,
+      "grad_norm": 0.12743037939071655,
+      "learning_rate": 0.00018675887318214178,
+      "loss": 1.0345,
+      "step": 657
+    },
+    {
+      "epoch": 0.13376702581825575,
+      "grad_norm": 0.11360882222652435,
+      "learning_rate": 0.0001867385335096105,
+      "loss": 1.072,
+      "step": 658
+    },
+    {
+      "epoch": 0.13397031917056312,
+      "grad_norm": 0.1262228637933731,
+      "learning_rate": 0.00018671819383707922,
+      "loss": 1.1546,
+      "step": 659
+    },
+    {
+      "epoch": 0.1341736125228705,
+      "grad_norm": 0.1144820973277092,
+      "learning_rate": 0.00018669785416454798,
+      "loss": 1.0152,
+      "step": 660
+    },
+    {
+      "epoch": 0.1343769058751779,
+      "grad_norm": 0.12834620475769043,
+      "learning_rate": 0.0001866775144920167,
+      "loss": 1.0456,
+      "step": 661
+    },
+    {
+      "epoch": 0.13458019922748526,
+      "grad_norm": 0.11835994571447372,
+      "learning_rate": 0.00018665717481948542,
+      "loss": 0.991,
+      "step": 662
+    },
+    {
+      "epoch": 0.13478349257979264,
+      "grad_norm": 0.11445319652557373,
+      "learning_rate": 0.00018663683514695415,
+      "loss": 1.0116,
+      "step": 663
+    },
+    {
+      "epoch": 0.13498678593210003,
+      "grad_norm": 0.13939061760902405,
+      "learning_rate": 0.00018661649547442287,
+      "loss": 1.153,
+      "step": 664
+    },
+    {
+      "epoch": 0.1351900792844074,
+      "grad_norm": 0.1149614006280899,
+      "learning_rate": 0.0001865961558018916,
+      "loss": 0.9255,
+      "step": 665
+    },
+    {
+      "epoch": 0.13539337263671478,
+      "grad_norm": 0.13376334309577942,
+      "learning_rate": 0.00018657581612936032,
+      "loss": 1.1502,
+      "step": 666
+    },
+    {
+      "epoch": 0.13559666598902215,
+      "grad_norm": 0.13265709578990936,
+      "learning_rate": 0.00018655547645682905,
+      "loss": 1.1292,
+      "step": 667
+    },
+    {
+      "epoch": 0.13579995934132955,
+      "grad_norm": 0.11729206144809723,
+      "learning_rate": 0.0001865351367842978,
+      "loss": 1.2166,
+      "step": 668
+    },
+    {
+      "epoch": 0.13600325269363692,
+      "grad_norm": 0.11903608590364456,
+      "learning_rate": 0.00018651479711176652,
+      "loss": 1.1808,
+      "step": 669
+    },
+    {
+      "epoch": 0.1362065460459443,
+      "grad_norm": 0.11009612679481506,
+      "learning_rate": 0.00018649445743923525,
+      "loss": 0.9364,
+      "step": 670
+    },
+    {
+      "epoch": 0.1364098393982517,
+      "grad_norm": 0.13966090977191925,
+      "learning_rate": 0.00018647411776670397,
+      "loss": 1.2463,
+      "step": 671
+    },
+    {
+      "epoch": 0.13661313275055906,
+      "grad_norm": 0.12319371849298477,
+      "learning_rate": 0.00018645377809417267,
+      "loss": 1.1192,
+      "step": 672
+    },
+    {
+      "epoch": 0.13681642610286643,
+      "grad_norm": 0.13469716906547546,
+      "learning_rate": 0.00018643343842164142,
+      "loss": 1.2376,
+      "step": 673
+    },
+    {
+      "epoch": 0.13701971945517383,
+      "grad_norm": 0.124245285987854,
+      "learning_rate": 0.00018641309874911015,
+      "loss": 1.1145,
+      "step": 674
+    },
+    {
+      "epoch": 0.1372230128074812,
+      "grad_norm": 0.1325312852859497,
+      "learning_rate": 0.00018639275907657887,
+      "loss": 1.1453,
+      "step": 675
+    },
+    {
+      "epoch": 0.13742630615978857,
+      "grad_norm": 0.13344690203666687,
+      "learning_rate": 0.0001863724194040476,
+      "loss": 1.2191,
+      "step": 676
+    },
+    {
+      "epoch": 0.13762959951209597,
+      "grad_norm": 0.1301363855600357,
+      "learning_rate": 0.00018635207973151635,
+      "loss": 0.9982,
+      "step": 677
+    },
+    {
+      "epoch": 0.13783289286440334,
+      "grad_norm": 0.10880762338638306,
+      "learning_rate": 0.00018633174005898507,
+      "loss": 0.8772,
+      "step": 678
+    },
+    {
+      "epoch": 0.1380361862167107,
+      "grad_norm": 0.13281653821468353,
+      "learning_rate": 0.0001863114003864538,
+      "loss": 1.0529,
+      "step": 679
+    },
+    {
+      "epoch": 0.13823947956901808,
+      "grad_norm": 0.13998745381832123,
+      "learning_rate": 0.0001862910607139225,
+      "loss": 1.0996,
+      "step": 680
+    },
+    {
+      "epoch": 0.13844277292132548,
+      "grad_norm": 0.1195378452539444,
+      "learning_rate": 0.00018627072104139124,
+      "loss": 0.9776,
+      "step": 681
+    },
+    {
+      "epoch": 0.13864606627363285,
+      "grad_norm": 0.10932020843029022,
+      "learning_rate": 0.00018625038136885997,
+      "loss": 1.1026,
+      "step": 682
+    },
+    {
+      "epoch": 0.13884935962594022,
+      "grad_norm": 0.1420464664697647,
+      "learning_rate": 0.0001862300416963287,
+      "loss": 1.1428,
+      "step": 683
+    },
+    {
+      "epoch": 0.13905265297824762,
+      "grad_norm": 0.11747555434703827,
+      "learning_rate": 0.00018620970202379742,
+      "loss": 0.9985,
+      "step": 684
+    },
+    {
+      "epoch": 0.139255946330555,
+      "grad_norm": 0.11964225023984909,
+      "learning_rate": 0.00018618936235126617,
+      "loss": 1.0268,
+      "step": 685
+    },
+    {
+      "epoch": 0.13945923968286236,
+      "grad_norm": 0.11939354985952377,
+      "learning_rate": 0.0001861690226787349,
+      "loss": 0.993,
+      "step": 686
+    },
+    {
+      "epoch": 0.13966253303516976,
+      "grad_norm": 0.14188724756240845,
+      "learning_rate": 0.00018614868300620362,
+      "loss": 1.0672,
+      "step": 687
+    },
+    {
+      "epoch": 0.13986582638747713,
+      "grad_norm": 0.12218412756919861,
+      "learning_rate": 0.00018612834333367231,
+      "loss": 1.0664,
+      "step": 688
+    },
+    {
+      "epoch": 0.1400691197397845,
+      "grad_norm": 0.12363380193710327,
+      "learning_rate": 0.00018610800366114107,
+      "loss": 1.1202,
+      "step": 689
+    },
+    {
+      "epoch": 0.1402724130920919,
+      "grad_norm": 0.12523901462554932,
+      "learning_rate": 0.0001860876639886098,
+      "loss": 0.9601,
+      "step": 690
+    },
+    {
+      "epoch": 0.14047570644439927,
+      "grad_norm": 0.1359613537788391,
+      "learning_rate": 0.00018606732431607852,
+      "loss": 1.2674,
+      "step": 691
+    },
+    {
+      "epoch": 0.14067899979670664,
+      "grad_norm": 0.12229263782501221,
+      "learning_rate": 0.00018604698464354724,
+      "loss": 1.0817,
+      "step": 692
+    },
+    {
+      "epoch": 0.14088229314901402,
+      "grad_norm": 0.12188601493835449,
+      "learning_rate": 0.000186026644971016,
+      "loss": 1.1176,
+      "step": 693
+    },
+    {
+      "epoch": 0.1410855865013214,
+      "grad_norm": 0.10588016360998154,
+      "learning_rate": 0.00018600630529848472,
+      "loss": 0.9546,
+      "step": 694
+    },
+    {
+      "epoch": 0.14128887985362878,
+      "grad_norm": 0.11985071748495102,
+      "learning_rate": 0.00018598596562595344,
+      "loss": 1.0765,
+      "step": 695
+    },
+    {
+      "epoch": 0.14149217320593616,
+      "grad_norm": 0.13118812441825867,
+      "learning_rate": 0.00018596562595342216,
+      "loss": 1.0117,
+      "step": 696
+    },
+    {
+      "epoch": 0.14169546655824355,
+      "grad_norm": 0.11992435902357101,
+      "learning_rate": 0.0001859452862808909,
+      "loss": 0.9618,
+      "step": 697
+    },
+    {
+      "epoch": 0.14189875991055093,
+      "grad_norm": 0.11617527902126312,
+      "learning_rate": 0.00018592494660835961,
+      "loss": 1.0459,
+      "step": 698
+    },
+    {
+      "epoch": 0.1421020532628583,
+      "grad_norm": 0.12465415149927139,
+      "learning_rate": 0.00018590460693582834,
+      "loss": 1.0635,
+      "step": 699
+    },
+    {
+      "epoch": 0.1423053466151657,
+      "grad_norm": 0.12672793865203857,
+      "learning_rate": 0.00018588426726329706,
+      "loss": 1.26,
+      "step": 700
     }
   ],
   "logging_steps": 1,
@@ -4227,7 +4927,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.
+  "total_flos": 3.8996507087496806e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
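
The appended log_history entries all share the Trainer's per-step schema (epoch, grad_norm, learning_rate, loss, step). A minimal sketch for summarizing the new 601-700 window, assuming a local copy of this trainer_state.json:

```python
import json

with open("checkpoint-700/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

# Select only the entries this commit appended (steps 601-700).
window = [e for e in state["log_history"] if 601 <= e.get("step", 0) <= 700]
mean_loss = sum(e["loss"] for e in window) / len(window)
print(f"steps 601-700: mean loss over {len(window)} entries = {mean_loss:.4f}")
```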