shibajustfor committed
Commit 6e96428 (verified) · 1 parent: e294a3f

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6c109aa6c05e83e81fd9fae11bfa61cc2d9528196064f893d40862948038533
+oid sha256:8838cfd1ed0bc19e91b5a81645cbfeae2b22006d9ec0630929198bed13057925
 size 39256456
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc23ad10e8ff024f2e5dd7a2166f8dbc531521be6e85a458e5eae0be327f8d21
+oid sha256:ef1694a21b60db646930f981906f7617666debe86f16c9096862c34b8c05b1dd
 size 20632826
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0327c646fbcebdbfe58bb39ffa0aa64a521ffb921c8df37b9e57436d246f1866
+oid sha256:caa173be0daf3b70291aa54f3fc172b70d25960de7e9a07e7f57c5f27ae513c9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b80fcc7599efca0c6313d990c467c2eb3001742b23ddaadc22e3499c12cea79
+oid sha256:81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda
 size 1064
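Each of the checkpoint binaries above is tracked with Git LFS, so the diff only changes the three-line pointer file (spec version, sha256 oid, byte size), not the payload itself. As an illustrative sketch only (the function names and the idea of keeping the pointer and the downloaded payload at two separate local paths are assumptions, not part of this repo), a pointer can be cross-checked against a downloaded file like this:

import hashlib
import re

def parse_lfs_pointer(pointer_path):
    # A Git LFS pointer has three lines: spec version, "oid sha256:<hash>", "size <bytes>".
    with open(pointer_path, "r", encoding="utf-8") as f:
        text = f.read()
    oid = re.search(r"oid sha256:([0-9a-f]{64})", text).group(1)
    size = int(re.search(r"size (\d+)", text).group(1))
    return oid, size

def matches_pointer(pointer_path, object_path):
    # True if the downloaded binary matches the pointer's sha256 oid and byte size.
    oid, size = parse_lfs_pointer(pointer_path)
    with open(object_path, "rb") as f:
        data = f.read()
    return hashlib.sha256(data).hexdigest() == oid and len(data) == size

In a normal checkout with git-lfs installed the working-tree file is already the smudged payload, so the two-path split here is purely for illustration.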
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0004588987348161881,
+  "epoch": 0.0009177974696323762,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 63.628,
       "eval_steps_per_second": 31.815,
       "step": 50
+    },
+    {
+      "epoch": 0.0005506784817794257,
+      "grad_norm": 6.173509120941162,
+      "learning_rate": 0.0002,
+      "loss": 3.0467,
+      "step": 60
+    },
+    {
+      "epoch": 0.0006424582287426633,
+      "grad_norm": 2.9664249420166016,
+      "learning_rate": 0.0002,
+      "loss": 3.0159,
+      "step": 70
+    },
+    {
+      "epoch": 0.0007342379757059009,
+      "grad_norm": 3.5428833961486816,
+      "learning_rate": 0.0002,
+      "loss": 3.0559,
+      "step": 80
+    },
+    {
+      "epoch": 0.0008260177226691386,
+      "grad_norm": 4.495511054992676,
+      "learning_rate": 0.0002,
+      "loss": 2.8184,
+      "step": 90
+    },
+    {
+      "epoch": 0.0009177974696323762,
+      "grad_norm": 4.205393314361572,
+      "learning_rate": 0.0002,
+      "loss": 2.8674,
+      "step": 100
+    },
+    {
+      "epoch": 0.0009177974696323762,
+      "eval_loss": 2.848527431488037,
+      "eval_runtime": 719.5853,
+      "eval_samples_per_second": 63.755,
+      "eval_steps_per_second": 31.878,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2447388966912000.0,
+  "total_flos": 4894777933824000.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null