dimasik87 committed
Commit abbd614 · verified · 1 Parent(s): 84646ce

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d8ea25614d9527aee2416f569ad9d4af8765984ce7ed545b91b787ac4d54ef32
+ oid sha256:66ae35d54eadc05b0cc2e4c63799d4aa3ee7f09b5011558c4ee362c661bc370f
  size 598799664
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a3ddc0bec23116b66d34935a53165b8645854534678c15f5cb6da531ec0c470
+ oid sha256:6cb89a57283f79f07cb71f0e9f0c78af5f17fdd954d58987850117d5895e6165
  size 167987322
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c691bc6b98e7712e66fc2e35e98847903daf6e08dee56de3671f1e83638f9fa5
+ oid sha256:35065c6e466e3b1a6e927aae2bc007780adcfae5e57fb17b718b3e1e775e7d48
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c188a6a4749e6ca627bb6d536eb7443f499d5b1b88d98a78f9c713443e010d9c
+ oid sha256:015707cb16790250630febca682498cb5d3456d5a13443b953687f19dc7d59ed
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 2.4826228618621826,
- "best_model_checkpoint": "miner_id_24/checkpoint-10",
- "epoch": 0.011296243998870376,
+ "best_metric": 2.3386573791503906,
+ "best_model_checkpoint": "miner_id_24/checkpoint-20",
+ "epoch": 0.022592487997740753,
  "eval_steps": 5,
- "global_step": 10,
+ "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -52,6 +52,43 @@
  "eval_samples_per_second": 36.302,
  "eval_steps_per_second": 9.1,
  "step": 10
+ },
+ {
+ "epoch": 0.013555492798644452,
+ "grad_norm": 2.089750051498413,
+ "learning_rate": 0.000163742398974869,
+ "loss": 7.7554,
+ "step": 12
+ },
+ {
+ "epoch": 0.016944365998305563,
+ "grad_norm": 2.362656831741333,
+ "learning_rate": 0.00013090169943749476,
+ "loss": 8.1899,
+ "step": 15
+ },
+ {
+ "epoch": 0.016944365998305563,
+ "eval_loss": 2.393854856491089,
+ "eval_runtime": 20.626,
+ "eval_samples_per_second": 36.168,
+ "eval_steps_per_second": 9.066,
+ "step": 15
+ },
+ {
+ "epoch": 0.020333239197966677,
+ "grad_norm": 2.424359083175659,
+ "learning_rate": 9.372094804706867e-05,
+ "loss": 8.0077,
+ "step": 18
+ },
+ {
+ "epoch": 0.022592487997740753,
+ "eval_loss": 2.3386573791503906,
+ "eval_runtime": 20.5662,
+ "eval_samples_per_second": 36.273,
+ "eval_steps_per_second": 9.093,
+ "step": 20
  }
  ],
  "logging_steps": 3,
@@ -80,7 +117,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2818763107860480.0,
+ "total_flos": 5440868324474880.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null