Minbyul committed · Commit b526ec0 · verified · 1 Parent(s): 73e9b8b

Model save

README.md CHANGED
@@ -2,15 +2,11 @@
 license: apache-2.0
 base_model: BioMistral/BioMistral-7B
 tags:
-- alignment-handbook
-- trl
-- sft
-- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
-- HuggingFaceH4/deita-10k-v0-sft
+- generator
 model-index:
 - name: biomistral-7b-wo-kqa_golden-iter-sft-step1
   results: []
@@ -21,9 +17,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # biomistral-7b-wo-kqa_golden-iter-sft-step1
 
-This model is a fine-tuned version of [BioMistral/BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B) on the HuggingFaceH4/deita-10k-v0-sft dataset.
+This model is a fine-tuned version of [BioMistral/BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.4481
+- Loss: 1.4295
 
 ## Model description
 
@@ -60,9 +56,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 0.7793 | 0.92 | 9 | 1.4149 |
-| 0.53 | 1.95 | 19 | 1.4281 |
-| 0.4017 | 2.77 | 27 | 1.4481 |
+| 0.8334 | 0.92 | 9 | 1.4131 |
+| 0.6303 | 1.95 | 19 | 1.4060 |
+| 0.4647 | 2.77 | 27 | 1.4295 |
 
 
 ### Framework versions
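
The card above describes a standard SFT checkpoint, so it can be loaded with the usual transformers API. A minimal sketch, assuming the repository id is Minbyul/biomistral-7b-wo-kqa_golden-iter-sft-step1 (inferred from the model-index name; the prompt is purely illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repository id, inferred from the model-index name in the card above.
model_id = "Minbyul/biomistral-7b-wo-kqa_golden-iter-sft-step1"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

# Illustrative prompt only; the card does not prescribe a prompt format.
inputs = tokenizer("What are the first-line treatments for hypertension?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```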
all_results.json CHANGED
@@ -1,13 +1,8 @@
 {
     "epoch": 2.77,
-    "eval_loss": 1.4481348991394043,
-    "eval_runtime": 56.5827,
-    "eval_samples": 4044,
-    "eval_samples_per_second": 6.08,
-    "eval_steps_per_second": 0.389,
-    "train_loss": 0.5429546391522443,
-    "train_runtime": 799.7979,
+    "train_loss": 0.6124309632513258,
+    "train_runtime": 676.0028,
     "train_samples": 4750,
-    "train_samples_per_second": 2.296,
-    "train_steps_per_second": 0.034
+    "train_samples_per_second": 2.72,
+    "train_steps_per_second": 0.04
 }
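
As a quick sanity check of the aggregate numbers above, the runtime and throughput in all_results.json can be cross-multiplied to recover the step count reported in trainer_state.json. A minimal sketch, assuming the file has been downloaded locally:

```python
import json

with open("all_results.json") as f:
    results = json.load(f)

# 676.0028 s * 0.04 steps/s ≈ 27, matching the final "step": 27 in trainer_state.json.
approx_steps = results["train_runtime"] * results["train_steps_per_second"]
print(f'train_loss={results["train_loss"]:.4f}  approx_steps={approx_steps:.0f}')
```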
config.json CHANGED
@@ -21,6 +21,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.39.0.dev0",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 32000
 }
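
The only change to config.json is use_cache flipping from true to false; this flag is commonly disabled during training (for example when gradient checkpointing is on), and the saved config simply reflects that, though the commit itself does not state the reason. For generation, the KV cache can be re-enabled at load time or afterwards, as in this sketch (the repository id is again an assumption):

```python
from transformers import AutoModelForCausalLM

model_id = "Minbyul/biomistral-7b-wo-kqa_golden-iter-sft-step1"  # assumed repository id

# Override the flag when loading ...
model = AutoModelForCausalLM.from_pretrained(model_id, use_cache=True)

# ... or flip it afterwards; generate() will then reuse past key/values.
model.config.use_cache = True
```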
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c18da0a1a3f09bc87f97cfe1c0446f523a7c2f9ae81ce7ae332e3db5ee777e4e
+oid sha256:da75fcbb729830fc5bffe2a3dde01be33068b3c5d1138b6f9e6f17fd94b3dff8
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f3a4bd00937ce42817336113fd5335cfb14d61cd88d88f473416b1aa7b27a802
+oid sha256:71e80be36b549dbfc42cbfe4fc54e3b51d0f03a4dc48a7f47646010492d2f314
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54d96cc79ddfa2f761ea508c6b6d3554e83caff9716885c615560f64626cade6
+oid sha256:867985b25124dc090645cae581460d25989b30e7b2f668fc2769f6e68ded1b79
 size 4540516344
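
The three shard updates above only change the Git LFS pointers (oid sha256: and size), not the shard layout. A downloaded shard can be checked against its pointer with plain hashlib; a minimal sketch, again assuming the repository id:

```python
import hashlib
from huggingface_hub import hf_hub_download

repo_id = "Minbyul/biomistral-7b-wo-kqa_golden-iter-sft-step1"  # assumed repository id
filename = "model-00001-of-00003.safetensors"
expected_oid = "da75fcbb729830fc5bffe2a3dde01be33068b3c5d1138b6f9e6f17fd94b3dff8"  # oid from the pointer above

path = hf_hub_download(repo_id=repo_id, filename=filename)

# Hash the shard in 1 MiB chunks and compare against the pointer's oid.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest() == expected_oid)  # True if the local shard matches the LFS pointer
```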
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 2.77,
-    "train_loss": 0.5429546391522443,
-    "train_runtime": 799.7979,
+    "train_loss": 0.6124309632513258,
+    "train_runtime": 676.0028,
     "train_samples": 4750,
-    "train_samples_per_second": 2.296,
-    "train_steps_per_second": 0.034
+    "train_samples_per_second": 2.72,
+    "train_steps_per_second": 0.04
 }
trainer_state.json CHANGED
@@ -10,78 +10,78 @@
   "log_history": [
     {
       "epoch": 0.1,
-      "grad_norm": 12.305452972312954,
+      "grad_norm": 24.191700961175112,
       "learning_rate": 6.666666666666667e-06,
-      "loss": 0.6151,
+      "loss": 0.5581,
       "step": 1
     },
     {
       "epoch": 0.51,
-      "grad_norm": 39.79159513758007,
+      "grad_norm": 12.576350745768869,
       "learning_rate": 1.9659258262890683e-05,
-      "loss": 0.7793,
+      "loss": 0.8334,
       "step": 5
     },
     {
       "epoch": 0.92,
-      "eval_loss": 1.4149478673934937,
-      "eval_runtime": 41.8351,
-      "eval_samples_per_second": 8.223,
-      "eval_steps_per_second": 0.526,
+      "eval_loss": 1.4131343364715576,
+      "eval_runtime": 38.4185,
+      "eval_samples_per_second": 8.954,
+      "eval_steps_per_second": 0.573,
       "step": 9
     },
     {
       "epoch": 1.03,
-      "grad_norm": 3.3298268761353786,
+      "grad_norm": 17.837211332799143,
       "learning_rate": 1.608761429008721e-05,
-      "loss": 0.7329,
+      "loss": 0.738,
       "step": 10
     },
     {
       "epoch": 1.54,
-      "grad_norm": 2.6517437110683546,
+      "grad_norm": 3.797877129136344,
       "learning_rate": 1e-05,
-      "loss": 0.53,
+      "loss": 0.6303,
       "step": 15
     },
     {
       "epoch": 1.95,
-      "eval_loss": 1.4280900955200195,
-      "eval_runtime": 42.1671,
-      "eval_samples_per_second": 8.158,
-      "eval_steps_per_second": 0.522,
+      "eval_loss": 1.4060215950012207,
+      "eval_runtime": 39.241,
+      "eval_samples_per_second": 8.766,
+      "eval_steps_per_second": 0.561,
       "step": 19
     },
     {
       "epoch": 2.05,
-      "grad_norm": 3.365901304289387,
+      "grad_norm": 7.485764754081964,
       "learning_rate": 3.912385709912794e-06,
-      "loss": 0.4284,
+      "loss": 0.5348,
       "step": 20
     },
     {
       "epoch": 2.56,
-      "grad_norm": 4.7684471498019105,
+      "grad_norm": 3.0173543034517323,
       "learning_rate": 3.4074173710931804e-07,
-      "loss": 0.4017,
+      "loss": 0.4647,
       "step": 25
     },
     {
       "epoch": 2.77,
-      "eval_loss": 1.4481348991394043,
-      "eval_runtime": 42.1215,
-      "eval_samples_per_second": 8.167,
-      "eval_steps_per_second": 0.522,
+      "eval_loss": 1.4294962882995605,
+      "eval_runtime": 38.6474,
+      "eval_samples_per_second": 8.901,
+      "eval_steps_per_second": 0.569,
       "step": 27
     },
     {
       "epoch": 2.77,
       "step": 27,
       "total_flos": 5600905789440.0,
-      "train_loss": 0.5429546391522443,
-      "train_runtime": 799.7979,
-      "train_samples_per_second": 2.296,
-      "train_steps_per_second": 0.034
+      "train_loss": 0.6124309632513258,
+      "train_runtime": 676.0028,
+      "train_samples_per_second": 2.72,
+      "train_steps_per_second": 0.04
     }
   ],
   "logging_steps": 5,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd2ec34fa4d25f81a1a420f99434940129f0d4a5a125492f1dadb1493bd5e669
-size 6264
+oid sha256:9605dab41a6f0bcc6542eda4f5de7efa4b0c431b8dd3c26e5d4c64263bed48ba
+size 6200
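
Finally, the updated run can be inspected locally from the two state files touched above: trainer_state.json holds the log_history shown in its diff, and training_args.bin is a pickled TrainingArguments object, which is why only its LFS pointer changes. A minimal sketch, assuming both files sit in the working directory and transformers is installed for unpickling; weights_only=False is needed on recent PyTorch versions:

```python
import json

import torch

# Loss curve: training log entries carry "loss", evaluation entries carry "eval_loss".
with open("trainer_state.json") as f:
    state = json.load(f)
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print("train:", train_curve)  # e.g. [(1, 0.5581), (5, 0.8334), ...]
print("eval: ", eval_curve)   # e.g. [(9, 1.4131...), (19, 1.4060...), (27, 1.4295...)]

# training_args.bin is a full pickle of TrainingArguments, not a tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```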