mveroe committed
Commit 2629a2a · verified · 1 Parent(s): 4f711df

Training in progress, epoch 1, checkpoint

checkpoint-4/config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 131072,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.55.2",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151667
+ }
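
The config above describes a 28-layer Qwen2 causal LM: hidden size 1536, 12 attention heads, 2 KV heads, full attention in every layer, bf16 weights, and a 151667-token vocabulary. A minimal loading sketch, assuming the files in this commit are available locally under `checkpoint-4/`; the calls are standard `transformers` API (the config records version 4.55.2), and note that no tokenizer files are part of this checkpoint:

```python
# Minimal sketch: load the architecture and weights saved in this commit.
# Assumes ./checkpoint-4 contains the config.json and model.safetensors above.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("checkpoint-4")
print(config.model_type, config.num_hidden_layers, config.hidden_size)  # qwen2 28 1536

model = AutoModelForCausalLM.from_pretrained(
    "checkpoint-4",
    torch_dtype="auto",  # picks up "torch_dtype": "bfloat16" from config.json
)
```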
checkpoint-4/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "max_new_tokens": 2048,
+ "transformers_version": "4.55.2"
+ }
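
generation_config.json only pins the BOS/EOS token ids and a default `max_new_tokens` of 2048. A small sketch, again assuming the local `checkpoint-4/` directory, showing that these defaults travel with the checkpoint and apply to `generate()` unless a call overrides them:

```python
# Sketch: the defaults in generation_config.json are loaded alongside the model
# and used by generate() when a call does not override them.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("checkpoint-4")
print(gen_config.max_new_tokens, gen_config.eos_token_id)  # 2048 151643
```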
checkpoint-4/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcd10ffa98eb215d10606ffa6815b8206d3892df53df847b6ece3f2c7c7ba78c
+ size 3086640776
checkpoint-4/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e826e28cec2670d470b28bd7f3fb3019715365253eb658304c8c04b1caf84ba
+ size 6056331
checkpoint-4/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed69d3ae118d797e33700fb938c2b3082fd9d43cc793829f446fd120f845714b
+ size 15365
checkpoint-4/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86accf27064cdd503053e90476a6bd10de333d4ff0594535ad55ea13a473c91d
+ size 15429
checkpoint-4/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18ca8d714ef40be035404c1957b5a4dee84e1f43980408393f8aa710552ee6f6
+ size 15429
checkpoint-4/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cfdebe99e40accc9c9d8f09c63136a14abda997d9b501969ec8e16e9d183179
+ size 15429
checkpoint-4/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a93549b472d84e83e3dbfa90b85dc1f62d693c070a5d79ac12d566104c69cf7
+ size 1465
checkpoint-4/trainer_state.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 4,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 111.0,
+ "learning_rate": 0.0,
+ "loss": 5.4707,
+ "step": 1
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 116.0,
+ "learning_rate": 2.5e-05,
+ "loss": 5.6553,
+ "step": 2
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 94.5,
+ "learning_rate": 5e-05,
+ "loss": 4.4183,
+ "step": 3
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 992.0,
+ "learning_rate": 4.877641290737884e-05,
+ "loss": 5.2182,
+ "step": 4
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 12,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1738958270627840.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
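
trainer_state.json records where the run stands: 4 optimizer steps completed (epoch 1 of 3, 12 max steps) and a per-step history of loss, gradient norm, and learning rate, plus the TrainerControl callback state. A hedged sketch of inspecting this state and resuming from it; the `Trainer` setup itself is not shown in this commit (its TrainingArguments are serialized in training_args.bin below) and is only referenced in a comment:

```python
# Sketch: read the recorded training state from this checkpoint.
import json

with open("checkpoint-4/trainer_state.json") as f:
    state = json.load(f)

print(state["epoch"], state["global_step"], state["max_steps"])  # 1.0 4 12
for entry in state["log_history"]:
    print(f'step {entry["step"]}: loss={entry["loss"]}, lr={entry["learning_rate"]}')

# To continue the run, a transformers.Trainer configured like the original one
# restores model.safetensors, optimizer.pt, scheduler.pt and the per-process
# rng_state_*.pth files via:
#   trainer.train(resume_from_checkpoint="checkpoint-4")
```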
checkpoint-4/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db4d9f2c433c22af5facd43e63c468293f9b658057cb89623c00356c03ce653f
+ size 5969