dada22231 committed
Commit 2f61d44 · verified · 1 Parent(s): 9e8d63e

Training in progress, step 75
README.md ADDED
@@ -0,0 +1,169 @@
+ ---
+ library_name: peft
+ license: other
+ base_model: Qwen/Qwen1.5-7B
+ tags:
+ - axolotl
+ - generated_from_trainer
+ - trl
+ - grpo
+ model-index:
+ - name: 2296f73f-99bd-4e6b-95ca-b2cd4a1e78af
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.11.0.dev0`
+ ```yaml
+ adapter: lora
+ base_model: Qwen/Qwen1.5-7B
+ bf16: true
+ chat_template: llama3
+ dataloader_num_workers: 0
+ dataloader_pin_memory: false
+ dataset_info: dcaeb768-0f9e-4c34-920e-d80288595c2a
+ dataset_prepared_path: null
+ datasets:
+ - data_files:
+   - ecb638fa488a6a93_train_data.json
+   ds_type: json
+   format: custom
+   path: /workspace/input_data/
+   type:
+     field_instruction: instruct
+     field_output: output
+     format: '{instruction}'
+     no_input_format: '{instruction}'
+     system_format: '{system}'
+     system_prompt: ''
+ ddp_broadcast_buffers: false
+ ddp_bucket_cap_mb: 25
+ ddp_timeout: 7200
+ debug: null
+ deepspeed: null
+ evaluation_strategy: 'no'
+ flash_attention: false
+ flash_attn_cross_entropy: false
+ flash_attn_rms_norm: false
+ fp16: false
+ fsdp: null
+ fsdp_config: null
+ gpu_memory_limit: null
+ gradient_accumulation_steps: 4
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ group_by_length: false
+ hub_model_commit_message: Training checkpoint - step {current_step}
+ hub_model_id: dada22231/2296f73f-99bd-4e6b-95ca-b2cd4a1e78af
+ hub_model_revision: main
+ hub_repo: null
+ hub_strategy: checkpoint
+ hub_token: null
+ learning_rate: 0.0002
+ load_in_4bit: false
+ load_in_8bit: false
+ local_rank: null
+ logging_steps: 1
+ lora_alpha: 256
+ lora_dropout: 0.05
+ lora_fan_in_fan_out: null
+ lora_model_dir: null
+ lora_modules_to_save:
+ - embed_tokens
+ - lm_head
+ lora_r: 128
+ lora_target_linear: true
+ lr_scheduler: constant_with_warmup
+ max_memory: null
+ max_steps: 1500
+ micro_batch_size: 8
+ mlflow_experiment_name: /tmp/ecb638fa488a6a93_train_data.json
+ model_type: AutoModelForCausalLM
+ optimizer: adamw_torch_fused
+ output_dir: ./outputs
+ pad_to_sequence_len: true
+ push_dataset_card: false
+ push_to_hub: true
+ resume_from_checkpoint: null
+ s2_attention: null
+ sample_packing: true
+ save_lora_adapter: false
+ save_merged_lora_model: true
+ save_only_model: true
+ save_safetensors: true
+ save_steps: 75
+ save_strategy: steps
+ save_total_limit: 5
+ sequence_len: 4096
+ special_tokens: null
+ strict: false
+ tf32: true
+ tokenizer_type: AutoTokenizer
+ torch_compile: false
+ torch_compile_backend: inductor
+ train_on_inputs: false
+ trust_remote_code: true
+ val_set_size: 0
+ wandb_entity: null
+ wandb_mode: online
+ wandb_name: dcaeb768-0f9e-4c34-920e-d80288595c2a
+ wandb_project: Gradients-On-Demand
+ wandb_run: your_name
+ wandb_runid: dcaeb768-0f9e-4c34-920e-d80288595c2a
+ warmup_steps: 150
+ weight_decay: 0.01
+ xformers_attention: null
+
+ ```
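+
+ A config like this is consumed by the axolotl CLI; with axolotl `0.11.0.dev0` the launch would typically look like `axolotl train config.yaml` (or `accelerate launch -m axolotl.cli.train config.yaml` via the older entry point). The exact invocation is an assumption; it is not recorded in this commit.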
+
+ </details><br>
+
+ # 2296f73f-99bd-4e6b-95ca-b2cd4a1e78af
+
+ This model is a fine-tuned version of [Qwen/Qwen1.5-7B](https://huggingface.co/Qwen/Qwen1.5-7B) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
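+
+ As a minimal, unverified usage sketch (model and adapter IDs are taken from the config above), the adapter can be attached to the base model with `peft`:
+
+ ```python
+ # Hedged example: load the base model and attach this LoRA adapter.
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B", torch_dtype="bfloat16")
+ model = PeftModel.from_pretrained(base, "dada22231/2296f73f-99bd-4e6b-95ca-b2cd4a1e78af")
+ tokenizer = AutoTokenizer.from_pretrained("dada22231/2296f73f-99bd-4e6b-95ca-b2cd4a1e78af")
+
+ inputs = tokenizer("Hello, world", return_tensors="pt")
+ out = model.generate(**inputs, max_new_tokens=32)
+ print(tokenizer.decode(out[0], skip_special_tokens=True))
+ ```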
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a hedged PyTorch sketch of the optimizer/scheduler pairing follows the list):
+ - learning_rate: 0.0002
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 32 (8 per device × 4 accumulation steps)
+ - optimizer: ADAMW_TORCH_FUSED with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: constant_with_warmup
+ - lr_scheduler_warmup_steps: 150
+ - training_steps: 1500
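+
+ A minimal PyTorch sketch of that pairing, mirroring the listed values rather than the trainer's internals (the small `Linear` stands in for the real model):
+
+ ```python
+ import torch
+ from transformers import get_constant_schedule_with_warmup
+
+ model = torch.nn.Linear(8, 8)  # stand-in for the fine-tuned model
+ optimizer = torch.optim.AdamW(
+     model.parameters(), lr=2e-4, betas=(0.9, 0.999), eps=1e-8,
+     weight_decay=0.01, fused=True,  # adamw_torch_fused; needs a fused-capable device/dtype
+ )
+ scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=150)
+ ```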
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.15.2
+ - Transformers 4.52.4
+ - Pytorch 2.7.1+cu128
+ - Datasets 3.6.0
+ - Tokenizers 0.21.1
adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "unsloth/SmolLM-1.7B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 256,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "embed_tokens",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 128,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "gate_proj",
+     "v_proj",
+     "o_proj",
+     "up_proj",
+     "down_proj",
+     "k_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
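
For reference, a hedged `peft` sketch of an equivalent adapter configuration (field values mirror the JSON above; this is illustrative, not the code that produced it):

```python
# Hedged example: a LoraConfig mirroring adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=128,
    lora_alpha=256,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "gate_proj", "v_proj", "o_proj",
                    "up_proj", "down_proj", "k_proj"],
    modules_to_save=["embed_tokens", "lm_head"],
    task_type="CAUSAL_LM",
)
```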
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:465744cb64224a402eabbe8d37014239041dbc83f5545b208ef159d0fe46a18c
+ size 981512984
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
chat_template.jinja ADDED
@@ -0,0 +1,5 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+ '+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+ ' }}{% endif %}
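
This is the Llama-3-style template requested by `chat_template: llama3` in the training config. A hedged rendering sketch (repo ID from the config above; the commented output shape is inferred from the template, not verified against this checkpoint):

```python
# Hedged example: render messages through the shipped chat template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("dada22231/2296f73f-99bd-4e6b-95ca-b2cd4a1e78af")
messages = [{"role": "user", "content": "Summarize LoRA in one sentence."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # <|start_header_id|>user<|end_header_id|> ... <|eot_id|><|start_header_id|>assistant<|end_header_id|>
```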
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 32,
+   "pad_token_id": 2,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.4",
+   "use_cache": false,
+   "vocab_size": 49152
+ }
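
For orientation, this config describes a 24-layer Llama-style network (hidden 2048, 32 heads, vocab 49152), consistent with the `unsloth/SmolLM-1.7B-Instruct` base named in adapter_config.json rather than the README's Qwen base. A hedged sketch that instantiates the architecture from the shipped config, with random weights:

```python
# Hedged example: build the architecture from config.json (randomly initialized).
from transformers import AutoConfig, AutoModelForCausalLM

cfg = AutoConfig.from_pretrained("dada22231/2296f73f-99bd-4e6b-95ca-b2cd4a1e78af")
assert cfg.hidden_size // cfg.num_attention_heads == cfg.head_dim  # 2048 / 32 == 64
model = AutoModelForCausalLM.from_config(cfg)  # ~1.7B params, RAM permitting
print(sum(p.numel() for p in model.parameters()))
```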
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "do_sample": true,
+   "eos_token_id": 50256,
+   "transformers_version": "4.51.3"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
runs/Jun09_23-22-31_c114e6e933dd/events.out.tfevents.1749511355.c114e6e933dd.281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b29549a0ad006d8bc912aad7003ba48351b1251cf4cb142801e51af06509dd80
+ size 13612
runs/Jun09_23-31-32_9c235502913a/events.out.tfevents.1749511899.9c235502913a.281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82fb21d4c6709ed19de023a56fe96bf99c64a9d55de9bd24a6d56eca957bf785
+ size 7964
runs/Jun10_08-35-17_13b1206c444e/events.out.tfevents.1749544520.13b1206c444e.281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c1eabc3881eb8b03f46a893ca5ff6923e893e5622f62e719adc1ce8defcf173
+ size 2342413
runs/Jun10_15-32-42_9609e117153f/events.out.tfevents.1749569582.9609e117153f.280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fb82e81c9f7a16b79e78ac8386a6676e2bb46540a1e64ee13e61e17e6e61f59
+ size 7735
runs/Jun10_15-36-01_4ac9cfab9c4f/events.out.tfevents.1749569781.4ac9cfab9c4f.280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b89ae32d0ddfce162814d3e2f659898d441b422d823bba93c39b3d2aba88b1d6
+ size 7762
runs/Jun10_15-41-31_66aabbe28b99/events.out.tfevents.1749570111.66aabbe28b99.280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e300b1341d7d49a7bb644fd40e7550b0525f07835e57a1f79fdcb6c9e2fb4e99
+ size 7760
runs/Jun10_15-43-46_ed3e63477d80/events.out.tfevents.1749570233.ed3e63477d80.281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0190cdb1353a4900e54ddca040ce5f57075cab85fb6cdebc355c831112295cd6
+ size 2865114
runs/Jun10_15-43-49_cb3333f60b74/events.out.tfevents.1749570249.cb3333f60b74.280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29bce4e30493f9064af0cf40e1a2b81732c0ce13e598d20be02b44fb28940abb
+ size 11816
runs/Jun10_15-52-24_79913464786a/events.out.tfevents.1749570765.79913464786a.280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22201be31b78560a708bd4c403c8f2ffca43e5e553364964b16be4805a70ba55
+ size 7732
runs/Jun10_15-55-31_d2fb0129b76c/events.out.tfevents.1749570951.d2fb0129b76c.280.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bce72a313ace4b3ad3d38e7509f82e40abbc551d6876ef1a10b06259c895d089
+ size 1107390
runs/Jun10_18-18-52_281190cd23f4/events.out.tfevents.1749579538.281190cd23f4.281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1e0e99b9d2393448a8df149d455afe759a2a04f58f7ae11ca18d308cebd9ff8
+ size 7904
runs/Jun10_18-50-44_658ebac1f2a9/events.out.tfevents.1749581450.658ebac1f2a9.281.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:631994a159ba98dce996e687535d7db3079de1d02261c57f5c41a2d376ccff4f
+ size 7877
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": {
+     "content": "<|im_start|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<empty_output>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,155 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<repo_name>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<jupyter_script>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "model_max_length": 2048,
+   "pad_token": "<empty_output>",
+   "padding_side": "left",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>",
+   "vocab_size": 49152
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d03a7322ecf5b32667396bffe140e92ebcd720c4f3d7af5b2a9578dbddafb2b
+ size 8273
vocab.json ADDED
The diff for this file is too large to render. See raw diff