minyong committed
Commit e595957 · verified · 1 Parent(s): 462c8c6

Training in progress, epoch 0

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
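This new rule is what running "git lfs track tokenizer.json" would append: from this commit onward, tokenizer.json is stored as a small Git LFS pointer rather than a regular blob (its pointer file appears further down in this commit).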
KETI_b1_s4_e1_training_log.log ADDED
@@ -0,0 +1,28 @@
+03/28/2025 03:26:22 - INFO - Output Directory: output/gemma-3-27b-pt/20250328_032606_gemma-3-27b-pt_LoRA
+03/28/2025 03:26:22 - INFO - Experiment name: KETI_b1_s4_e1
+03/28/2025 03:26:22 - INFO - Using 6 GPU(s): NVIDIA A100-SXM4-80GB
+03/28/2025 03:26:22 - INFO - torch_dtype: torch.bfloat16
+03/28/2025 03:26:22 - INFO - ✅ Training in FFT or LoRA mode.
+03/28/2025 03:26:45 - INFO - Initializing LORA model...
+03/28/2025 03:27:05 - INFO - gcc -pthread -B /root/pai/envs/llm-finetuning/compiler_compat -DNDEBUG -fwrapv -O2 -Wall -fPIC -O2 -isystem /root/pai/envs/llm-finetuning/include -fPIC -O2 -isystem /root/pai/envs/llm-finetuning/include -fPIC -c /tmp/tmpvhsu4dwj/test.c -o /tmp/tmpvhsu4dwj/test.o
+03/28/2025 03:27:06 - INFO - gcc -pthread -B /root/pai/envs/llm-finetuning/compiler_compat -DNDEBUG -fwrapv -O2 -Wall -fPIC -O2 -isystem /root/pai/envs/llm-finetuning/include -fPIC -O2 -isystem /root/pai/envs/llm-finetuning/include -fPIC -c /tmp/tmp_hmdziqb/test.c -o /tmp/tmp_hmdziqb/test.o
+03/28/2025 03:27:07 - INFO - Start Training!
+03/28/2025 03:27:39 - INFO - [Epoch 0.11] [Step 10] loss: 3.5848
+03/28/2025 03:28:04 - INFO - [Epoch 0.22] [Step 20] loss: 3.0443
+03/28/2025 03:28:30 - INFO - [Epoch 0.33] [Step 30] loss: 2.9392
+03/28/2025 03:28:55 - INFO - [Epoch 0.44] [Step 40] loss: 2.9002
+03/28/2025 03:29:18 - INFO - [Epoch 0.55] [Step 50] loss: 2.8820
+03/28/2025 03:29:42 - INFO - [Epoch 0.66] [Step 60] loss: 2.8730
+03/28/2025 03:30:06 - INFO - [Epoch 0.77] [Step 70] loss: 2.8597
+03/28/2025 03:30:29 - INFO - [Epoch 0.88] [Step 80] loss: 2.8434
+03/28/2025 03:30:52 - INFO - [Epoch 0.99] [Step 90] loss: 2.8448
+03/28/2025 03:31:18 - INFO - ✅ Training complete. Logging system usage...
+03/28/2025 03:31:18 - INFO - >> System Usage - CPU: 3.3%, RAM: 3.6%, SSD: 72.60GB / 1888.43GB
+03/28/2025 03:31:18 - INFO - >> GPU 0: 78.23 GB used
+03/28/2025 03:31:18 - INFO - >> GPU 1: 78.33 GB used
+03/28/2025 03:31:18 - INFO - >> GPU 2: 77.85 GB used
+03/28/2025 03:31:18 - INFO - >> GPU 3: 78.40 GB used
+03/28/2025 03:31:18 - INFO - >> GPU 4: 79.06 GB used
+03/28/2025 03:31:18 - INFO - >> GPU 5: 77.98 GB used
+03/28/2025 03:31:18 - INFO - >> Total GPU Memory Used: 469.84 GB
+03/28/2025 03:31:18 - INFO - >> Total GPU Power Consumption: 550.97 W
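The ">> System Usage" block at the end of the log is sampled once after training finishes. As a rough sketch only (the helper name and formatting are my assumptions, not code from this repository), such numbers are typically gathered with psutil and NVML:

    # Hypothetical sketch of the ">> System Usage" logging above; assumes
    # "pip install psutil nvidia-ml-py". Not the repository's actual code.
    import logging

    import psutil
    import pynvml

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
    )
    log = logging.getLogger(__name__)

    def log_system_usage() -> None:
        # Host side: CPU load, RAM usage, and disk usage of the root volume.
        disk = psutil.disk_usage("/")
        log.info(
            ">> System Usage - CPU: %.1f%%, RAM: %.1f%%, SSD: %.2fGB / %.2fGB",
            psutil.cpu_percent(interval=1),
            psutil.virtual_memory().percent,
            disk.used / 1024**3,
            disk.total / 1024**3,
        )
        # GPU side via NVML: per-device memory, plus memory and power totals.
        pynvml.nvmlInit()
        try:
            total_mem_gb = total_power_w = 0.0
            for i in range(pynvml.nvmlDeviceGetCount()):
                handle = pynvml.nvmlDeviceGetHandleByIndex(i)
                used_gb = pynvml.nvmlDeviceGetMemoryInfo(handle).used / 1024**3
                total_mem_gb += used_gb
                total_power_w += pynvml.nvmlDeviceGetPowerUsage(handle) / 1000  # mW -> W
                log.info(">> GPU %d: %.2f GB used", i, used_gb)
            log.info(">> Total GPU Memory Used: %.2f GB", total_mem_gb)
            log.info(">> Total GPU Power Consumption: %.2f W", total_power_w)
        finally:
            pynvml.nvmlShutdown()

NVML reports memory in bytes and power in milliwatts, hence the conversions.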
adapter_config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "google/gemma-3-27b-pt",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "o_proj",
+    "k_proj",
+    "v_proj",
+    "up_proj",
+    "gate_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
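The config above is a plain PEFT LoRA adapter (r=16, lora_alpha=16, dropout 0.05) over every attention and MLP projection of google/gemma-3-27b-pt. A minimal sketch of an equivalent setup follows; using AutoModelForCausalLM as the loading class for this checkpoint is an assumption, and this is not the repository's training script:

    # Sketch: rebuild the adapter_config.json above with peft and attach it.
    # Assumption: AutoModelForCausalLM is the right class for this checkpoint.
    import torch
    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "google/gemma-3-27b-pt",
        torch_dtype=torch.bfloat16,  # matches "torch_dtype: torch.bfloat16" in the log
    )

    lora_config = LoraConfig(
        r=16,
        lora_alpha=16,
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
        target_modules=[
            "q_proj", "o_proj", "k_proj", "v_proj",
            "up_proj", "gate_proj", "down_proj",
        ],
    )

    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()  # only the low-rank adapters train

Only the low-rank adapter matrices are saved, which is why adapter_model.safetensors below is roughly 454 MB rather than a full snapshot of the 27B-parameter base model.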
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d739e6bea9aae45c1075780480ca0529129c6dfbcf132fad9d64c7438d1b3f2
+size 454183400
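These three lines are a Git LFS pointer, not the weights themselves: the actual ~454 MB safetensors blob lives in LFS storage and is addressed by its SHA-256. A downloaded copy can be checked against the pointer's oid along these lines (a sketch; only the filename and hash are taken from this commit):

    # Sketch: verify a downloaded LFS object against the pointer's sha256 oid.
    import hashlib

    EXPECTED_OID = "5d739e6bea9aae45c1075780480ca0529129c6dfbcf132fad9d64c7438d1b3f2"

    h = hashlib.sha256()
    with open("adapter_model.safetensors", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)

    assert h.hexdigest() == EXPECTED_OID, "file does not match the LFS pointer"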
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<image_soft_token>": 262144
+}
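This file pins Gemma 3's image placeholder token to id 262144. To confirm the mapping against the tokenizer shipped in this commit, a quick check could look like the following ("./local-checkout" is a placeholder path, not part of this repository):

    # Sketch: confirm the token id recorded in added_tokens.json.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("./local-checkout")  # placeholder path
    assert tokenizer.convert_tokens_to_ids("<image_soft_token>") == 262144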
runs/Mar28_03-26-49_llm-server-779876f58-9zzqd/events.out.tfevents.1743132428.llm-server-779876f58-9zzqd.3709446.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:344898f4c07483ecae5a3309a9d944675734bcb48ea2efc020b66a32f4de64fb
+size 9244
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+{
+  "boi_token": "<start_of_image>",
+  "bos_token": {
+    "content": "<bos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eoi_token": "<end_of_image>",
+  "eos_token": {
+    "content": "<eos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "image_token": "<image_soft_token>",
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render.
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8eda9c7d5c08f1a593c6b3b830efa8c6fce2401cf36c721eab924cc2482b9cea
+size 5816