abarbosa committed
Commit 9ae740e · verified · 1 Parent(s): f84f4a0

Pushing fine-tuned model to Hugging Face Hub
README.md ADDED
@@ -0,0 +1,48 @@
+
+ ---
+ language:
+ - pt
+ - en
+ tags:
+ - aes
+ datasets:
+ - kamel-usp/aes_enem_dataset
+ base_model: ricardoz/BERTugues-base-portuguese-cased
+ metrics:
+ - accuracy
+ - qwk
+ library_name: transformers
+ model-index:
+ - name: BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only
+   results:
+   - task:
+       type: text-classification
+       name: Automated Essay Score
+     dataset:
+       name: Automated Essay Score ENEM Dataset
+       type: kamel-usp/aes_enem_dataset
+       config: JBCS2025
+       split: test
+     metrics:
+     - name: Macro F1
+       type: f1
+       value: 0.2885197389992787
+     - name: QWK
+       type: qwk
+       value: 0.5408745247148289
+     - name: Weighted Macro F1
+       type: f1
+       value: 0.5051079377027337
+ ---
+ # Model ID: BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only
+ ## Results
+ |                  |  test_data |
+ |:-----------------|-----------:|
+ | eval_accuracy    |   0.492754 |
+ | eval_RMSE        |  31.2076   |
+ | eval_QWK         |   0.540875 |
+ | eval_Macro_F1    |   0.28852  |
+ | eval_Weighted_F1 |   0.505108 |
+ | eval_Micro_F1    |   0.492754 |
+ | eval_HDIV        | 0.00724638 |
+
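As context for the card above: a minimal inference sketch (not part of this commit; the Hub repo id below is a placeholder, substitute this model's actual path) showing how the model scores a single essay and how `id2label` turns the predicted class into an ENEM competence score:

```python
# Minimal inference sketch (not part of this commit).
# `repo_id` is a placeholder -- substitute this model's actual Hub path.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "<org>/BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

essay = "Texto completo da redação..."  # essay text only (the run used use_full_context: false)
inputs = tokenizer(essay, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
class_id = int(logits.argmax(dim=-1))
# config.json's id2label maps the 6 classes to ENEM scores {0, 40, ..., 200}
print(f"Predicted competence-4 score: {model.config.id2label[class_id]}")
```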
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": 0,
+     "1": 40,
+     "2": 80,
+     "3": 120,
+     "4": 160,
+     "5": 200
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "0": 0,
+     "40": 1,
+     "80": 2,
+     "120": 3,
+     "160": 4,
+     "200": 5
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
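The `id2label`/`label2id` pair above is the bridge between the six classifier indices and the ENEM competence-4 scores (multiples of 40 from 0 to 200). A small illustrative sketch of that mapping (the helper names are hypothetical, not from this repo):

```python
# Illustrative sketch of the label maps above (helper names are hypothetical).
id2label = {0: 0, 1: 40, 2: 80, 3: 120, 4: 160, 5: 200}
label2id = {score: idx for idx, score in id2label.items()}

def grade_to_class(grade: int) -> int:
    """Turn a raw ENEM competence score into a training label, e.g. 120 -> 3."""
    return label2id[grade]

def class_to_grade(class_id: int) -> int:
    """Turn a predicted class index back into a competence score, e.g. 5 -> 200."""
    return id2label[class_id]

assert grade_to_class(120) == 3
assert class_to_grade(5) == 200
```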
emissions.csv ADDED
@@ -0,0 +1,2 @@
+ timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+ 2025-07-09T17:08:23,jbcs2025,c0944ce8-a1c4-4f61-b7c1-e8c230270aae,BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only,124.09483157697832,0.00258094138718514,2.0798137637054887e-05,54.6325,144.3331460987135,58.0,0.0014930500536145782,0.007276071376407955,0.0019587412501549505,0.010727862680177487,Romania,ROU,gorj county,,,Linux-5.15.0-143-generic-x86_64-with-glibc2.35,3.12.11,3.0.2,36,Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz,1,1 x NVIDIA RTX A6000,23.2904,45.0489,393.6063117980957,machine,N,1.0
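This file is codecarbon output. A minimal sketch of how such a row is produced (generic codecarbon usage; only `project_name` is taken from this run, and the tracked function is a stand-in):

```python
# Sketch: producing an emissions.csv row like the one above with codecarbon.
# Only project_name comes from this run; train() is a stand-in workload.
import time
from codecarbon import EmissionsTracker

def train():
    time.sleep(1.0)  # stand-in for the fine-tuning loop

tracker = EmissionsTracker(project_name="jbcs2025", output_file="emissions.csv")
tracker.start()
try:
    train()
finally:
    emissions_kg = tracker.stop()  # appends one row to emissions.csv
print(f"Total emissions: {emissions_kg:.4f} kg CO2eq")
```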
evaluation_results.csv ADDED
@@ -0,0 +1,4 @@
+ eval_loss,eval_model_preparation_time,eval_accuracy,eval_RMSE,eval_QWK,eval_HDIV,eval_Macro_F1,eval_Micro_F1,eval_Weighted_F1,eval_TP_0,eval_TN_0,eval_FP_0,eval_FN_0,eval_TP_1,eval_TN_1,eval_FP_1,eval_FN_1,eval_TP_2,eval_TN_2,eval_FP_2,eval_FN_2,eval_TP_3,eval_TN_3,eval_FP_3,eval_FN_3,eval_TP_4,eval_TN_4,eval_FP_4,eval_FN_4,eval_TP_5,eval_TN_5,eval_FP_5,eval_FN_5,eval_runtime,eval_samples_per_second,eval_steps_per_second,epoch,reference,timestamp,id
+ 1.7812554836273193,0.0022,0.3181818181818182,49.11335065052284,0.014999250037498024,0.06060606060606055,0.10120481927710842,0.3181818181818182,0.1840087623220153,0,125,6,1,0,132,0,0,0,120,8,4,0,68,0,64,42,8,76,6,0,117,0,15,0.7413,178.059,12.14,-1,validation_before_training,2025-07-09 17:06:28,BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only
+ 1.7043002843856812,0.0022,0.5,32.47376563543955,0.5530126109294722,0.0,0.3471042547949657,0.5,0.5155880326753562,0,131,0,1,0,132,0,0,2,116,12,2,31,53,15,33,24,62,22,24,9,100,17,6,0.3934,335.544,22.878,16.0,validation_after_training,2025-07-09 17:06:28,BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only
+ 1.6317094564437866,0.0022,0.4927536231884058,31.20757990421976,0.5408745247148289,0.007246376811594235,0.2885197389992787,0.4927536231884058,0.5051079377027337,0,137,0,1,0,137,0,1,7,106,23,2,41,41,21,35,17,75,17,29,3,124,9,2,0.3914,352.571,22.994,16.0,test_results,2025-07-09 17:06:28,BERTugues-base-portuguese-cased-encoder_classification-C4-essay_only
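The per-class TP/TN/FP/FN columns are sufficient to re-derive the aggregate scores. A small sketch re-computing `eval_Macro_F1` for the `test_results` row from its counts:

```python
# Re-deriving eval_Macro_F1 for the test_results row from its per-class
# TP/FP/FN counts (values copied from the CSV above; classes 0..5).
tp = [0, 0, 7, 41, 17, 3]
fp = [0, 0, 23, 21, 17, 9]
fn = [1, 1, 2, 35, 29, 2]

def f1(t: int, p: int, n: int) -> float:
    denom = 2 * t + p + n
    return 2 * t / denom if denom else 0.0

macro_f1 = sum(f1(*c) for c in zip(tp, fp, fn)) / len(tp)
print(macro_f1)  # ~0.288520, matching the reported 0.2885197389992787
```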
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09640891f9f0ce6d2d580776c853cbf8333f13b21f5c618680c639fb6204ca84
+ size 437970952
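This is a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real file and `size` its byte count. A sketch for verifying a downloaded `model.safetensors` against the pointer:

```python
# Verifying a downloaded model.safetensors against the LFS pointer above:
# `oid` is the SHA-256 of the actual file, `size` its length in bytes.
import hashlib
from pathlib import Path

expected_oid = "09640891f9f0ce6d2d580776c853cbf8333f13b21f5c618680c639fb6204ca84"
expected_size = 437970952

path = Path("model.safetensors")
digest = hashlib.sha256()
with path.open("rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        digest.update(chunk)

assert path.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "hash mismatch"
print("model.safetensors matches its LFS pointer")
```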
run_experiment.log ADDED
@@ -0,0 +1,417 @@
+ [2025-07-09 17:06:16,012][__main__][INFO] - cache_dir: /tmp/
+ dataset:
+   name: kamel-usp/aes_enem_dataset
+   split: JBCS2025
+ training_params:
+   seed: 42
+   num_train_epochs: 20
+   logging_steps: 100
+   metric_for_best_model: QWK
+   bf16: true
+ bootstrap:
+   enabled: true
+   n_bootstrap: 10000
+   bootstrap_seed: 42
+   metrics:
+   - QWK
+   - Macro_F1
+   - Weighted_F1
+ post_training_results:
+   model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+ experiments:
+   model:
+     name: ricardoz/BERTugues-base-portuguese-cased
+     type: encoder_classification
+     num_labels: 6
+     output_dir: ./results/
+     logging_dir: ./logs/
+     best_model_dir: ./results/best_model
+   tokenizer:
+     name: ricardoz/BERTugues-base-portuguese-cased
+   dataset:
+     grade_index: 3
+     use_full_context: false
+   training_params:
+     weight_decay: 0.01
+     warmup_ratio: 0.1
+     learning_rate: 5.0e-05
+     train_batch_size: 16
+     eval_batch_size: 16
+     gradient_accumulation_steps: 1
+     gradient_checkpointing: false
+
+ [2025-07-09 17:06:19,861][__main__][INFO] - GPU 0: NVIDIA RTX A6000 | TDP ≈ 300 W
+ [2025-07-09 17:06:19,861][__main__][INFO] - Starting the Fine Tuning training process.
+ [2025-07-09 17:06:24,826][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+ [2025-07-09 17:06:24,827][transformers.configuration_utils][INFO] - Model config BertConfig {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
+
+ [2025-07-09 17:06:25,030][transformers.models.auto.tokenization_auto][INFO] - Could not locate the tokenizer configuration file, will try to use the model config instead.
+ [2025-07-09 17:06:25,241][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+ [2025-07-09 17:06:25,242][transformers.configuration_utils][INFO] - Model config BertConfig {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
+
+ [2025-07-09 17:06:25,880][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/vocab.txt
+ [2025-07-09 17:06:25,880][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
+ [2025-07-09 17:06:25,880][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
+ [2025-07-09 17:06:25,880][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None
+ [2025-07-09 17:06:25,880][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at None
+ [2025-07-09 17:06:25,880][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+ [2025-07-09 17:06:25,880][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+ [2025-07-09 17:06:25,881][transformers.configuration_utils][INFO] - Model config BertConfig {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
+
+ [2025-07-09 17:06:25,912][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+ [2025-07-09 17:06:25,913][transformers.configuration_utils][INFO] - Model config BertConfig {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
+
+ [2025-07-09 17:06:25,933][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
+ [2025-07-09 17:06:26,599][__main__][INFO] -
+ Token statistics for 'train' split:
+ [2025-07-09 17:06:26,599][__main__][INFO] - Total examples: 500
+ [2025-07-09 17:06:26,599][__main__][INFO] - Min tokens: 512
+ [2025-07-09 17:06:26,599][__main__][INFO] - Max tokens: 512
+ [2025-07-09 17:06:26,599][__main__][INFO] - Avg tokens: 512.00
+ [2025-07-09 17:06:26,599][__main__][INFO] - Std tokens: 0.00
+ [2025-07-09 17:06:26,696][__main__][INFO] -
+ Token statistics for 'validation' split:
+ [2025-07-09 17:06:26,696][__main__][INFO] - Total examples: 132
+ [2025-07-09 17:06:26,696][__main__][INFO] - Min tokens: 512
+ [2025-07-09 17:06:26,696][__main__][INFO] - Max tokens: 512
+ [2025-07-09 17:06:26,696][__main__][INFO] - Avg tokens: 512.00
+ [2025-07-09 17:06:26,696][__main__][INFO] - Std tokens: 0.00
+ [2025-07-09 17:06:26,798][__main__][INFO] -
+ Token statistics for 'test' split:
+ [2025-07-09 17:06:26,798][__main__][INFO] - Total examples: 138
+ [2025-07-09 17:06:26,798][__main__][INFO] - Min tokens: 512
+ [2025-07-09 17:06:26,798][__main__][INFO] - Max tokens: 512
+ [2025-07-09 17:06:26,798][__main__][INFO] - Avg tokens: 512.00
+ [2025-07-09 17:06:26,798][__main__][INFO] - Std tokens: 0.00
+ [2025-07-09 17:06:26,798][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
+ [2025-07-09 17:06:26,798][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
+ [2025-07-09 17:06:27,018][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+ [2025-07-09 17:06:27,018][transformers.configuration_utils][INFO] - Model config BertConfig {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": 0,
+     "1": 40,
+     "2": 80,
+     "3": 120,
+     "4": 160,
+     "5": 200
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "0": 0,
+     "40": 1,
+     "80": 2,
+     "120": 3,
+     "160": 4,
+     "200": 5
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.53.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
+
+ [2025-07-09 17:06:27,214][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/model.safetensors
+ [2025-07-09 17:06:27,215][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
+ [2025-07-09 17:06:27,215][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+ [2025-07-09 17:06:28,002][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at ricardoz/BERTugues-base-portuguese-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']
+ - This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+ - This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+ [2025-07-09 17:06:28,002][transformers.modeling_utils][WARNING] - Some weights of BertForSequenceClassification were not initialized from the model checkpoint at ricardoz/BERTugues-base-portuguese-cased and are newly initialized: ['classifier.bias', 'classifier.weight']
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+ [2025-07-09 17:06:28,008][transformers.training_args][INFO] - PyTorch: setting up devices
+ [2025-07-09 17:06:28,033][__main__][INFO] - Total steps: 620. Number of warmup steps: 62
+ [2025-07-09 17:06:28,041][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+ [2025-07-09 17:06:28,063][transformers.trainer][INFO] - Using auto half precision backend
+ [2025-07-09 17:06:28,065][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:06:28,070][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:06:28,071][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:06:28,071][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:06:29,180][transformers.trainer][INFO] - The following columns in the Training set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - ***** Running training *****
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Num examples = 500
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Num Epochs = 20
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Instantaneous batch size per device = 16
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Total train batch size (w. parallel, distributed & accumulation) = 16
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Gradient Accumulation steps = 1
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Total optimization steps = 640
+ [2025-07-09 17:06:29,190][transformers.trainer][INFO] - Number of trainable parameters = 109,486,854
+ [2025-07-09 17:06:34,013][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:06:34,016][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:06:34,016][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:06:34,016][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:06:34,411][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-32
+ [2025-07-09 17:06:34,413][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-32/config.json
+ [2025-07-09 17:06:35,388][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-32/model.safetensors
+ [2025-07-09 17:06:41,012][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:06:41,014][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:06:41,015][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:06:41,015][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:06:41,409][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-64
+ [2025-07-09 17:06:41,411][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-64/config.json
+ [2025-07-09 17:06:42,414][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-64/model.safetensors
+ [2025-07-09 17:06:43,902][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-32] due to args.save_total_limit
+ [2025-07-09 17:06:48,508][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:06:48,511][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:06:48,512][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:06:48,512][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:06:48,905][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-96
+ [2025-07-09 17:06:48,906][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-96/config.json
+ [2025-07-09 17:06:49,840][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-96/model.safetensors
+ [2025-07-09 17:06:50,703][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-64] due to args.save_total_limit
+ [2025-07-09 17:06:55,362][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:06:55,365][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:06:55,365][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:06:55,365][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:06:55,757][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-128
+ [2025-07-09 17:06:55,759][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-128/config.json
+ [2025-07-09 17:06:56,597][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-128/model.safetensors
+ [2025-07-09 17:07:01,972][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:01,975][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:01,975][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:01,975][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:02,367][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-160
+ [2025-07-09 17:07:02,368][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-160/config.json
+ [2025-07-09 17:07:03,428][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-160/model.safetensors
+ [2025-07-09 17:07:04,269][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-128] due to args.save_total_limit
+ [2025-07-09 17:07:08,895][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:08,897][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:08,898][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:08,898][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:09,288][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-192
+ [2025-07-09 17:07:09,289][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-192/config.json
+ [2025-07-09 17:07:10,206][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-192/model.safetensors
+ [2025-07-09 17:07:11,160][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-160] due to args.save_total_limit
+ [2025-07-09 17:07:15,856][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:15,859][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:15,859][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:15,859][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:16,282][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-224
+ [2025-07-09 17:07:16,283][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-224/config.json
+ [2025-07-09 17:07:17,131][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-224/model.safetensors
+ [2025-07-09 17:07:18,022][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-192] due to args.save_total_limit
+ [2025-07-09 17:07:22,789][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:22,792][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:22,792][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:22,792][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:23,201][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-256
+ [2025-07-09 17:07:23,203][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-256/config.json
+ [2025-07-09 17:07:24,424][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-256/model.safetensors
+ [2025-07-09 17:07:25,309][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-96] due to args.save_total_limit
+ [2025-07-09 17:07:25,432][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-224] due to args.save_total_limit
+ [2025-07-09 17:07:30,129][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:30,131][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:30,131][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:30,131][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:30,520][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-288
+ [2025-07-09 17:07:30,521][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-288/config.json
+ [2025-07-09 17:07:31,349][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-288/model.safetensors
+ [2025-07-09 17:07:36,863][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:36,866][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:36,866][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:36,866][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:37,257][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-320
+ [2025-07-09 17:07:37,258][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-320/config.json
+ [2025-07-09 17:07:38,110][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-320/model.safetensors
+ [2025-07-09 17:07:38,960][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-256] due to args.save_total_limit
+ [2025-07-09 17:07:39,067][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-288] due to args.save_total_limit
+ [2025-07-09 17:07:43,736][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:43,739][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:43,739][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:43,739][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:44,147][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-352
+ [2025-07-09 17:07:44,148][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-352/config.json
+ [2025-07-09 17:07:45,170][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-352/model.safetensors
+ [2025-07-09 17:07:46,159][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-320] due to args.save_total_limit
+ [2025-07-09 17:07:50,840][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:50,843][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:50,843][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:50,843][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:51,241][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-384
+ [2025-07-09 17:07:51,242][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-384/config.json
+ [2025-07-09 17:07:52,176][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-384/model.safetensors
+ [2025-07-09 17:07:57,717][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:07:57,720][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:07:57,720][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:07:57,720][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:07:58,119][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-416
+ [2025-07-09 17:07:58,121][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-416/config.json
+ [2025-07-09 17:07:59,069][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-416/model.safetensors
+ [2025-07-09 17:08:00,007][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-384] due to args.save_total_limit
+ [2025-07-09 17:08:04,702][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:08:04,705][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:08:04,706][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:08:04,706][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:08:05,096][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-448
+ [2025-07-09 17:08:05,097][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-448/config.json
+ [2025-07-09 17:08:05,906][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-448/model.safetensors
+ [2025-07-09 17:08:06,837][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-416] due to args.save_total_limit
+ [2025-07-09 17:08:11,545][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:08:11,548][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:08:11,548][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:08:11,548][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:08:11,937][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-480
+ [2025-07-09 17:08:11,938][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-480/config.json
+ [2025-07-09 17:08:12,811][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-480/model.safetensors
+ [2025-07-09 17:08:13,705][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-448] due to args.save_total_limit
+ [2025-07-09 17:08:18,361][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:08:18,363][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:08:18,364][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:08:18,364][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:08:18,754][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-512
+ [2025-07-09 17:08:18,756][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-512/config.json
+ [2025-07-09 17:08:19,574][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-512/model.safetensors
+ [2025-07-09 17:08:20,476][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-480] due to args.save_total_limit
+ [2025-07-09 17:08:20,590][transformers.trainer][INFO] -
+
+ Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+ [2025-07-09 17:08:20,590][transformers.trainer][INFO] - Loading best model from /workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-352 (score: 0.5530126109294722).
+ [2025-07-09 17:08:20,826][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-06-15/results/checkpoint-512] due to args.save_total_limit
+ [2025-07-09 17:08:20,953][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:08:20,957][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:08:20,957][transformers.trainer][INFO] - Num examples = 132
+ [2025-07-09 17:08:20,957][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:08:21,357][__main__][INFO] - Training completed successfully.
+ [2025-07-09 17:08:21,357][__main__][INFO] - Running on Test
+ [2025-07-09 17:08:21,357][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades. If id_prompt, prompt, essay_text, supporting_text, essay_year, reference, id, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-07-09 17:08:21,359][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-07-09 17:08:21,359][transformers.trainer][INFO] - Num examples = 138
+ [2025-07-09 17:08:21,359][transformers.trainer][INFO] - Batch size = 16
+ [2025-07-09 17:08:21,755][__main__][INFO] - Test metrics: {'eval_loss': 1.6317094564437866, 'eval_model_preparation_time': 0.0022, 'eval_accuracy': 0.4927536231884058, 'eval_RMSE': 31.20757990421976, 'eval_QWK': 0.5408745247148289, 'eval_HDIV': 0.007246376811594235, 'eval_Macro_F1': 0.2885197389992787, 'eval_Micro_F1': 0.4927536231884058, 'eval_Weighted_F1': 0.5051079377027337, 'eval_TP_0': 0, 'eval_TN_0': 137, 'eval_FP_0': 0, 'eval_FN_0': 1, 'eval_TP_1': 0, 'eval_TN_1': 137, 'eval_FP_1': 0, 'eval_FN_1': 1, 'eval_TP_2': 7, 'eval_TN_2': 106, 'eval_FP_2': 23, 'eval_FN_2': 2, 'eval_TP_3': 41, 'eval_TN_3': 41, 'eval_FP_3': 21, 'eval_FN_3': 35, 'eval_TP_4': 17, 'eval_TN_4': 75, 'eval_FP_4': 17, 'eval_FN_4': 29, 'eval_TP_5': 3, 'eval_TN_5': 124, 'eval_FP_5': 9, 'eval_FN_5': 2, 'eval_runtime': 0.3914, 'eval_samples_per_second': 352.571, 'eval_steps_per_second': 22.994, 'epoch': 16.0}
+ [2025-07-09 17:08:21,755][transformers.trainer][INFO] - Saving model checkpoint to ./results/best_model
+ [2025-07-09 17:08:21,757][transformers.configuration_utils][INFO] - Configuration saved in ./results/best_model/config.json
+ [2025-07-09 17:08:22,927][transformers.modeling_utils][INFO] - Model weights saved in ./results/best_model/model.safetensors
+ [2025-07-09 17:08:22,929][transformers.tokenization_utils_base][INFO] - tokenizer config file saved in ./results/best_model/tokenizer_config.json
+ [2025-07-09 17:08:22,929][transformers.tokenization_utils_base][INFO] - Special tokens file saved in ./results/best_model/special_tokens_map.json
+ [2025-07-09 17:08:22,941][__main__][INFO] - Model and tokenizer saved to ./results/best_model
+ [2025-07-09 17:08:22,945][__main__][INFO] - Fine Tuning Finished.
+ [2025-07-09 17:08:23,454][__main__][INFO] - Total emissions: 0.0026 kg CO2eq
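For readers who want to reproduce the run, the hyperparameters logged at the top translate roughly into the following `TrainingArguments` (a hedged reconstruction, not the project's actual code; the per-epoch eval/save cadence and `save_total_limit` are inferred from the checkpoint-32/-64/... save-and-delete pattern):

```python
# Hedged reconstruction of this run's TrainingArguments from the logged
# config; eval/save cadence and save_total_limit are inferred from the
# checkpoint save/delete pattern in the log, not copied from source code.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./results/",
    logging_dir="./logs/",
    seed=42,
    num_train_epochs=20,
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=1,
    weight_decay=0.01,
    warmup_ratio=0.1,
    bf16=True,
    logging_steps=100,
    eval_strategy="epoch",        # 500 examples / batch 16 = 32 steps per epoch
    save_strategy="epoch",        # matches checkpoint-32, -64, ... in the log
    save_total_limit=1,           # matches the "Deleting older checkpoint" lines
    load_best_model_at_end=True,  # best model reloaded from checkpoint-352
    metric_for_best_model="QWK",  # QWK itself comes from a custom compute_metrics
    greater_is_better=True,
)
```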
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e17d2cf5818eeb3796caceeb049dff425aa1ed7693c852da5dfcbebeafdcac86
+ size 5777
vocab.txt ADDED
The diff for this file is too large to render. See raw diff