Upload fine-tuned model - BLEU: 0.1277
- .gitattributes +2 -0
- README.md +4 -4
- baseline_results.json +3 -3
- checkpoint-96/config.json +54 -0
- checkpoint-96/generation_config.json +17 -0
- checkpoint-96/model.safetensors +3 -0
- checkpoint-96/optimizer.pt +3 -0
- checkpoint-96/rng_state.pth +3 -0
- checkpoint-96/scaler.pt +3 -0
- checkpoint-96/scheduler.pt +3 -0
- checkpoint-96/source.spm +3 -0
- checkpoint-96/special_tokens_map.json +5 -0
- checkpoint-96/target.spm +3 -0
- checkpoint-96/tokenizer_config.json +39 -0
- checkpoint-96/trainer_state.json +50 -0
- checkpoint-96/training_args.bin +3 -0
- checkpoint-96/vocab.json +0 -0
- model.safetensors +1 -1
- test_results.json +13 -13
- train_results.json +4 -4
- training_args.bin +1 -1
- translation_config.json +1 -1
.gitattributes
CHANGED
@@ -43,3 +43,5 @@ source.spm filter=lfs diff=lfs merge=lfs -text
 target.spm filter=lfs diff=lfs merge=lfs -text
 checkpoint-128/source.spm filter=lfs diff=lfs merge=lfs -text
 checkpoint-128/target.spm filter=lfs diff=lfs merge=lfs -text
+checkpoint-96/source.spm filter=lfs diff=lfs merge=lfs -text
+checkpoint-96/target.spm filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -40,13 +40,13 @@ The model was fine-tuned on a combination of:
 ## Performance
 
 ### Test Set (General Translation)
-- **BLEU:** 0.
-- **chrF:**
+- **BLEU:** 0.1277
+- **chrF:** 32.30
 - **Improvement over baseline:** +0.0%
 
 ### Domain Evaluation (Call Transcriptions)
 - **Domain BLEU:** 0.0000
-- **Domain chrF:**
+- **Domain chrF:** 1.86
 - **Domain COMET-QE:** 0.0000
 
 *Domain metrics are evaluated on real 10-minute call transcriptions from child helplines.*
@@ -88,7 +88,7 @@ print(translation)
 
 **Base Model:** Helsinki-NLP/opus-mt-mul-en
 **Training Epochs:** 8
-**Batch Size:**
+**Batch Size:** 550 (effective: 550)
 **Learning Rate:** 3e-05
 **Optimizer:** AdamW
 **Hardware:** NVIDIA GPU with FP16 mixed precision
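The README's training details above describe a MarianMT checkpoint fine-tuned from Helsinki-NLP/opus-mt-mul-en; its usage section (whose closing line, print(translation), appears in the hunk header) is not fully visible in this diff. The sketch below is a minimal, illustrative way to load and run such a checkpoint with the transformers API: the repository id is a hypothetical placeholder, and beam-search defaults come from the checkpoint's generation_config.json (num_beams=6, max_length=512).

# Minimal inference sketch (assumptions: placeholder repo id, standard MarianMT API).
from transformers import MarianMTModel, MarianTokenizer

model_id = "your-org/finetuned-opus-mt-mul-en"  # hypothetical repository id

tokenizer = MarianTokenizer.from_pretrained(model_id)
model = MarianMTModel.from_pretrained(model_id)

# Any supported source-language sentence; generation settings (num_beams=6,
# max_length=512) are read from generation_config.json by generate().
inputs = tokenizer("Habari yako?", return_tensors="pt", truncation=True)
outputs = model.generate(**inputs)

translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(translation)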
baseline_results.json
CHANGED
@@ -1,10 +1,10 @@
 {
   "eval_loss": 18.773775100708008,
-  "eval_model_preparation_time": 0.
+  "eval_model_preparation_time": 0.0027,
   "eval_bleu": 0.0317523275253792,
   "eval_chrf": 19.994361360713377,
-  "eval_runtime": 18.
-  "eval_samples_per_second": 19.
+  "eval_runtime": 18.4416,
+  "eval_samples_per_second": 19.521,
   "eval_steps_per_second": 0.054,
   "eval_comet": 0.0341746505332392,
   "eval_comet_std": 0.1127765594779826,
checkpoint-96/config.json
ADDED
@@ -0,0 +1,54 @@
+{
+  "activation_dropout": 0.0,
+  "activation_function": "swish",
+  "add_bias_logits": false,
+  "add_final_layer_norm": false,
+  "architectures": [
+    "MarianMTModel"
+  ],
+  "attention_dropout": 0.0,
+  "classif_dropout": 0.0,
+  "classifier_dropout": 0.0,
+  "d_model": 512,
+  "decoder_attention_heads": 8,
+  "decoder_ffn_dim": 2048,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 64171,
+  "decoder_vocab_size": 64172,
+  "dropout": 0.1,
+  "dtype": "float32",
+  "encoder_attention_heads": 8,
+  "encoder_ffn_dim": 2048,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 0,
+  "extra_pos_embeddings": 64172,
+  "forced_eos_token_id": 0,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "max_length": null,
+  "max_position_embeddings": 512,
+  "model_type": "marian",
+  "normalize_before": false,
+  "normalize_embedding": false,
+  "num_beams": null,
+  "num_hidden_layers": 6,
+  "pad_token_id": 64171,
+  "scale_embedding": true,
+  "share_encoder_decoder_embeddings": true,
+  "static_position_embeddings": true,
+  "transformers_version": "4.57.1",
+  "use_cache": true,
+  "vocab_size": 64172
+}
checkpoint-96/generation_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "bad_words_ids": [
+    [
+      64171
+    ]
+  ],
+  "decoder_start_token_id": 64171,
+  "eos_token_id": [
+    0
+  ],
+  "forced_eos_token_id": 0,
+  "max_length": 512,
+  "num_beams": 6,
+  "pad_token_id": 64171,
+  "renormalize_logits": true,
+  "transformers_version": "4.57.1"
+}
checkpoint-96/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e1c9e6b6073e8a0fc83c1a11432fbadf83a5c32033b36823091975ede4c568f
+size 308263984
checkpoint-96/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98a43be8f8d6ec5dc6739423b9744963df61bcb70c99ace839fc38350e209a2c
+size 616171979
checkpoint-96/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abe68b8d07bb856812fa822df28c97b9df77fbbe797028ee7cd9f8e7dd846374
+size 14645
checkpoint-96/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68a92e81e21ef52284b52c64167c5820c17700eece1dc77ea3e5c9144c32e6c5
+size 1383
checkpoint-96/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b12b4be9f022ed85c83fe8574cc4bbe781b092616f5f3b78c3b587e32292c49
+size 1465
checkpoint-96/source.spm
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4a99ea3602b29fbf901ade8b93a45efa3d7c64eab8fc5fa812383efa327a87d
+size 706917
checkpoint-96/special_tokens_map.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
+}
checkpoint-96/target.spm
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6dce5fa58fcd7dde9e81e279b8c075bf42ee558278f73d6fb48e342029d7f19
+size 791194
checkpoint-96/tokenizer_config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "64171": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "separate_vocabs": false,
+  "source_lang": "mul",
+  "sp_model_kwargs": {},
+  "target_lang": "eng",
+  "tokenizer_class": "MarianTokenizer",
+  "unk_token": "<unk>"
+}
checkpoint-96/trainer_state.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 8.0,
+  "eval_steps": 1000,
+  "global_step": 96,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 4.166666666666667,
+      "grad_norm": 2.2876839637756348,
+      "learning_rate": 1.7184017516025075e-05,
+      "loss": 6.4599,
+      "step": 50
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 96,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 8,
+  "save_steps": 1000,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 5,
+        "early_stopping_threshold": 0.001
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 0
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1138408789377024.0,
+  "train_batch_size": 550,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-96/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb0b9e675407ca1a9ee1a08d6b6b650e16ca2de15fd3b2c37137b42aedec39fa
+size 5969
checkpoint-96/vocab.json
ADDED
The diff for this file is too large to render.
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4e1c9e6b6073e8a0fc83c1a11432fbadf83a5c32033b36823091975ede4c568f
 size 308263984
test_results.json
CHANGED
@@ -1,16 +1,16 @@
 {
-  "eval_loss": 3.
-  "eval_model_preparation_time": 0.
-  "eval_bleu": 0.
-  "eval_chrf":
-  "eval_runtime":
-  "eval_samples_per_second":
-  "eval_steps_per_second": 0.
+  "eval_loss": 3.5031542778015137,
+  "eval_model_preparation_time": 0.0027,
+  "eval_bleu": 0.1276835459626037,
+  "eval_chrf": 32.29660565619225,
+  "eval_runtime": 4.0246,
+  "eval_samples_per_second": 89.449,
+  "eval_steps_per_second": 0.248,
   "epoch": 8.0,
-  "eval_comet": 0.
-  "eval_comet_std": 0.
-  "domain_bleu":
-  "domain_chrf":
-  "domain_comet": 4.
-  "domain_comet_std": 3.
+  "eval_comet": 0.08929400351530098,
+  "eval_comet_std": 0.1904605300394193,
+  "domain_bleu": 8.850691462561237e-18,
+  "domain_chrf": 1.8573826171296468,
+  "domain_comet": 4.175902431597933e-05,
+  "domain_comet_std": 3.194240118007998e-07
 }
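The eval_bleu/eval_chrf and domain_bleu/domain_chrf fields above are corpus-level BLEU and chrF scores. The sketch below shows one common way to recompute such scores with sacrebleu; it is an assumption that this (or an equivalent backend) produced the numbers in test_results.json, and the score scale used there is not confirmed by the diff.

# Hedged sketch: recomputing corpus BLEU and chrF with sacrebleu.
# The hypothesis/reference strings are placeholders, not data from this repo.
import sacrebleu

hypotheses = ["the child called the helpline yesterday"]    # system outputs
references = [["the child called the helpline yesterday"]]  # one reference stream

bleu = sacrebleu.corpus_bleu(hypotheses, references)   # BLEUScore, .score on a 0-100 scale
chrf = sacrebleu.corpus_chrf(hypotheses, references)   # CHRFScore, .score on a 0-100 scale

print(f"BLEU: {bleu.score:.4f}  chrF: {chrf.score:.4f}")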
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
-  "train_runtime":
-  "train_samples_per_second":
-  "train_steps_per_second":
+  "train_runtime": 62.295,
+  "train_samples_per_second": 784.14,
+  "train_steps_per_second": 1.541,
   "total_flos": 1138408789377024.0,
-  "train_loss":
+  "train_loss": 5.05125633875529,
   "epoch": 8.0
 }
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eb0b9e675407ca1a9ee1a08d6b6b650e16ca2de15fd3b2c37137b42aedec39fa
 size 5969
translation_config.json
CHANGED
@@ -30,7 +30,7 @@
   },
   "training_config": {
     "learning_rate": 3e-05,
-    "batch_size":
+    "batch_size": 550,
     "gradient_accumulation_steps": 1,
     "num_epochs": 8,
     "max_length": 300,
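Taken together, translation_config.json, checkpoint-96/trainer_state.json, and the README pin down the main training hyperparameters: learning rate 3e-05, batch size 550 with no gradient accumulation, 8 epochs, FP16, and early stopping with patience 5. The sketch below shows how those recorded values could map onto transformers' Seq2SeqTrainingArguments; the actual training script is not part of this commit, so treat every argument here as an assumption rather than the author's code.

# Illustrative mapping of the recorded hyperparameters onto Seq2SeqTrainingArguments.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="opus-mt-mul-en-finetuned",   # hypothetical output directory
    learning_rate=3e-5,                      # translation_config.json: "learning_rate": 3e-05
    per_device_train_batch_size=550,         # trainer_state.json: "train_batch_size": 550
    gradient_accumulation_steps=1,           # translation_config.json: 1
    num_train_epochs=8,                      # translation_config.json: "num_epochs": 8
    fp16=True,                               # README: FP16 mixed precision
    logging_steps=50,                        # trainer_state.json: "logging_steps": 50
    save_steps=1000,                         # trainer_state.json: "save_steps": 1000
    eval_steps=1000,                         # trainer_state.json: "eval_steps": 1000
    predict_with_generate=True,              # assumed, since BLEU/chrF are computed at eval time
)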