{
"best_metric": 10.29,
"best_model_checkpoint": "/workspace/models/nllb_fongbe_finetuned/checkpoint-3000",
"epoch": 4.229820232640113,
"eval_steps": 500,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14099400775467041,
"grad_norm": 5.562941551208496,
"learning_rate": 2.0000000000000003e-06,
"loss": 6.7192,
"step": 100
},
{
"epoch": 0.28198801550934083,
"grad_norm": 1.1616981029510498,
"learning_rate": 4.000000000000001e-06,
"loss": 5.1508,
"step": 200
},
{
"epoch": 0.4229820232640113,
"grad_norm": 1.268113136291504,
"learning_rate": 6e-06,
"loss": 4.6273,
"step": 300
},
{
"epoch": 0.5639760310186817,
"grad_norm": 1.2506167888641357,
"learning_rate": 8.000000000000001e-06,
"loss": 4.4506,
"step": 400
},
{
"epoch": 0.7049700387733522,
"grad_norm": 1.2293723821640015,
"learning_rate": 1e-05,
"loss": 4.3037,
"step": 500
},
{
"epoch": 0.7049700387733522,
"eval_bleu": 4.12,
"eval_loss": 4.100268840789795,
"eval_runtime": 2071.2386,
"eval_samples_per_second": 2.435,
"eval_steps_per_second": 0.153,
"step": 500
},
{
"epoch": 0.8459640465280226,
"grad_norm": 1.3162401914596558,
"learning_rate": 1.2e-05,
"loss": 4.165,
"step": 600
},
{
"epoch": 0.986958054282693,
"grad_norm": 1.1826772689819336,
"learning_rate": 1.4e-05,
"loss": 4.0721,
"step": 700
},
{
"epoch": 1.1279520620373633,
"grad_norm": 1.2221076488494873,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.9853,
"step": 800
},
{
"epoch": 1.2689460697920338,
"grad_norm": 1.2303624153137207,
"learning_rate": 1.8e-05,
"loss": 3.9144,
"step": 900
},
{
"epoch": 1.4099400775467044,
"grad_norm": 1.1688417196273804,
"learning_rate": 2e-05,
"loss": 3.8332,
"step": 1000
},
{
"epoch": 1.4099400775467044,
"eval_bleu": 5.31,
"eval_loss": 3.682112693786621,
"eval_runtime": 2266.4204,
"eval_samples_per_second": 2.225,
"eval_steps_per_second": 0.139,
"step": 1000
},
{
"epoch": 1.5509340853013747,
"grad_norm": 1.1778169870376587,
"learning_rate": 1.9214145383104128e-05,
"loss": 3.7497,
"step": 1100
},
{
"epoch": 1.691928093056045,
"grad_norm": 1.1701103448867798,
"learning_rate": 1.842829076620825e-05,
"loss": 3.7144,
"step": 1200
},
{
"epoch": 1.8329221008107155,
"grad_norm": 1.170053243637085,
"learning_rate": 1.764243614931238e-05,
"loss": 3.6683,
"step": 1300
},
{
"epoch": 1.973916108565386,
"grad_norm": 1.2748677730560303,
"learning_rate": 1.6856581532416506e-05,
"loss": 3.6192,
"step": 1400
},
{
"epoch": 2.1149101163200563,
"grad_norm": 1.180209994316101,
"learning_rate": 1.607072691552063e-05,
"loss": 3.553,
"step": 1500
},
{
"epoch": 2.1149101163200563,
"eval_bleu": 6.86,
"eval_loss": 3.45310640335083,
"eval_runtime": 2226.0872,
"eval_samples_per_second": 2.265,
"eval_steps_per_second": 0.142,
"step": 1500
},
{
"epoch": 2.2559041240747266,
"grad_norm": 1.1808658838272095,
"learning_rate": 1.5284872298624755e-05,
"loss": 3.4858,
"step": 1600
},
{
"epoch": 2.3968981318293974,
"grad_norm": 1.2947388887405396,
"learning_rate": 1.4499017681728882e-05,
"loss": 3.4897,
"step": 1700
},
{
"epoch": 2.5378921395840677,
"grad_norm": 1.2786301374435425,
"learning_rate": 1.3713163064833006e-05,
"loss": 3.4922,
"step": 1800
},
{
"epoch": 2.678886147338738,
"grad_norm": 1.2938331365585327,
"learning_rate": 1.2927308447937132e-05,
"loss": 3.4331,
"step": 1900
},
{
"epoch": 2.8198801550934087,
"grad_norm": 1.216395616531372,
"learning_rate": 1.2141453831041259e-05,
"loss": 3.4279,
"step": 2000
},
{
"epoch": 2.8198801550934087,
"eval_bleu": 8.51,
"eval_loss": 3.327824831008911,
"eval_runtime": 2139.0671,
"eval_samples_per_second": 2.358,
"eval_steps_per_second": 0.148,
"step": 2000
},
{
"epoch": 2.960874162848079,
"grad_norm": 1.1774195432662964,
"learning_rate": 1.1355599214145383e-05,
"loss": 3.4074,
"step": 2100
},
{
"epoch": 3.1018681706027493,
"grad_norm": 1.1595913171768188,
"learning_rate": 1.056974459724951e-05,
"loss": 3.3459,
"step": 2200
},
{
"epoch": 3.2428621783574196,
"grad_norm": 1.1857880353927612,
"learning_rate": 9.783889980353636e-06,
"loss": 3.3206,
"step": 2300
},
{
"epoch": 3.3838561861120904,
"grad_norm": 1.1783032417297363,
"learning_rate": 8.998035363457762e-06,
"loss": 3.3266,
"step": 2400
},
{
"epoch": 3.5248501938667607,
"grad_norm": 1.2441322803497314,
"learning_rate": 8.212180746561886e-06,
"loss": 3.299,
"step": 2500
},
{
"epoch": 3.5248501938667607,
"eval_bleu": 9.63,
"eval_loss": 3.2531518936157227,
"eval_runtime": 2099.6399,
"eval_samples_per_second": 2.402,
"eval_steps_per_second": 0.151,
"step": 2500
},
{
"epoch": 3.665844201621431,
"grad_norm": 1.1659629344940186,
"learning_rate": 7.4263261296660124e-06,
"loss": 3.2992,
"step": 2600
},
{
"epoch": 3.8068382093761013,
"grad_norm": 1.234079360961914,
"learning_rate": 6.640471512770138e-06,
"loss": 3.3154,
"step": 2700
},
{
"epoch": 3.947832217130772,
"grad_norm": 1.262974500656128,
"learning_rate": 5.854616895874263e-06,
"loss": 3.315,
"step": 2800
},
{
"epoch": 4.088826224885443,
"grad_norm": 1.254392385482788,
"learning_rate": 5.068762278978389e-06,
"loss": 3.2626,
"step": 2900
},
{
"epoch": 4.229820232640113,
"grad_norm": 1.2075427770614624,
"learning_rate": 4.282907662082515e-06,
"loss": 3.2603,
"step": 3000
},
{
"epoch": 4.229820232640113,
"eval_bleu": 10.29,
"eval_loss": 3.213658094406128,
"eval_runtime": 2092.8319,
"eval_samples_per_second": 2.41,
"eval_steps_per_second": 0.151,
"step": 3000
}
],
"logging_steps": 100,
"max_steps": 3545,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9262611430375424.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}