{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 78,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01282051282051282,
"grad_norm": 23.535282117342813,
"learning_rate": 6.666666666666667e-06,
"loss": 0.9389,
"step": 1
},
{
"epoch": 0.02564102564102564,
"grad_norm": 30.95165111144086,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.1153,
"step": 2
},
{
"epoch": 0.038461538461538464,
"grad_norm": 21.427477034083225,
"learning_rate": 2e-05,
"loss": 0.908,
"step": 3
},
{
"epoch": 0.05128205128205128,
"grad_norm": 17.928895689942703,
"learning_rate": 1.9991228300988586e-05,
"loss": 0.7206,
"step": 4
},
{
"epoch": 0.0641025641025641,
"grad_norm": 20.46581420059138,
"learning_rate": 1.9964928592495046e-05,
"loss": 0.8048,
"step": 5
},
{
"epoch": 0.07692307692307693,
"grad_norm": 9.533694744269097,
"learning_rate": 1.9921147013144782e-05,
"loss": 0.6909,
"step": 6
},
{
"epoch": 0.08974358974358974,
"grad_norm": 7.154372229051495,
"learning_rate": 1.985996037070505e-05,
"loss": 0.7208,
"step": 7
},
{
"epoch": 0.10256410256410256,
"grad_norm": 6.050595599777209,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.5383,
"step": 8
},
{
"epoch": 0.11538461538461539,
"grad_norm": 4.81529735005149,
"learning_rate": 1.9685831611286312e-05,
"loss": 0.5185,
"step": 9
},
{
"epoch": 0.1282051282051282,
"grad_norm": 5.904610892485658,
"learning_rate": 1.9573194975320672e-05,
"loss": 0.5649,
"step": 10
},
{
"epoch": 0.14102564102564102,
"grad_norm": 5.012548893301235,
"learning_rate": 1.944376370237481e-05,
"loss": 0.6025,
"step": 11
},
{
"epoch": 0.15384615384615385,
"grad_norm": 3.4664366084608105,
"learning_rate": 1.9297764858882516e-05,
"loss": 0.4299,
"step": 12
},
{
"epoch": 0.16666666666666666,
"grad_norm": 4.713790985225752,
"learning_rate": 1.913545457642601e-05,
"loss": 0.6949,
"step": 13
},
{
"epoch": 0.1794871794871795,
"grad_norm": 4.777031980778873,
"learning_rate": 1.895711760239413e-05,
"loss": 0.5172,
"step": 14
},
{
"epoch": 0.19230769230769232,
"grad_norm": 3.3607607535520345,
"learning_rate": 1.8763066800438638e-05,
"loss": 0.4786,
"step": 15
},
{
"epoch": 0.20512820512820512,
"grad_norm": 4.861766646485932,
"learning_rate": 1.855364260160507e-05,
"loss": 0.4538,
"step": 16
},
{
"epoch": 0.21794871794871795,
"grad_norm": 4.173446943768888,
"learning_rate": 1.8329212407100996e-05,
"loss": 0.6317,
"step": 17
},
{
"epoch": 0.23076923076923078,
"grad_norm": 3.857794925631418,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.5543,
"step": 18
},
{
"epoch": 0.24358974358974358,
"grad_norm": 3.589848386643151,
"learning_rate": 1.78369345732584e-05,
"loss": 0.5853,
"step": 19
},
{
"epoch": 0.2564102564102564,
"grad_norm": 5.135379268595713,
"learning_rate": 1.7569950556517566e-05,
"loss": 0.5254,
"step": 20
},
{
"epoch": 0.2692307692307692,
"grad_norm": 4.251176222586967,
"learning_rate": 1.7289686274214116e-05,
"loss": 0.5944,
"step": 21
},
{
"epoch": 0.28205128205128205,
"grad_norm": 3.870818396954739,
"learning_rate": 1.6996633405133656e-05,
"loss": 0.5656,
"step": 22
},
{
"epoch": 0.2948717948717949,
"grad_norm": 3.342745967846155,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.5327,
"step": 23
},
{
"epoch": 0.3076923076923077,
"grad_norm": 4.502112335496065,
"learning_rate": 1.63742398974869e-05,
"loss": 0.5252,
"step": 24
},
{
"epoch": 0.32051282051282054,
"grad_norm": 4.61897270876649,
"learning_rate": 1.6045991148623752e-05,
"loss": 0.7316,
"step": 25
},
{
"epoch": 0.3333333333333333,
"grad_norm": 4.630378520341254,
"learning_rate": 1.570713567684432e-05,
"loss": 0.5961,
"step": 26
},
{
"epoch": 0.34615384615384615,
"grad_norm": 3.4435955161971847,
"learning_rate": 1.5358267949789968e-05,
"loss": 0.3898,
"step": 27
},
{
"epoch": 0.358974358974359,
"grad_norm": 3.6296714817244355,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.5917,
"step": 28
},
{
"epoch": 0.3717948717948718,
"grad_norm": 3.553076230658495,
"learning_rate": 1.463296035119862e-05,
"loss": 0.4686,
"step": 29
},
{
"epoch": 0.38461538461538464,
"grad_norm": 3.3205165192761865,
"learning_rate": 1.4257792915650728e-05,
"loss": 0.5327,
"step": 30
},
{
"epoch": 0.3974358974358974,
"grad_norm": 4.258971760384096,
"learning_rate": 1.3875155864521031e-05,
"loss": 0.5969,
"step": 31
},
{
"epoch": 0.41025641025641024,
"grad_norm": 4.160699064174649,
"learning_rate": 1.3485720473218153e-05,
"loss": 0.5726,
"step": 32
},
{
"epoch": 0.4230769230769231,
"grad_norm": 3.8858738820549443,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.481,
"step": 33
},
{
"epoch": 0.4358974358974359,
"grad_norm": 3.5923086165234364,
"learning_rate": 1.2689198206152657e-05,
"loss": 0.4831,
"step": 34
},
{
"epoch": 0.44871794871794873,
"grad_norm": 3.040207060233123,
"learning_rate": 1.2283508701106559e-05,
"loss": 0.4379,
"step": 35
},
{
"epoch": 0.46153846153846156,
"grad_norm": 4.835462956087315,
"learning_rate": 1.187381314585725e-05,
"loss": 0.5842,
"step": 36
},
{
"epoch": 0.47435897435897434,
"grad_norm": 3.430545038253428,
"learning_rate": 1.1460830285624119e-05,
"loss": 0.4721,
"step": 37
},
{
"epoch": 0.48717948717948717,
"grad_norm": 3.550867514464785,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.5327,
"step": 38
},
{
"epoch": 0.5,
"grad_norm": 4.25197469639109,
"learning_rate": 1.0627905195293135e-05,
"loss": 0.5851,
"step": 39
},
{
"epoch": 0.5128205128205128,
"grad_norm": 5.301567871121325,
"learning_rate": 1.0209424198833571e-05,
"loss": 0.6567,
"step": 40
},
{
"epoch": 0.5256410256410257,
"grad_norm": 4.76666007720682,
"learning_rate": 9.790575801166432e-06,
"loss": 0.5344,
"step": 41
},
{
"epoch": 0.5384615384615384,
"grad_norm": 4.628624999659393,
"learning_rate": 9.372094804706867e-06,
"loss": 0.5901,
"step": 42
},
{
"epoch": 0.5512820512820513,
"grad_norm": 2.7785526596724655,
"learning_rate": 8.954715367323468e-06,
"loss": 0.4797,
"step": 43
},
{
"epoch": 0.5641025641025641,
"grad_norm": 3.793194400617578,
"learning_rate": 8.539169714375885e-06,
"loss": 0.5728,
"step": 44
},
{
"epoch": 0.5769230769230769,
"grad_norm": 3.016128749634024,
"learning_rate": 8.126186854142752e-06,
"loss": 0.4808,
"step": 45
},
{
"epoch": 0.5897435897435898,
"grad_norm": 3.992077099503845,
"learning_rate": 7.716491298893443e-06,
"loss": 0.5197,
"step": 46
},
{
"epoch": 0.6025641025641025,
"grad_norm": 4.490369000424712,
"learning_rate": 7.310801793847344e-06,
"loss": 0.4184,
"step": 47
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.33159901322508,
"learning_rate": 6.909830056250527e-06,
"loss": 0.3481,
"step": 48
},
{
"epoch": 0.6282051282051282,
"grad_norm": 2.550092727870004,
"learning_rate": 6.5142795267818505e-06,
"loss": 0.3653,
"step": 49
},
{
"epoch": 0.6410256410256411,
"grad_norm": 2.6572708529729487,
"learning_rate": 6.124844135478971e-06,
"loss": 0.4036,
"step": 50
},
{
"epoch": 0.6538461538461539,
"grad_norm": 4.037851736171114,
"learning_rate": 5.742207084349274e-06,
"loss": 0.5075,
"step": 51
},
{
"epoch": 0.6666666666666666,
"grad_norm": 2.9926759956064877,
"learning_rate": 5.367039648801386e-06,
"loss": 0.4551,
"step": 52
},
{
"epoch": 0.6794871794871795,
"grad_norm": 2.8400263733822477,
"learning_rate": 5.000000000000003e-06,
"loss": 0.515,
"step": 53
},
{
"epoch": 0.6923076923076923,
"grad_norm": 3.8124291528622973,
"learning_rate": 4.641732050210032e-06,
"loss": 0.5086,
"step": 54
},
{
"epoch": 0.7051282051282052,
"grad_norm": 2.841356103484977,
"learning_rate": 4.292864323155684e-06,
"loss": 0.4376,
"step": 55
},
{
"epoch": 0.717948717948718,
"grad_norm": 3.142335016274588,
"learning_rate": 3.954008851376252e-06,
"loss": 0.4039,
"step": 56
},
{
"epoch": 0.7307692307692307,
"grad_norm": 2.852087372917465,
"learning_rate": 3.625760102513103e-06,
"loss": 0.5008,
"step": 57
},
{
"epoch": 0.7435897435897436,
"grad_norm": 3.324712819809194,
"learning_rate": 3.308693936411421e-06,
"loss": 0.4739,
"step": 58
},
{
"epoch": 0.7564102564102564,
"grad_norm": 2.9649459892785535,
"learning_rate": 3.003366594866345e-06,
"loss": 0.5892,
"step": 59
},
{
"epoch": 0.7692307692307693,
"grad_norm": 3.044018836047704,
"learning_rate": 2.7103137257858867e-06,
"loss": 0.4852,
"step": 60
},
{
"epoch": 0.782051282051282,
"grad_norm": 4.050018523402415,
"learning_rate": 2.4300494434824373e-06,
"loss": 0.591,
"step": 61
},
{
"epoch": 0.7948717948717948,
"grad_norm": 2.3281157099809398,
"learning_rate": 2.163065426741603e-06,
"loss": 0.4416,
"step": 62
},
{
"epoch": 0.8076923076923077,
"grad_norm": 2.9262958648242883,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.4393,
"step": 63
},
{
"epoch": 0.8205128205128205,
"grad_norm": 3.884834370223603,
"learning_rate": 1.6707875928990059e-06,
"loss": 0.5305,
"step": 64
},
{
"epoch": 0.8333333333333334,
"grad_norm": 3.5229544410053544,
"learning_rate": 1.446357398394934e-06,
"loss": 0.5226,
"step": 65
},
{
"epoch": 0.8461538461538461,
"grad_norm": 3.550541933958789,
"learning_rate": 1.2369331995613664e-06,
"loss": 0.6602,
"step": 66
},
{
"epoch": 0.8589743589743589,
"grad_norm": 3.077396885216065,
"learning_rate": 1.042882397605871e-06,
"loss": 0.4248,
"step": 67
},
{
"epoch": 0.8717948717948718,
"grad_norm": 3.1902818565830784,
"learning_rate": 8.645454235739903e-07,
"loss": 0.4756,
"step": 68
},
{
"epoch": 0.8846153846153846,
"grad_norm": 2.9790818845625346,
"learning_rate": 7.022351411174866e-07,
"loss": 0.4006,
"step": 69
},
{
"epoch": 0.8974358974358975,
"grad_norm": 4.103022950661447,
"learning_rate": 5.562362976251901e-07,
"loss": 0.5541,
"step": 70
},
{
"epoch": 0.9102564102564102,
"grad_norm": 3.414968428161163,
"learning_rate": 4.268050246793276e-07,
"loss": 0.5879,
"step": 71
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.992490474364374,
"learning_rate": 3.1416838871368925e-07,
"loss": 0.435,
"step": 72
},
{
"epoch": 0.9358974358974359,
"grad_norm": 3.207632291423965,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.4987,
"step": 73
},
{
"epoch": 0.9487179487179487,
"grad_norm": 2.9139113123937044,
"learning_rate": 1.400396292949513e-07,
"loss": 0.4156,
"step": 74
},
{
"epoch": 0.9615384615384616,
"grad_norm": 2.536721514944869,
"learning_rate": 7.885298685522235e-08,
"loss": 0.4533,
"step": 75
},
{
"epoch": 0.9743589743589743,
"grad_norm": 2.586955803538133,
"learning_rate": 3.50714075049563e-08,
"loss": 0.4356,
"step": 76
},
{
"epoch": 0.9871794871794872,
"grad_norm": 2.8019180608719463,
"learning_rate": 8.771699011416169e-09,
"loss": 0.3865,
"step": 77
},
{
"epoch": 1.0,
"grad_norm": 2.8737311595523893,
"learning_rate": 0.0,
"loss": 0.4296,
"step": 78
},
{
"epoch": 1.0,
"step": 78,
"total_flos": 112793145835520.0,
"train_loss": 0.5409963910396283,
"train_runtime": 7617.3016,
"train_samples_per_second": 1.147,
"train_steps_per_second": 0.01
}
],
"logging_steps": 1.0,
"max_steps": 78,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 112793145835520.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}