{
  "best_global_step": 150,
  "best_metric": 0.023379,
  "best_model_checkpoint": "/home/heimer_lan/ms-swift/output/MDC_advice_info_removeEmpty_lmdeploy_0.10.2/v1-20251105-155749/checkpoint-150",
  "epoch": 7.900662251655629,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.052980132450331126,
      "grad_norm": 0.03953635320067406,
      "learning_rate": 1e-05,
      "loss": 0.17028190195560455,
      "step": 1,
      "token_acc": 0.9633507853403142
    },
    {
      "epoch": 0.26490066225165565,
      "grad_norm": 0.029568403959274292,
      "learning_rate": 5e-05,
      "loss": 0.18914003670215607,
      "step": 5,
      "token_acc": 0.9590987370838117
    },
    {
      "epoch": 0.5298013245033113,
      "grad_norm": 0.021308299154043198,
      "learning_rate": 0.0001,
      "loss": 0.15191045999526978,
      "step": 10,
      "token_acc": 0.9626760563380282
    },
    {
      "epoch": 0.7947019867549668,
      "grad_norm": 0.002610551891848445,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.04974102675914764,
      "step": 15,
      "token_acc": 0.9847401557998079
    },
    {
      "epoch": 1.0529801324503312,
      "grad_norm": 0.022480137646198273,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.04363967180252075,
      "step": 20,
      "token_acc": 0.9864999446719044
    },
    {
      "epoch": 1.3178807947019868,
      "grad_norm": 0.008138323202729225,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.04447891116142273,
      "step": 25,
      "token_acc": 0.9850330664810303
    },
    {
      "epoch": 1.5827814569536423,
      "grad_norm": 0.02142256312072277,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.036640477180480954,
      "step": 30,
      "token_acc": 0.9891239392852874
    },
    {
      "epoch": 1.847682119205298,
      "grad_norm": 0.011074935086071491,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.030086925625801085,
      "step": 35,
      "token_acc": 0.9916445893621357
    },
    {
      "epoch": 2.1059602649006623,
      "grad_norm": 0.01294065359979868,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.029381152987480164,
      "step": 40,
      "token_acc": 0.9896373056994818
    },
    {
      "epoch": 2.370860927152318,
      "grad_norm": 0.04500681534409523,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.033383610844612124,
      "step": 45,
      "token_acc": 0.9900879830716115
    },
    {
      "epoch": 2.6357615894039736,
      "grad_norm": 0.0042345100082457066,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.03602198660373688,
      "step": 50,
      "token_acc": 0.9897824257723284
    },
    {
      "epoch": 2.6357615894039736,
      "eval_loss": 0.04839920997619629,
      "eval_runtime": 4.2632,
      "eval_samples_per_second": 9.383,
      "eval_steps_per_second": 9.383,
      "eval_token_acc": 0.9913308341143393,
      "step": 50
    },
    {
      "epoch": 2.9006622516556293,
      "grad_norm": 0.0069412291049957275,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.023833152651786805,
      "step": 55,
      "token_acc": 0.9919428862825089
    },
    {
      "epoch": 3.1589403973509933,
      "grad_norm": 0.007548158057034016,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.025321054458618163,
      "step": 60,
      "token_acc": 0.9920453175846692
    },
    {
      "epoch": 3.423841059602649,
      "grad_norm": 0.012588719837367535,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.019796335697174074,
      "step": 65,
      "token_acc": 0.9945521900196121
    },
    {
      "epoch": 3.6887417218543046,
      "grad_norm": 0.007881580851972103,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.023856239020824434,
      "step": 70,
      "token_acc": 0.9931333192478343
    },
    {
      "epoch": 3.9536423841059603,
      "grad_norm": 0.00023895305639598519,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.04132827818393707,
      "step": 75,
      "token_acc": 0.9889129910821884
    },
    {
      "epoch": 4.211920529801325,
      "grad_norm": 0.027325641363859177,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.022271314263343812,
      "step": 80,
      "token_acc": 0.99291975724882
    },
    {
      "epoch": 4.47682119205298,
      "grad_norm": 0.014654102735221386,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.027605202794075013,
      "step": 85,
      "token_acc": 0.9918279569892473
    },
    {
      "epoch": 4.741721854304636,
      "grad_norm": 0.0015664997044950724,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.021871477365493774,
      "step": 90,
      "token_acc": 0.9924734861443723
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.016345158219337463,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.019794289767742158,
      "step": 95,
      "token_acc": 0.9939984146755747
    },
    {
      "epoch": 5.264900662251655,
      "grad_norm": 0.009140064008533955,
      "learning_rate": 5e-05,
      "loss": 0.023825842142105102,
      "step": 100,
      "token_acc": 0.992155104785386
    },
    {
      "epoch": 5.264900662251655,
      "eval_loss": 0.023827295750379562,
      "eval_runtime": 4.2312,
      "eval_samples_per_second": 9.454,
      "eval_steps_per_second": 9.454,
      "eval_token_acc": 0.992970946579194,
      "step": 100
    },
    {
      "epoch": 5.529801324503311,
      "grad_norm": 0.0024570152163505554,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.019347120821475983,
      "step": 105,
      "token_acc": 0.9931521739130434
    },
    {
      "epoch": 5.7947019867549665,
      "grad_norm": 0.0027291898149996996,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.023801177740097046,
      "step": 110,
      "token_acc": 0.992756804214223
    },
    {
      "epoch": 6.052980132450331,
      "grad_norm": 0.004985196981579065,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.020168283581733705,
      "step": 115,
      "token_acc": 0.9940895658104114
    },
    {
      "epoch": 6.317880794701987,
      "grad_norm": 0.011491105891764164,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.021701890230178832,
      "step": 120,
      "token_acc": 0.994229722373435
    },
    {
      "epoch": 6.582781456953643,
      "grad_norm": 0.004598461091518402,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.023064079880714416,
      "step": 125,
      "token_acc": 0.9941222135965398
    },
    {
      "epoch": 6.847682119205298,
      "grad_norm": 0.0035953347105532885,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.01723257005214691,
      "step": 130,
      "token_acc": 0.9942478429411029
    },
    {
      "epoch": 7.105960264900662,
      "grad_norm": 0.0032730833627283573,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.020366857945919036,
      "step": 135,
      "token_acc": 0.9938676707907477
    },
    {
      "epoch": 7.370860927152318,
      "grad_norm": 0.00479909498244524,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.018506327271461488,
      "step": 140,
      "token_acc": 0.994423377202766
    },
    {
      "epoch": 7.635761589403973,
      "grad_norm": 0.002990534296259284,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.02147827297449112,
      "step": 145,
      "token_acc": 0.9936905143834884
    },
    {
      "epoch": 7.900662251655629,
      "grad_norm": 0.014075534418225288,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.017816001176834108,
      "step": 150,
      "token_acc": 0.9948877528339631
    },
    {
      "epoch": 7.900662251655629,
      "eval_loss": 0.023379003629088402,
      "eval_runtime": 4.255,
      "eval_samples_per_second": 9.401,
      "eval_steps_per_second": 9.401,
      "eval_token_acc": 0.9936738519212746,
      "step": 150
    }
  ],
  "logging_steps": 5,
  "max_steps": 190,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}