{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.08650519031141868,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00865051903114187,
      "grad_norm": 0.653178334236145,
      "learning_rate": 5.142857142857143e-05,
      "loss": 2.462,
      "mean_token_accuracy": 0.5231182813644409,
      "num_tokens": 20480.0,
      "step": 10
    },
    {
      "epoch": 0.01730103806228374,
      "grad_norm": 0.4405156075954437,
      "learning_rate": 0.00010857142857142856,
      "loss": 2.0497,
      "mean_token_accuracy": 0.5873411521315575,
      "num_tokens": 40960.0,
      "step": 20
    },
    {
      "epoch": 0.025951557093425604,
      "grad_norm": 0.6160047054290771,
      "learning_rate": 0.00016571428571428575,
      "loss": 1.9284,
      "mean_token_accuracy": 0.6067448765039444,
      "num_tokens": 61440.0,
      "step": 30
    },
    {
      "epoch": 0.03460207612456748,
      "grad_norm": 0.4909583330154419,
      "learning_rate": 0.00019999371690018224,
      "loss": 1.7739,
      "mean_token_accuracy": 0.6309384137392045,
      "num_tokens": 81920.0,
      "step": 40
    },
    {
      "epoch": 0.04325259515570934,
      "grad_norm": 0.4441400170326233,
      "learning_rate": 0.00019992304109437157,
      "loss": 1.9242,
      "mean_token_accuracy": 0.6014662817120552,
      "num_tokens": 102400.0,
      "step": 50
    },
    {
      "epoch": 0.05190311418685121,
      "grad_norm": 0.5243574976921082,
      "learning_rate": 0.00019977389129787062,
      "loss": 1.8928,
      "mean_token_accuracy": 0.6027370542287827,
      "num_tokens": 122880.0,
      "step": 60
    },
    {
      "epoch": 0.06055363321799308,
      "grad_norm": 0.3737685978412628,
      "learning_rate": 0.00019954638464462175,
      "loss": 1.8107,
      "mean_token_accuracy": 0.6185728281736373,
      "num_tokens": 143360.0,
      "step": 70
    },
    {
      "epoch": 0.06920415224913495,
      "grad_norm": 0.5457460284233093,
      "learning_rate": 0.00019924069980567822,
      "loss": 1.7227,
      "mean_token_accuracy": 0.6354838699102402,
      "num_tokens": 163840.0,
      "step": 80
    },
    {
      "epoch": 0.07785467128027682,
      "grad_norm": 0.41756778955459595,
      "learning_rate": 0.00019885707684888566,
      "loss": 1.7249,
      "mean_token_accuracy": 0.6385141760110855,
      "num_tokens": 184320.0,
      "step": 90
    },
    {
      "epoch": 0.08650519031141868,
      "grad_norm": 0.39207473397254944,
      "learning_rate": 0.00019839581705034624,
      "loss": 1.6423,
      "mean_token_accuracy": 0.6423753708600998,
      "num_tokens": 204800.0,
      "step": 100
    }
  ],
  "logging_steps": 10,
  "max_steps": 1156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3493553661542400.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}