| { | |
| "best_global_step": null, | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 0.08, | |
| "eval_steps": 50, | |
| "global_step": 200, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0004, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 31.350610733032227, | |
| "kl": 0.0, | |
| "learning_rate": 0.0, | |
| "loss": -0.0, | |
| "num_tokens": 8880.0, | |
| "reward": 0.8125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.8125, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 1 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0008, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 40.88920593261719, | |
| "kl": 0.0, | |
| "learning_rate": 4e-08, | |
| "loss": 0.0, | |
| "num_tokens": 17760.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 2 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0012, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 41.24170684814453, | |
| "kl": 0.0013443923089653254, | |
| "learning_rate": 8e-08, | |
| "loss": 0.0, | |
| "num_tokens": 26640.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 3 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0016, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 0.025073383003473282, | |
| "kl": 0.0012363345449557528, | |
| "learning_rate": 1.2e-07, | |
| "loss": 0.0, | |
| "num_tokens": 35520.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 4 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.002, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 45.65277862548828, | |
| "kl": 0.0009983214986277744, | |
| "learning_rate": 1.6e-07, | |
| "loss": 0.0, | |
| "num_tokens": 44400.0, | |
| "reward": 0.625, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 5 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0024, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 40.60847854614258, | |
| "kl": 0.0010500029457034543, | |
| "learning_rate": 2e-07, | |
| "loss": 0.0, | |
| "num_tokens": 53280.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 6 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0028, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 37.455440521240234, | |
| "kl": 0.0013627598236780614, | |
| "learning_rate": 2.4e-07, | |
| "loss": 0.0, | |
| "num_tokens": 62160.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 7 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0032, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 49.363807678222656, | |
| "kl": 0.0019289999618194997, | |
| "learning_rate": 2.8e-07, | |
| "loss": 0.0, | |
| "num_tokens": 71040.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 8 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0036, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 0.055168308317661285, | |
| "kl": 0.0023864044342190027, | |
| "learning_rate": 3.2e-07, | |
| "loss": 0.0, | |
| "num_tokens": 79920.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 9 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.004, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 27.4397029876709, | |
| "kl": 0.0029534806380979717, | |
| "learning_rate": 3.6e-07, | |
| "loss": 0.0, | |
| "num_tokens": 88800.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 10 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0044, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 34.00825881958008, | |
| "kl": 0.004601971711963415, | |
| "learning_rate": 4e-07, | |
| "loss": 0.0, | |
| "num_tokens": 97680.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 11 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0048, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 41.115177154541016, | |
| "kl": 0.005590100074186921, | |
| "learning_rate": 4.3999999999999997e-07, | |
| "loss": 0.0, | |
| "num_tokens": 106560.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 12 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0052, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 37.690364837646484, | |
| "kl": 0.008083905442617834, | |
| "learning_rate": 4.8e-07, | |
| "loss": 0.0, | |
| "num_tokens": 115440.0, | |
| "reward": 0.25, | |
| "reward_std": 0.28867512941360474, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 13 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0056, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 44.36626052856445, | |
| "kl": 0.01064743846654892, | |
| "learning_rate": 5.2e-07, | |
| "loss": 0.0, | |
| "num_tokens": 124320.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 14 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.006, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 35.651023864746094, | |
| "kl": 0.01316027413122356, | |
| "learning_rate": 5.6e-07, | |
| "loss": 0.0001, | |
| "num_tokens": 133200.0, | |
| "reward": 0.75, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.75, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 15 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0064, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 52.94963836669922, | |
| "kl": 0.020378376357257366, | |
| "learning_rate": 6e-07, | |
| "loss": 0.0001, | |
| "num_tokens": 142080.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 16 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0068, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 36.93665313720703, | |
| "kl": 0.025172261521220207, | |
| "learning_rate": 6.4e-07, | |
| "loss": 0.0001, | |
| "num_tokens": 150960.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 17 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0072, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 0.5544130206108093, | |
| "kl": 0.032024398911744356, | |
| "learning_rate": 6.800000000000001e-07, | |
| "loss": 0.0001, | |
| "num_tokens": 159840.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 18 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0076, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 44.66011047363281, | |
| "kl": 0.039961141999810934, | |
| "learning_rate": 7.2e-07, | |
| "loss": 0.0002, | |
| "num_tokens": 168720.0, | |
| "reward": 0.25, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 19 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.008, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 28.504152297973633, | |
| "kl": 0.036296515725553036, | |
| "learning_rate": 7.599999999999999e-07, | |
| "loss": 0.0001, | |
| "num_tokens": 177600.0, | |
| "reward": 0.9375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.9375, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 20 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0084, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 50.410888671875, | |
| "kl": 0.04341549798846245, | |
| "learning_rate": 8e-07, | |
| "loss": 0.0002, | |
| "num_tokens": 186480.0, | |
| "reward": 0.375, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 21 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0088, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 51.3606071472168, | |
| "kl": 0.050860715098679066, | |
| "learning_rate": 8.399999999999999e-07, | |
| "loss": 0.0002, | |
| "num_tokens": 195360.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 22 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0092, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 24.91278076171875, | |
| "kl": 0.06213559303432703, | |
| "learning_rate": 8.799999999999999e-07, | |
| "loss": 0.0002, | |
| "num_tokens": 204240.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 23 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0096, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 39.62419891357422, | |
| "kl": 0.0647846395149827, | |
| "learning_rate": 9.2e-07, | |
| "loss": 0.0003, | |
| "num_tokens": 213120.0, | |
| "reward": 0.625, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 24 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.01, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 63.561767578125, | |
| "kl": 0.0892756637185812, | |
| "learning_rate": 9.6e-07, | |
| "loss": 0.0004, | |
| "num_tokens": 222000.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 25 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0104, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 42.179954528808594, | |
| "kl": 0.10400468856096268, | |
| "learning_rate": 1e-06, | |
| "loss": 0.0004, | |
| "num_tokens": 230880.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 26 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0108, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 25.707307815551758, | |
| "kl": 0.1136582437902689, | |
| "learning_rate": 1.04e-06, | |
| "loss": 0.0005, | |
| "num_tokens": 239760.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 27 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0112, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 26.41197967529297, | |
| "kl": 0.13468273170292377, | |
| "learning_rate": 1.08e-06, | |
| "loss": 0.0005, | |
| "num_tokens": 248640.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 28 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0116, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 35.851348876953125, | |
| "kl": 0.1477714516222477, | |
| "learning_rate": 1.12e-06, | |
| "loss": 0.0006, | |
| "num_tokens": 257520.0, | |
| "reward": 0.625, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 29 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.012, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 29.354488372802734, | |
| "kl": 0.14891785010695457, | |
| "learning_rate": 1.16e-06, | |
| "loss": 0.0006, | |
| "num_tokens": 266400.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 30 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0124, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 39.174476623535156, | |
| "kl": 0.15899883769452572, | |
| "learning_rate": 1.2e-06, | |
| "loss": 0.0006, | |
| "num_tokens": 275280.0, | |
| "reward": 0.25, | |
| "reward_std": 0.28867512941360474, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 31 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0128, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 44.24154281616211, | |
| "kl": 0.22540221363306046, | |
| "learning_rate": 1.24e-06, | |
| "loss": 0.0009, | |
| "num_tokens": 284160.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 32 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0132, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 32.92380142211914, | |
| "kl": 0.19463030248880386, | |
| "learning_rate": 1.28e-06, | |
| "loss": 0.0008, | |
| "num_tokens": 293040.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 33 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0136, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 32.65846633911133, | |
| "kl": 0.2380228154361248, | |
| "learning_rate": 1.32e-06, | |
| "loss": 0.001, | |
| "num_tokens": 301920.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 34 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.014, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 77.5787582397461, | |
| "kl": 0.21238164231181145, | |
| "learning_rate": 1.3600000000000001e-06, | |
| "loss": 0.0008, | |
| "num_tokens": 310800.0, | |
| "reward": 0.125, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 35 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0144, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 29.662382125854492, | |
| "kl": 0.29322075098752975, | |
| "learning_rate": 1.4e-06, | |
| "loss": 0.0012, | |
| "num_tokens": 319680.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 36 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0148, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 48.11992263793945, | |
| "kl": 0.25715991109609604, | |
| "learning_rate": 1.44e-06, | |
| "loss": 0.001, | |
| "num_tokens": 328560.0, | |
| "reward": 0.125, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 37 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0152, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 38.42544937133789, | |
| "kl": 0.2538900189101696, | |
| "learning_rate": 1.48e-06, | |
| "loss": 0.001, | |
| "num_tokens": 337440.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 38 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0156, | |
| "frac_reward_zero_std": 0.0, | |
| "grad_norm": 76.46257019042969, | |
| "kl": 0.29158805310726166, | |
| "learning_rate": 1.5199999999999998e-06, | |
| "loss": 0.0012, | |
| "num_tokens": 346320.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.5580127239227295, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 39 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.016, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 50.54710388183594, | |
| "kl": 0.42541511356830597, | |
| "learning_rate": 1.5599999999999999e-06, | |
| "loss": 0.0017, | |
| "num_tokens": 355200.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 40 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0164, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 47.023494720458984, | |
| "kl": 0.3670000210404396, | |
| "learning_rate": 1.6e-06, | |
| "loss": 0.0015, | |
| "num_tokens": 364080.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 41 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0168, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 32.30452346801758, | |
| "kl": 0.38483476638793945, | |
| "learning_rate": 1.6399999999999998e-06, | |
| "loss": 0.0015, | |
| "num_tokens": 372960.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 42 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0172, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 42.685611724853516, | |
| "kl": 0.3910580351948738, | |
| "learning_rate": 1.6799999999999998e-06, | |
| "loss": 0.0016, | |
| "num_tokens": 381840.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 43 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0176, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 39.6999626159668, | |
| "kl": 0.4346514791250229, | |
| "learning_rate": 1.7199999999999998e-06, | |
| "loss": 0.0017, | |
| "num_tokens": 390720.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 44 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.018, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 32.37580490112305, | |
| "kl": 0.38541124016046524, | |
| "learning_rate": 1.7599999999999999e-06, | |
| "loss": 0.0015, | |
| "num_tokens": 399600.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 45 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0184, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 37.88615798950195, | |
| "kl": 0.3760378360748291, | |
| "learning_rate": 1.8e-06, | |
| "loss": 0.0015, | |
| "num_tokens": 408480.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 46 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0188, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 50.25699996948242, | |
| "kl": 0.4850776717066765, | |
| "learning_rate": 1.84e-06, | |
| "loss": 0.0019, | |
| "num_tokens": 417360.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 47 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0192, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 0.5161222815513611, | |
| "kl": 0.3658723160624504, | |
| "learning_rate": 1.8799999999999998e-06, | |
| "loss": 0.0015, | |
| "num_tokens": 426240.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 48 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0196, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 37.257171630859375, | |
| "kl": 0.3248850479722023, | |
| "learning_rate": 1.92e-06, | |
| "loss": 0.0013, | |
| "num_tokens": 435120.0, | |
| "reward": 0.875, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.875, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 49 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.02, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 40.35070037841797, | |
| "kl": 0.3114013075828552, | |
| "learning_rate": 1.96e-06, | |
| "loss": 0.0012, | |
| "num_tokens": 444000.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 50 | |
| }, | |
| { | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.02, | |
| "eval_loss": 0.0003695653285831213, | |
| "eval_runtime": 12968.8828, | |
| "eval_samples_per_second": 0.015, | |
| "eval_steps_per_second": 0.004, | |
| "frac_reward_zero_std": 0.54, | |
| "num_tokens": 555000.0, | |
| "reward": 0.445, | |
| "reward_std": 0.23928203105926513, | |
| "rewards/compute_reward/mean": 0.445, | |
| "rewards/compute_reward/std": 0.23928203105926513, | |
| "step": 50 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0204, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 35.21977233886719, | |
| "kl": 0.40479009598493576, | |
| "learning_rate": 2e-06, | |
| "loss": -0.1234, | |
| "num_tokens": 563880.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 51 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0208, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 17.721899032592773, | |
| "kl": 0.3238118216395378, | |
| "learning_rate": 1.9999756307053944e-06, | |
| "loss": 0.1263, | |
| "num_tokens": 572760.0, | |
| "reward": 0.875, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.875, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 52 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0212, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 48.17813491821289, | |
| "kl": 0.4712698385119438, | |
| "learning_rate": 1.999902524009304e-06, | |
| "loss": -0.1231, | |
| "num_tokens": 581640.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 53 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0216, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 21.3551025390625, | |
| "kl": 0.4696747958660126, | |
| "learning_rate": 1.999780683474845e-06, | |
| "loss": 0.1269, | |
| "num_tokens": 590520.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 54 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.022, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 42.38340377807617, | |
| "kl": 0.5665004104375839, | |
| "learning_rate": 1.999610115040354e-06, | |
| "loss": 0.1272, | |
| "num_tokens": 599400.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 55 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0224, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 40.7006721496582, | |
| "kl": 0.4810464084148407, | |
| "learning_rate": 1.9993908270190957e-06, | |
| "loss": -0.248, | |
| "num_tokens": 608280.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 56 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0228, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 31.320539474487305, | |
| "kl": 0.44268205761909485, | |
| "learning_rate": 1.999122830098858e-06, | |
| "loss": 0.7516, | |
| "num_tokens": 617160.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.375, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 57 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0232, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 46.34135818481445, | |
| "kl": 0.4858321249485016, | |
| "learning_rate": 1.998806137341434e-06, | |
| "loss": -0.8729, | |
| "num_tokens": 626040.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 58 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0236, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 50.77784729003906, | |
| "kl": 0.4124316945672035, | |
| "learning_rate": 1.998440764181981e-06, | |
| "loss": 0.4681, | |
| "num_tokens": 634920.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 59 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.024, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 62.64232635498047, | |
| "kl": 0.4631865471601486, | |
| "learning_rate": 1.9980267284282714e-06, | |
| "loss": -0.0896, | |
| "num_tokens": 643800.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.375, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 60 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0244, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 55.10979080200195, | |
| "kl": 0.3911132216453552, | |
| "learning_rate": 1.997564050259824e-06, | |
| "loss": 0.1265, | |
| "num_tokens": 652680.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 61 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0248, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 75.01067352294922, | |
| "kl": 0.4750595986843109, | |
| "learning_rate": 1.99705275222692e-06, | |
| "loss": -0.8729, | |
| "num_tokens": 661560.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.375, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 62 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0252, | |
| "frac_reward_zero_std": 0.0, | |
| "grad_norm": 69.7686996459961, | |
| "kl": 0.4622643366456032, | |
| "learning_rate": 1.9964928592495045e-06, | |
| "loss": -0.3061, | |
| "num_tokens": 670440.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.51933753490448, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 63 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0256, | |
| "frac_reward_zero_std": 0.0, | |
| "grad_norm": 78.1102294921875, | |
| "kl": 0.4316819906234741, | |
| "learning_rate": 1.99588439861597e-06, | |
| "loss": 0.8431, | |
| "num_tokens": 679320.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.51933753490448, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 64 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.026, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 70.63719940185547, | |
| "kl": 0.4158880114555359, | |
| "learning_rate": 1.995227399981831e-06, | |
| "loss": 0.0597, | |
| "num_tokens": 688200.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 65 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0264, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 56.348411560058594, | |
| "kl": 0.30301499366760254, | |
| "learning_rate": 1.994521895368273e-06, | |
| "loss": -0.2153, | |
| "num_tokens": 697080.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 66 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0268, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 25.096176147460938, | |
| "kl": 0.27180512621998787, | |
| "learning_rate": 1.9937679191605962e-06, | |
| "loss": 0.7175, | |
| "num_tokens": 705960.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 67 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0272, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 43.11947250366211, | |
| "kl": 0.27942386642098427, | |
| "learning_rate": 1.992965508106537e-06, | |
| "loss": -0.3403, | |
| "num_tokens": 714840.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 68 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0276, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 38.26469802856445, | |
| "kl": 0.24121180921792984, | |
| "learning_rate": 1.9921147013144777e-06, | |
| "loss": -0.124, | |
| "num_tokens": 723720.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 69 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.028, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 16.814804077148438, | |
| "kl": 0.2414722703397274, | |
| "learning_rate": 1.9912155402515414e-06, | |
| "loss": 0.001, | |
| "num_tokens": 732600.0, | |
| "reward": 0.0, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.0, | |
| "rewards/compute_reward/std": 0.0, | |
| "step": 70 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0284, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 58.598567962646484, | |
| "kl": 0.4162477105855942, | |
| "learning_rate": 1.99026806874157e-06, | |
| "loss": -0.2483, | |
| "num_tokens": 741480.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 71 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0288, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 56.5173454284668, | |
| "kl": 0.30467043071985245, | |
| "learning_rate": 1.9892723329629885e-06, | |
| "loss": 0.2512, | |
| "num_tokens": 750360.0, | |
| "reward": 0.25, | |
| "reward_std": 0.28867512941360474, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 72 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0292, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 88.95624542236328, | |
| "kl": 0.35381778329610825, | |
| "learning_rate": 1.9882283814465526e-06, | |
| "loss": 0.5593, | |
| "num_tokens": 759240.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 73 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0296, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 90.53428649902344, | |
| "kl": 0.3509898856282234, | |
| "learning_rate": 1.987136265072988e-06, | |
| "loss": -0.6815, | |
| "num_tokens": 768120.0, | |
| "reward": 0.625, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 74 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.03, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 40.467559814453125, | |
| "kl": 0.27240653708577156, | |
| "learning_rate": 1.985996037070505e-06, | |
| "loss": 0.501, | |
| "num_tokens": 777000.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 75 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0304, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 42.016510009765625, | |
| "kl": 0.3176127299666405, | |
| "learning_rate": 1.984807753012208e-06, | |
| "loss": -0.3737, | |
| "num_tokens": 785880.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 76 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0308, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 42.70841598510742, | |
| "kl": 0.40913257747888565, | |
| "learning_rate": 1.983571470813386e-06, | |
| "loss": 0.2181, | |
| "num_tokens": 794760.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 77 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0312, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 48.8704833984375, | |
| "kl": 0.30612240731716156, | |
| "learning_rate": 1.9822872507286887e-06, | |
| "loss": -0.4652, | |
| "num_tokens": 803640.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 78 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0316, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 10.744608879089355, | |
| "kl": 0.3573462963104248, | |
| "learning_rate": 1.9809551553491913e-06, | |
| "loss": 0.2514, | |
| "num_tokens": 812520.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 79 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.032, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 44.66023254394531, | |
| "kl": 0.2872606888413429, | |
| "learning_rate": 1.979575249599344e-06, | |
| "loss": 0.1596, | |
| "num_tokens": 821400.0, | |
| "reward": 0.625, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 80 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0324, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 48.97246170043945, | |
| "kl": 0.41179851442575455, | |
| "learning_rate": 1.9781476007338054e-06, | |
| "loss": -0.2818, | |
| "num_tokens": 830280.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 81 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0328, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 47.648284912109375, | |
| "kl": 0.361647367477417, | |
| "learning_rate": 1.9766722783341677e-06, | |
| "loss": -0.09, | |
| "num_tokens": 839160.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 82 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0332, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 68.1531982421875, | |
| "kl": 0.35677963495254517, | |
| "learning_rate": 1.975149354305563e-06, | |
| "loss": 0.0929, | |
| "num_tokens": 848040.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 83 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0336, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 25.728612899780273, | |
| "kl": 0.299656730145216, | |
| "learning_rate": 1.97357890287316e-06, | |
| "loss": 0.1262, | |
| "num_tokens": 856920.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 84 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.034, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 43.74058151245117, | |
| "kl": 0.3780274763703346, | |
| "learning_rate": 1.9719610005785463e-06, | |
| "loss": -0.215, | |
| "num_tokens": 865800.0, | |
| "reward": 0.375, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 85 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0344, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 22.789318084716797, | |
| "kl": 0.361059308052063, | |
| "learning_rate": 1.9702957262759963e-06, | |
| "loss": 0.2179, | |
| "num_tokens": 874680.0, | |
| "reward": 0.0, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.0, | |
| "rewards/compute_reward/std": 0.0, | |
| "step": 86 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0348, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 35.32533264160156, | |
| "kl": 0.44058068096637726, | |
| "learning_rate": 1.968583161128631e-06, | |
| "loss": 0.0018, | |
| "num_tokens": 883560.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 87 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0352, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 48.396873474121094, | |
| "kl": 0.40034161880612373, | |
| "learning_rate": 1.9668233886044593e-06, | |
| "loss": 0.3765, | |
| "num_tokens": 892440.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 88 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0356, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 51.92245101928711, | |
| "kl": 0.582250103354454, | |
| "learning_rate": 1.9650164944723112e-06, | |
| "loss": -0.2811, | |
| "num_tokens": 901320.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 89 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.036, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 39.04520797729492, | |
| "kl": 0.46102364361286163, | |
| "learning_rate": 1.963162566797658e-06, | |
| "loss": 0.1603, | |
| "num_tokens": 910200.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 90 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0364, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 64.48860168457031, | |
| "kl": 0.7562894448637962, | |
| "learning_rate": 1.9612616959383188e-06, | |
| "loss": 0.2195, | |
| "num_tokens": 919080.0, | |
| "reward": 0.625, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 91 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0368, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 71.13645935058594, | |
| "kl": 0.584452673792839, | |
| "learning_rate": 1.9593139745400573e-06, | |
| "loss": -0.2141, | |
| "num_tokens": 927960.0, | |
| "reward": 0.625, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 92 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0372, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 44.049346923828125, | |
| "kl": 0.5378477424383163, | |
| "learning_rate": 1.957319497532067e-06, | |
| "loss": -0.2143, | |
| "num_tokens": 936840.0, | |
| "reward": 0.5, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 93 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0376, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 75.5696792602539, | |
| "kl": 0.5544816628098488, | |
| "learning_rate": 1.9552783621223435e-06, | |
| "loss": 0.5601, | |
| "num_tokens": 945720.0, | |
| "reward": 0.75, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.75, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 94 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.038, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 17.455116271972656, | |
| "kl": 0.3371001221239567, | |
| "learning_rate": 1.953190667792947e-06, | |
| "loss": -0.59, | |
| "num_tokens": 954600.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 95 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0384, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 59.06554412841797, | |
| "kl": 0.3672589622437954, | |
| "learning_rate": 1.9510565162951534e-06, | |
| "loss": 0.2179, | |
| "num_tokens": 963480.0, | |
| "reward": 0.625, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 96 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0388, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 22.509157180786133, | |
| "kl": 0.18590721115469933, | |
| "learning_rate": 1.9488760116444964e-06, | |
| "loss": -0.3407, | |
| "num_tokens": 972360.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 97 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0392, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 36.51370620727539, | |
| "kl": 0.25807878002524376, | |
| "learning_rate": 1.9466492601156963e-06, | |
| "loss": 0.126, | |
| "num_tokens": 981240.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 98 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0396, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 13.788187026977539, | |
| "kl": 0.23087293654680252, | |
| "learning_rate": 1.944376370237481e-06, | |
| "loss": 0.2509, | |
| "num_tokens": 990120.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 99 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.04, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 8.3355073928833, | |
| "kl": 0.1271139644086361, | |
| "learning_rate": 1.9420574527872966e-06, | |
| "loss": -0.1245, | |
| "num_tokens": 999000.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 100 | |
| }, | |
| { | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.04, | |
| "eval_loss": -0.00047406076919287443, | |
| "eval_runtime": 10044.1197, | |
| "eval_samples_per_second": 0.02, | |
| "eval_steps_per_second": 0.005, | |
| "frac_reward_zero_std": 0.66, | |
| "num_tokens": 1110000.0, | |
| "reward": 0.415, | |
| "reward_std": 0.17928203105926513, | |
| "rewards/compute_reward/mean": 0.415, | |
| "rewards/compute_reward/std": 0.17928203105926513, | |
| "step": 100 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0404, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 1.7402211427688599, | |
| "kl": 0.12196930311620235, | |
| "learning_rate": 1.9396926207859082e-06, | |
| "loss": 0.217, | |
| "num_tokens": 1118880.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 101 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0408, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 7.048949241638184, | |
| "kl": 0.19601425155997276, | |
| "learning_rate": 1.9372819894918914e-06, | |
| "loss": 0.0008, | |
| "num_tokens": 1127760.0, | |
| "reward": 0.75, | |
| "reward_std": 0.28867512941360474, | |
| "rewards/compute_reward/mean": 0.75, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 102 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0412, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 14.87850284576416, | |
| "kl": 0.20362638682127, | |
| "learning_rate": 1.9348256763960142e-06, | |
| "loss": -0.2157, | |
| "num_tokens": 1136640.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 103 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0416, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 19.110157012939453, | |
| "kl": 0.17501263320446014, | |
| "learning_rate": 1.9323238012155122e-06, | |
| "loss": 0.0007, | |
| "num_tokens": 1145520.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 104 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.042, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 16.843177795410156, | |
| "kl": 0.2675781920552254, | |
| "learning_rate": 1.929776485888251e-06, | |
| "loss": 0.376, | |
| "num_tokens": 1154400.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 105 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0424, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 24.672325134277344, | |
| "kl": 0.2085116021335125, | |
| "learning_rate": 1.9271838545667875e-06, | |
| "loss": -0.3741, | |
| "num_tokens": 1163280.0, | |
| "reward": 0.625, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 106 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0428, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 3.470179319381714, | |
| "kl": 0.3066542334854603, | |
| "learning_rate": 1.9245460336123133e-06, | |
| "loss": 0.1262, | |
| "num_tokens": 1172160.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 107 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0432, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 33.54941940307617, | |
| "kl": 0.36576371639966965, | |
| "learning_rate": 1.9218631515885003e-06, | |
| "loss": 0.1264, | |
| "num_tokens": 1181040.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 108 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0436, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 92.37312316894531, | |
| "kl": 1.9885881915688515, | |
| "learning_rate": 1.9191353392552343e-06, | |
| "loss": 0.008, | |
| "num_tokens": 1189920.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 109 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.044, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 33.686317443847656, | |
| "kl": 0.45819035917520523, | |
| "learning_rate": 1.9163627295622395e-06, | |
| "loss": -0.3731, | |
| "num_tokens": 1198800.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 110 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0444, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 25.443355560302734, | |
| "kl": 0.4073995351791382, | |
| "learning_rate": 1.9135454576426007e-06, | |
| "loss": -0.4983, | |
| "num_tokens": 1207680.0, | |
| "reward": 0.625, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 111 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0448, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 48.13676834106445, | |
| "kl": 0.4856240525841713, | |
| "learning_rate": 1.910683660806177e-06, | |
| "loss": 0.4103, | |
| "num_tokens": 1216560.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 112 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0452, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 26.295862197875977, | |
| "kl": 0.3765817731618881, | |
| "learning_rate": 1.9077774785329087e-06, | |
| "loss": 0.5929, | |
| "num_tokens": 1225440.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 113 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0456, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 60.09946823120117, | |
| "kl": 0.5815477445721626, | |
| "learning_rate": 1.9048270524660196e-06, | |
| "loss": -0.5891, | |
| "num_tokens": 1234320.0, | |
| "reward": 0.25, | |
| "reward_std": 0.28867512941360474, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 114 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.046, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 43.9576530456543, | |
| "kl": 0.48454301804304123, | |
| "learning_rate": 1.9018325264051138e-06, | |
| "loss": 0.4349, | |
| "num_tokens": 1243200.0, | |
| "reward": 0.625, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 115 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0464, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 34.4670295715332, | |
| "kl": 0.6097010597586632, | |
| "learning_rate": 1.8987940462991669e-06, | |
| "loss": 0.4689, | |
| "num_tokens": 1252080.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 116 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0468, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 44.33097839355469, | |
| "kl": 0.6179208308458328, | |
| "learning_rate": 1.8957117602394128e-06, | |
| "loss": -0.8969, | |
| "num_tokens": 1260960.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 117 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0472, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 36.61655044555664, | |
| "kl": 0.6962894722819328, | |
| "learning_rate": 1.8925858184521255e-06, | |
| "loss": -0.3721, | |
| "num_tokens": 1269840.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 118 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0476, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 80.13101959228516, | |
| "kl": 0.6878734081983566, | |
| "learning_rate": 1.8894163732912974e-06, | |
| "loss": 0.3442, | |
| "num_tokens": 1278720.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.375, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 119 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.048, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 90.39923858642578, | |
| "kl": 0.7930669263005257, | |
| "learning_rate": 1.8862035792312146e-06, | |
| "loss": 0.1281, | |
| "num_tokens": 1287600.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 120 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0484, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 37.05628204345703, | |
| "kl": 0.7748868465423584, | |
| "learning_rate": 1.8829475928589268e-06, | |
| "loss": 0.1281, | |
| "num_tokens": 1296480.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 121 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0488, | |
| "frac_reward_zero_std": 0.0, | |
| "grad_norm": 85.06033325195312, | |
| "kl": 0.9438418298959732, | |
| "learning_rate": 1.8796485728666165e-06, | |
| "loss": -0.6791, | |
| "num_tokens": 1305360.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.51933753490448, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 122 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0492, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 116.63643646240234, | |
| "kl": 1.1643976122140884, | |
| "learning_rate": 1.8763066800438634e-06, | |
| "loss": 0.9375, | |
| "num_tokens": 1314240.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 123 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0496, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 87.37930297851562, | |
| "kl": 1.1998874247074127, | |
| "learning_rate": 1.8729220772698095e-06, | |
| "loss": -0.1202, | |
| "num_tokens": 1323120.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 124 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.05, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 78.85758972167969, | |
| "kl": 1.4393538534641266, | |
| "learning_rate": 1.869494929505219e-06, | |
| "loss": 0.0058, | |
| "num_tokens": 1332000.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 125 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0504, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 103.32394409179688, | |
| "kl": 2.1740673780441284, | |
| "learning_rate": 1.8660254037844386e-06, | |
| "loss": 0.1337, | |
| "num_tokens": 1340880.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.375, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 126 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0508, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 121.61659240722656, | |
| "kl": 1.3770731836557388, | |
| "learning_rate": 1.8625136692072574e-06, | |
| "loss": 0.097, | |
| "num_tokens": 1349760.0, | |
| "reward": 0.25, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 127 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0512, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 87.8947525024414, | |
| "kl": 1.433599203824997, | |
| "learning_rate": 1.8589598969306644e-06, | |
| "loss": 0.0972, | |
| "num_tokens": 1358640.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 128 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0516, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 99.49671936035156, | |
| "kl": 1.3952637314796448, | |
| "learning_rate": 1.8553642601605066e-06, | |
| "loss": -0.0524, | |
| "num_tokens": 1367520.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 129 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.052, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 44.72431564331055, | |
| "kl": 1.3837022185325623, | |
| "learning_rate": 1.8517269341430474e-06, | |
| "loss": -0.6193, | |
| "num_tokens": 1376400.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 130 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0524, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 72.39948272705078, | |
| "kl": 1.6086200773715973, | |
| "learning_rate": 1.8480480961564257e-06, | |
| "loss": 0.3479, | |
| "num_tokens": 1385280.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 131 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0528, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 75.30799102783203, | |
| "kl": 1.511486679315567, | |
| "learning_rate": 1.844327925502015e-06, | |
| "loss": 0.0395, | |
| "num_tokens": 1394160.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 132 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0532, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 67.3117446899414, | |
| "kl": 1.4110450446605682, | |
| "learning_rate": 1.8405666034956842e-06, | |
| "loss": -0.5523, | |
| "num_tokens": 1403040.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 133 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0536, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 91.60472869873047, | |
| "kl": 1.495105355978012, | |
| "learning_rate": 1.8367643134589616e-06, | |
| "loss": 0.2224, | |
| "num_tokens": 1411920.0, | |
| "reward": 0.5, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 134 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.054, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 89.93647766113281, | |
| "kl": 1.5621980130672455, | |
| "learning_rate": 1.8329212407100993e-06, | |
| "loss": 0.2227, | |
| "num_tokens": 1420800.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 135 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0544, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 38.31406784057617, | |
| "kl": 1.4404120743274689, | |
| "learning_rate": 1.8290375725550415e-06, | |
| "loss": 0.0058, | |
| "num_tokens": 1429680.0, | |
| "reward": 0.0, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.0, | |
| "rewards/compute_reward/std": 0.0, | |
| "step": 136 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0548, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 67.23009490966797, | |
| "kl": 1.8493570387363434, | |
| "learning_rate": 1.825113498278295e-06, | |
| "loss": 0.2239, | |
| "num_tokens": 1438560.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 137 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0552, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 26.11929702758789, | |
| "kl": 1.655295968055725, | |
| "learning_rate": 1.821149209133704e-06, | |
| "loss": -0.2098, | |
| "num_tokens": 1447440.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 138 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0556, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 68.21998596191406, | |
| "kl": 1.6474502980709076, | |
| "learning_rate": 1.8171448983351283e-06, | |
| "loss": -0.1184, | |
| "num_tokens": 1456320.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 139 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.056, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 74.41326141357422, | |
| "kl": 1.287375420331955, | |
| "learning_rate": 1.8131007610470274e-06, | |
| "loss": 0.0051, | |
| "num_tokens": 1465200.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 140 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0564, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 69.33372497558594, | |
| "kl": 1.3127607554197311, | |
| "learning_rate": 1.8090169943749474e-06, | |
| "loss": 0.8801, | |
| "num_tokens": 1474080.0, | |
| "reward": 0.5, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 141 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0568, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 76.3515625, | |
| "kl": 1.296535700559616, | |
| "learning_rate": 1.804893797355914e-06, | |
| "loss": -0.8027, | |
| "num_tokens": 1482960.0, | |
| "reward": 0.375, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 142 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0572, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 87.84119415283203, | |
| "kl": 1.1495089530944824, | |
| "learning_rate": 1.8007313709487333e-06, | |
| "loss": -0.1873, | |
| "num_tokens": 1491840.0, | |
| "reward": 0.0625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.0625, | |
| "rewards/compute_reward/std": 0.25, | |
| "step": 143 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0576, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 45.25433349609375, | |
| "kl": 1.1036857962608337, | |
| "learning_rate": 1.7965299180241961e-06, | |
| "loss": 0.2209, | |
| "num_tokens": 1500720.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 144 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.058, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 48.654361724853516, | |
| "kl": 1.1962954103946686, | |
| "learning_rate": 1.7922896433551906e-06, | |
| "loss": 0.5382, | |
| "num_tokens": 1509600.0, | |
| "reward": 0.125, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 145 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0584, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 56.95619201660156, | |
| "kl": 1.3726016283035278, | |
| "learning_rate": 1.7880107536067217e-06, | |
| "loss": -0.2445, | |
| "num_tokens": 1518480.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 146 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0588, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 81.80111694335938, | |
| "kl": 1.270168662071228, | |
| "learning_rate": 1.7836934573258397e-06, | |
| "loss": 0.0051, | |
| "num_tokens": 1527360.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 147 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0592, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 55.87933349609375, | |
| "kl": 1.5507261157035828, | |
| "learning_rate": 1.7793379649314742e-06, | |
| "loss": -0.4602, | |
| "num_tokens": 1536240.0, | |
| "reward": 0.625, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 148 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0596, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 40.03895950317383, | |
| "kl": 1.3443613648414612, | |
| "learning_rate": 1.7749444887041795e-06, | |
| "loss": -0.2446, | |
| "num_tokens": 1545120.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 149 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.06, | |
| "frac_reward_zero_std": 0.0, | |
| "grad_norm": 60.483856201171875, | |
| "kl": 1.244696855545044, | |
| "learning_rate": 1.7705132427757892e-06, | |
| "loss": 0.2884, | |
| "num_tokens": 1554000.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.51933753490448, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 150 | |
| }, | |
| { | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.06, | |
| "eval_loss": -0.0004801125905942172, | |
| "eval_runtime": 5038.6351, | |
| "eval_samples_per_second": 0.04, | |
| "eval_steps_per_second": 0.01, | |
| "frac_reward_zero_std": 0.64, | |
| "num_tokens": 1665000.0, | |
| "reward": 0.37, | |
| "reward_std": 0.18928203105926514, | |
| "rewards/compute_reward/mean": 0.37, | |
| "rewards/compute_reward/std": 0.18928203105926514, | |
| "step": 150 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0604, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 102.57103729248047, | |
| "kl": 0.8290184140205383, | |
| "learning_rate": 1.766044443118978e-06, | |
| "loss": 0.5032, | |
| "num_tokens": 1673880.0, | |
| "reward": 0.0, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.0, | |
| "rewards/compute_reward/std": 0.0, | |
| "step": 151 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0608, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 10.56777286529541, | |
| "kl": 1.1542262136936188, | |
| "learning_rate": 1.7615383075367368e-06, | |
| "loss": 0.2211, | |
| "num_tokens": 1682760.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 152 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0612, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 37.89947509765625, | |
| "kl": 0.9573570787906647, | |
| "learning_rate": 1.7569950556517563e-06, | |
| "loss": 0.1623, | |
| "num_tokens": 1691640.0, | |
| "reward": 0.375, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 153 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0616, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 30.090473175048828, | |
| "kl": 0.8319032788276672, | |
| "learning_rate": 1.7524149088957242e-06, | |
| "loss": -0.3716, | |
| "num_tokens": 1700520.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 154 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.062, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 37.990379333496094, | |
| "kl": 0.7478515356779099, | |
| "learning_rate": 1.747798090498532e-06, | |
| "loss": 0.0365, | |
| "num_tokens": 1709400.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 155 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0624, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 27.682296752929688, | |
| "kl": 0.902895525097847, | |
| "learning_rate": 1.743144825477394e-06, | |
| "loss": 0.528, | |
| "num_tokens": 1718280.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 156 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0628, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 50.9190788269043, | |
| "kl": 0.8830957859754562, | |
| "learning_rate": 1.738455340625884e-06, | |
| "loss": -0.4294, | |
| "num_tokens": 1727160.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 157 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0632, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 20.148906707763672, | |
| "kl": 0.7896845638751984, | |
| "learning_rate": 1.7337298645028762e-06, | |
| "loss": -0.4967, | |
| "num_tokens": 1736040.0, | |
| "reward": 0.625, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 158 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0636, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 32.239356994628906, | |
| "kl": 0.804187685251236, | |
| "learning_rate": 1.7289686274214115e-06, | |
| "loss": 0.7196, | |
| "num_tokens": 1744920.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 159 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.064, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 24.03925895690918, | |
| "kl": 0.8029276132583618, | |
| "learning_rate": 1.7241718614374676e-06, | |
| "loss": -0.5882, | |
| "num_tokens": 1753800.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 160 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0644, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 30.868242263793945, | |
| "kl": 0.8987027704715729, | |
| "learning_rate": 1.719339800338651e-06, | |
| "loss": 0.2535, | |
| "num_tokens": 1762680.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 161 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0648, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 19.09716033935547, | |
| "kl": 0.9851841777563095, | |
| "learning_rate": 1.7144726796328032e-06, | |
| "loss": -0.2125, | |
| "num_tokens": 1771560.0, | |
| "reward": 0.1875, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.1875, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 162 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0652, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 32.937808990478516, | |
| "kl": 0.7938939332962036, | |
| "learning_rate": 1.7095707365365209e-06, | |
| "loss": 0.4696, | |
| "num_tokens": 1780440.0, | |
| "reward": 0.125, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 163 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0656, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 37.37956619262695, | |
| "kl": 0.7686180472373962, | |
| "learning_rate": 1.7046342099635947e-06, | |
| "loss": 0.311, | |
| "num_tokens": 1789320.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 164 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.066, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 29.587915420532227, | |
| "kl": 0.6731294244527817, | |
| "learning_rate": 1.6996633405133653e-06, | |
| "loss": -0.5552, | |
| "num_tokens": 1798200.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 165 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0664, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 28.84545135498047, | |
| "kl": 0.874516099691391, | |
| "learning_rate": 1.6946583704589972e-06, | |
| "loss": 0.0035, | |
| "num_tokens": 1807080.0, | |
| "reward": 0.75, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.75, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 166 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0668, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 0.2955371141433716, | |
| "kl": 0.7283145040273666, | |
| "learning_rate": 1.6896195437356697e-06, | |
| "loss": 0.0029, | |
| "num_tokens": 1815960.0, | |
| "reward": 0.0, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.0, | |
| "rewards/compute_reward/std": 0.0, | |
| "step": 167 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0672, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 17.424720764160156, | |
| "kl": 0.9037010669708252, | |
| "learning_rate": 1.6845471059286886e-06, | |
| "loss": 0.2536, | |
| "num_tokens": 1824840.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 168 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0676, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 26.717079162597656, | |
| "kl": 0.6133365780115128, | |
| "learning_rate": 1.6794413042615166e-06, | |
| "loss": -0.031, | |
| "num_tokens": 1833720.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 169 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.068, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 24.280805587768555, | |
| "kl": 0.7366704791784286, | |
| "learning_rate": 1.6743023875837233e-06, | |
| "loss": -0.0885, | |
| "num_tokens": 1842600.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 170 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0684, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 23.280994415283203, | |
| "kl": 0.752781942486763, | |
| "learning_rate": 1.669130606358858e-06, | |
| "loss": -0.3719, | |
| "num_tokens": 1851480.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 171 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0688, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 28.28022575378418, | |
| "kl": 0.7437139600515366, | |
| "learning_rate": 1.6639262126522415e-06, | |
| "loss": 0.2529, | |
| "num_tokens": 1860360.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 172 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0692, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 18.955217361450195, | |
| "kl": 0.51456718146801, | |
| "learning_rate": 1.6586894601186803e-06, | |
| "loss": 0.0355, | |
| "num_tokens": 1869240.0, | |
| "reward": 0.5, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 173 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0696, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 27.11832618713379, | |
| "kl": 0.6736243739724159, | |
| "learning_rate": 1.6534206039901055e-06, | |
| "loss": -0.0308, | |
| "num_tokens": 1878120.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 174 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.07, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 31.381895065307617, | |
| "kl": 0.6929000839591026, | |
| "learning_rate": 1.6481199010631309e-06, | |
| "loss": 0.0028, | |
| "num_tokens": 1887000.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 175 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0704, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 48.79582977294922, | |
| "kl": 0.562452644109726, | |
| "learning_rate": 1.6427876096865393e-06, | |
| "loss": 0.0022, | |
| "num_tokens": 1895880.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 176 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0708, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 14.692187309265137, | |
| "kl": 0.4838453531265259, | |
| "learning_rate": 1.6374239897486897e-06, | |
| "loss": -0.123, | |
| "num_tokens": 1904760.0, | |
| "reward": 0.625, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 177 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0712, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 32.34980773925781, | |
| "kl": 0.5593864694237709, | |
| "learning_rate": 1.6320293026648508e-06, | |
| "loss": 0.0357, | |
| "num_tokens": 1913640.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 178 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0716, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 47.210731506347656, | |
| "kl": 1.054399996995926, | |
| "learning_rate": 1.6266038113644605e-06, | |
| "loss": -0.3372, | |
| "num_tokens": 1922520.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.41367512941360474, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 179 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.072, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 25.16176986694336, | |
| "kl": 0.6305755823850632, | |
| "learning_rate": 1.6211477802783102e-06, | |
| "loss": 0.4355, | |
| "num_tokens": 1931400.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 180 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0724, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 2.085853099822998, | |
| "kl": 0.6664913669228554, | |
| "learning_rate": 1.615661475325658e-06, | |
| "loss": 0.0027, | |
| "num_tokens": 1940280.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 181 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0728, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 0.25921252369880676, | |
| "kl": 0.5506083369255066, | |
| "learning_rate": 1.6101451639012677e-06, | |
| "loss": 0.0022, | |
| "num_tokens": 1949160.0, | |
| "reward": 0.6875, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.6875, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 182 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0732, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 19.31712532043457, | |
| "kl": 0.5009290054440498, | |
| "learning_rate": 1.604599114862375e-06, | |
| "loss": 0.4349, | |
| "num_tokens": 1958040.0, | |
| "reward": 0.375, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.375, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 183 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0736, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 14.426288604736328, | |
| "kl": 0.8416369184851646, | |
| "learning_rate": 1.5990235985155857e-06, | |
| "loss": -0.4296, | |
| "num_tokens": 1966920.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 184 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.074, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 9.469975471496582, | |
| "kl": 0.6057900637388229, | |
| "learning_rate": 1.5934188866037015e-06, | |
| "loss": -0.0311, | |
| "num_tokens": 1975800.0, | |
| "reward": 0.5, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 185 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0744, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 28.550851821899414, | |
| "kl": 0.6094760373234749, | |
| "learning_rate": 1.587785252292473e-06, | |
| "loss": 0.0359, | |
| "num_tokens": 1984680.0, | |
| "reward": 0.625, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 186 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0748, | |
| "frac_reward_zero_std": 0.0, | |
| "grad_norm": 14.759550094604492, | |
| "kl": 0.5186473429203033, | |
| "learning_rate": 1.5821229701572893e-06, | |
| "loss": -0.2479, | |
| "num_tokens": 1993560.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.51933753490448, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 187 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0752, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 24.977191925048828, | |
| "kl": 0.7406383752822876, | |
| "learning_rate": 1.5764323161697932e-06, | |
| "loss": 0.0364, | |
| "num_tokens": 2002440.0, | |
| "reward": 0.125, | |
| "reward_std": 0.14433756470680237, | |
| "rewards/compute_reward/mean": 0.125, | |
| "rewards/compute_reward/std": 0.3415650427341461, | |
| "step": 188 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0756, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 19.971651077270508, | |
| "kl": 0.578004002571106, | |
| "learning_rate": 1.5707135676844319e-06, | |
| "loss": 0.0938, | |
| "num_tokens": 2011320.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 189 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.076, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 12.372550964355469, | |
| "kl": 0.4948463439941406, | |
| "learning_rate": 1.564967003424938e-06, | |
| "loss": 0.3099, | |
| "num_tokens": 2020200.0, | |
| "reward": 0.3125, | |
| "reward_std": 0.26933756470680237, | |
| "rewards/compute_reward/mean": 0.3125, | |
| "rewards/compute_reward/std": 0.4787135720252991, | |
| "step": 190 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0764, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 13.063384056091309, | |
| "kl": 0.4803493767976761, | |
| "learning_rate": 1.5591929034707466e-06, | |
| "loss": 0.0689, | |
| "num_tokens": 2029080.0, | |
| "reward": 0.5, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 191 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0768, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 20.98461151123047, | |
| "kl": 0.5075587034225464, | |
| "learning_rate": 1.553391549243344e-06, | |
| "loss": -0.4979, | |
| "num_tokens": 2037960.0, | |
| "reward": 0.5625, | |
| "reward_std": 0.375, | |
| "rewards/compute_reward/mean": 0.5625, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 192 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0772, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 22.815135955810547, | |
| "kl": 0.5010422468185425, | |
| "learning_rate": 1.5475632234925502e-06, | |
| "loss": 0.3769, | |
| "num_tokens": 2046840.0, | |
| "reward": 0.8125, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.8125, | |
| "rewards/compute_reward/std": 0.40311288833618164, | |
| "step": 193 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0776, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 16.002519607543945, | |
| "kl": 0.4861938878893852, | |
| "learning_rate": 1.54170821028274e-06, | |
| "loss": -0.123, | |
| "num_tokens": 2055720.0, | |
| "reward": 0.25, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 194 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.078, | |
| "frac_reward_zero_std": 0.5, | |
| "grad_norm": 12.706498146057129, | |
| "kl": 0.45841076225042343, | |
| "learning_rate": 1.5358267949789964e-06, | |
| "loss": 0.2518, | |
| "num_tokens": 2064600.0, | |
| "reward": 0.25, | |
| "reward_std": 0.25, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 195 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0784, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 49.78022003173828, | |
| "kl": 0.5610195696353912, | |
| "learning_rate": 1.5299192642332049e-06, | |
| "loss": 0.0357, | |
| "num_tokens": 2073480.0, | |
| "reward": 0.625, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.625, | |
| "rewards/compute_reward/std": 0.5, | |
| "step": 196 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0788, | |
| "frac_reward_zero_std": 0.25, | |
| "grad_norm": 26.672563552856445, | |
| "kl": 0.4388924762606621, | |
| "learning_rate": 1.5239859059700793e-06, | |
| "loss": -0.2817, | |
| "num_tokens": 2082360.0, | |
| "reward": 0.25, | |
| "reward_std": 0.39433756470680237, | |
| "rewards/compute_reward/mean": 0.25, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 197 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0792, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 22.82637596130371, | |
| "kl": 0.39291463047266006, | |
| "learning_rate": 1.5180270093731302e-06, | |
| "loss": 0.0016, | |
| "num_tokens": 2091240.0, | |
| "reward": 0.75, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.75, | |
| "rewards/compute_reward/std": 0.44721361994743347, | |
| "step": 198 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.0796, | |
| "frac_reward_zero_std": 0.75, | |
| "grad_norm": 1.730869174003601, | |
| "kl": 0.4376737102866173, | |
| "learning_rate": 1.5120428648705715e-06, | |
| "loss": -0.1232, | |
| "num_tokens": 2100120.0, | |
| "reward": 0.4375, | |
| "reward_std": 0.125, | |
| "rewards/compute_reward/mean": 0.4375, | |
| "rewards/compute_reward/std": 0.5123475790023804, | |
| "step": 199 | |
| }, | |
| { | |
| "completion_length": 512.0, | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.08, | |
| "frac_reward_zero_std": 1.0, | |
| "grad_norm": 6.7394537925720215, | |
| "kl": 0.4456604868173599, | |
| "learning_rate": 1.5060337641211636e-06, | |
| "loss": 0.1268, | |
| "num_tokens": 2109000.0, | |
| "reward": 0.5, | |
| "reward_std": 0.0, | |
| "rewards/compute_reward/mean": 0.5, | |
| "rewards/compute_reward/std": 0.5163977742195129, | |
| "step": 200 | |
| }, | |
| { | |
| "completions/clipped_ratio": 1.0, | |
| "completions/max_length": 512.0, | |
| "completions/max_terminated_length": 0.0, | |
| "completions/mean_length": 512.0, | |
| "completions/mean_terminated_length": 0.0, | |
| "completions/min_length": 512.0, | |
| "completions/min_terminated_length": 0.0, | |
| "epoch": 0.08, | |
| "eval_loss": 0.0004982490791007876, | |
| "eval_runtime": 7044.2588, | |
| "eval_samples_per_second": 0.028, | |
| "eval_steps_per_second": 0.007, | |
| "frac_reward_zero_std": 0.6122448979591837, | |
| "num_tokens": 2217780.0, | |
| "reward": 0.413265306122449, | |
| "reward_std": 0.19703470444192692, | |
| "rewards/compute_reward/mean": 0.413265306122449, | |
| "rewards/compute_reward/std": 0.19703470444192692, | |
| "step": 200 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 500, | |
| "num_input_tokens_seen": 2217780, | |
| "num_train_epochs": 1, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
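
The listing above follows the standard Hugging Face `trainer_state.json` layout: `log_history` holds one entry per logged training step (with `loss`, `reward`, `kl`, and related metrics, consistent with a TRL GRPO-style reward-training run) plus periodic eval entries keyed by `eval_loss`. A minimal sketch for inspecting it, assuming the file is saved under a hypothetical checkpoint directory such as `checkpoint-200/`, might look like this:

```python
# Minimal sketch (not part of the original log): load this checkpoint's
# trainer_state.json and summarize the reward curve from log_history.
# The path below is an assumption; point it at your own checkpoint directory.
import json

with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

steps = [e["step"] for e in train_logs]
rewards = [e["reward"] for e in train_logs]

print(f"train steps logged: {len(train_logs)}, eval points: {len(eval_logs)}")
if rewards:
    print(f"mean reward over logged steps: {sum(rewards) / len(rewards):.4f}")
    print(f"final reward at step {steps[-1]}: {rewards[-1]}")
```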