{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.24,
  "eval_steps": 500,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 267.57032012939453,
      "epoch": 0.0010666666666666667,
      "grad_norm": 0.21315283923773715,
      "kl": 0.0,
      "learning_rate": 1.4285714285714285e-07,
      "loss": -0.0,
      "reward": 0.05208333465270698,
      "reward_std": 0.12187675526365638,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.05208333465270698,
      "step": 2
    },
    {
      "completion_length": 258.91667556762695,
      "epoch": 0.0021333333333333334,
      "grad_norm": 0.39624029123072774,
      "kl": 0.0005260705947875977,
      "learning_rate": 2.857142857142857e-07,
      "loss": 0.0,
      "reward": 0.09114583535119891,
      "reward_std": 0.2148200012743473,
      "rewards/equation_reward_func": 0.0026041667442768812,
      "rewards/format_reward_func": 0.08854166837409139,
      "step": 4
    },
    {
      "completion_length": 254.1770896911621,
      "epoch": 0.0032,
      "grad_norm": 0.2789454187627699,
      "kl": 0.0005211830139160156,
      "learning_rate": 4.285714285714285e-07,
      "loss": 0.0,
      "reward": 0.08333333465270698,
      "reward_std": 0.20366852125152946,
      "rewards/equation_reward_func": 0.0026041667442768812,
      "rewards/format_reward_func": 0.08072916860692203,
      "step": 6
    },
    {
      "completion_length": 266.03125858306885,
      "epoch": 0.004266666666666667,
      "grad_norm": 0.1672287489912653,
      "kl": 0.0005216598510742188,
      "learning_rate": 5.714285714285714e-07,
      "loss": 0.0,
      "reward": 0.059895834885537624,
      "reward_std": 0.15923613868653774,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.059895834885537624,
      "step": 8
    },
    {
      "completion_length": 248.87240409851074,
      "epoch": 0.005333333333333333,
      "grad_norm": 0.3959884726972001,
      "kl": 0.0006082057952880859,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.0,
      "reward": 0.09635416883975267,
      "reward_std": 0.21937653236091137,
      "rewards/equation_reward_func": 0.0052083334885537624,
      "rewards/format_reward_func": 0.09114583488553762,
      "step": 10
    },
    {
      "completion_length": 245.3203182220459,
      "epoch": 0.0064,
      "grad_norm": 0.28583532187086624,
      "kl": 0.0007441043853759766,
      "learning_rate": 8.57142857142857e-07,
      "loss": 0.0,
      "reward": 0.0781250016298145,
      "reward_std": 0.2006211462430656,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.0781250016298145,
      "step": 12
    },
    {
      "completion_length": 232.69531726837158,
      "epoch": 0.007466666666666667,
      "grad_norm": 0.30125607821086126,
      "kl": 0.0013909339904785156,
      "learning_rate": 1e-06,
      "loss": 0.0,
      "reward": 0.1015625016298145,
      "reward_std": 0.22314835852012038,
      "rewards/equation_reward_func": 0.0026041667442768812,
      "rewards/format_reward_func": 0.09895833465270698,
      "step": 14
    },
    {
      "completion_length": 226.94271278381348,
      "epoch": 0.008533333333333334,
      "grad_norm": 0.3849252255439341,
      "kl": 0.00328826904296875,
      "learning_rate": 9.999480818449865e-07,
      "loss": 0.0,
      "reward": 0.14322916930541396,
      "reward_std": 0.2906528012827039,
      "rewards/equation_reward_func": 0.0052083334885537624,
      "rewards/format_reward_func": 0.1380208362825215,
      "step": 16
    },
    {
      "completion_length": 185.41146278381348,
      "epoch": 0.0096,
      "grad_norm": 0.4848458043128939,
      "kl": 0.008299827575683594,
      "learning_rate": 9.997923381619255e-07,
      "loss": 0.0,
      "reward": 0.22395834000781178,
      "reward_std": 0.37577595096081495,
      "rewards/equation_reward_func": 0.0052083334885537624,
      "rewards/format_reward_func": 0.21875000419095159,
      "step": 18
    },
    {
      "completion_length": 127.20573091506958,
      "epoch": 0.010666666666666666,
      "grad_norm": 0.5597326437314926,
      "kl": 0.0244293212890625,
      "learning_rate": 9.995328012945157e-07,
      "loss": 0.0,
      "reward": 0.4505208469927311,
      "reward_std": 0.4992805514484644,
      "rewards/equation_reward_func": 0.01822916720993817,
      "rewards/format_reward_func": 0.4322916753590107,
      "step": 20
    },
    {
      "completion_length": 93.7682318687439,
      "epoch": 0.011733333333333333,
      "grad_norm": 0.5475672226425161,
      "kl": 0.048309326171875,
      "learning_rate": 9.991695251414583e-07,
      "loss": 0.0,
      "reward": 0.5755208488553762,
      "reward_std": 0.4716507475823164,
      "rewards/equation_reward_func": 0.007812500232830644,
      "rewards/format_reward_func": 0.5677083469927311,
      "step": 22
    },
    {
      "completion_length": 84.74739789962769,
      "epoch": 0.0128,
      "grad_norm": 0.6354342945220236,
      "kl": 0.0808868408203125,
      "learning_rate": 9.987025851452636e-07,
      "loss": 0.0001,
      "reward": 0.7291666865348816,
      "reward_std": 0.42415964510291815,
      "rewards/equation_reward_func": 0.0052083334885537624,
      "rewards/format_reward_func": 0.7239583507180214,
      "step": 24
    },
    {
      "completion_length": 82.8072943687439,
      "epoch": 0.013866666666666666,
      "grad_norm": 0.5155554110023303,
      "kl": 0.128814697265625,
      "learning_rate": 9.981320782765846e-07,
      "loss": 0.0001,
      "reward": 0.8750000223517418,
      "reward_std": 0.270578539930284,
      "rewards/equation_reward_func": 0.0026041667442768812,
      "rewards/format_reward_func": 0.8723958544433117,
      "step": 26
    },
    {
      "completion_length": 64.10937762260437,
      "epoch": 0.014933333333333333,
      "grad_norm": 0.3480478612027264,
      "kl": 0.24591064453125,
      "learning_rate": 9.974581230140768e-07,
      "loss": 0.0002,
      "reward": 0.9713541902601719,
      "reward_std": 0.12162131909281015,
      "rewards/equation_reward_func": 0.007812500232830644,
      "rewards/format_reward_func": 0.9635416939854622,
      "step": 28
    },
    {
      "completion_length": 60.281251668930054,
      "epoch": 0.016,
      "grad_norm": 0.005562791609333587,
      "kl": 0.3465576171875,
      "learning_rate": 9.966808593197956e-07,
      "loss": 0.0003,
      "reward": 0.9869791753590107,
      "reward_std": 0.03682847833260894,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9869791753590107,
      "step": 30
    },
    {
      "completion_length": 45.51562637090683,
      "epoch": 0.017066666666666667,
      "grad_norm": 0.03097813949935321,
      "kl": 0.49072265625,
      "learning_rate": 9.958004486101293e-07,
      "loss": 0.0005,
      "reward": 0.9947916716337204,
      "reward_std": 0.014731391333043575,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9947916716337204,
      "step": 32
    },
    {
      "completion_length": 69.03385561704636,
      "epoch": 0.018133333333333335,
      "grad_norm": 0.006598716445330455,
      "kl": 0.3668212890625,
      "learning_rate": 9.948170737222762e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 34
    },
    {
      "completion_length": 98.88541954755783,
      "epoch": 0.0192,
      "grad_norm": 0.00733244130846419,
      "kl": 0.3455810546875,
      "learning_rate": 9.937309388762758e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 36
    },
    {
      "completion_length": 79.19531428813934,
      "epoch": 0.020266666666666665,
      "grad_norm": 0.002140189477159276,
      "kl": 0.337646484375,
      "learning_rate": 9.925422696325974e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 38
    },
    {
      "completion_length": 58.98437696695328,
      "epoch": 0.021333333333333333,
      "grad_norm": 0.005041926134939414,
      "kl": 0.3271484375,
      "learning_rate": 9.912513128452973e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 40
    },
    {
      "completion_length": 83.58854389190674,
      "epoch": 0.0224,
      "grad_norm": 0.003080339075344411,
      "kl": 0.3336181640625,
      "learning_rate": 9.898583366107536e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 42
    },
    {
      "completion_length": 68.85677361488342,
      "epoch": 0.023466666666666667,
      "grad_norm": 0.005847596515205166,
      "kl": 0.355712890625,
      "learning_rate": 9.88363630211991e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 44
    },
    {
      "completion_length": 81.716148853302,
      "epoch": 0.024533333333333334,
      "grad_norm": 0.006129286662886568,
      "kl": 0.351806640625,
      "learning_rate": 9.867675040586033e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 46
    },
    {
      "completion_length": 62.87239807844162,
      "epoch": 0.0256,
      "grad_norm": 0.0022686471037775217,
      "kl": 0.3529052734375,
      "learning_rate": 9.850702896222908e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 48
    },
    {
      "completion_length": 84.13802230358124,
      "epoch": 0.02666666666666667,
      "grad_norm": 0.001575588022391806,
      "kl": 0.3443603515625,
      "learning_rate": 9.83272339368022e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 50
    },
    {
      "completion_length": 70.85937684774399,
      "epoch": 0.027733333333333332,
      "grad_norm": 0.004893372014791935,
      "kl": 0.3424072265625,
      "learning_rate": 9.813740266808373e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 52
    },
    {
      "completion_length": 73.53125238418579,
      "epoch": 0.0288,
      "grad_norm": 0.0020037829167036995,
      "kl": 0.3336181640625,
      "learning_rate": 9.793757457883061e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 54
    },
    {
      "completion_length": 85.58073115348816,
      "epoch": 0.029866666666666666,
      "grad_norm": 0.0052068637869011935,
      "kl": 0.3441162109375,
      "learning_rate": 9.772779116786567e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 56
    },
    {
      "completion_length": 62.17968964576721,
      "epoch": 0.030933333333333334,
      "grad_norm": 0.004204176702325925,
      "kl": 0.3392333984375,
      "learning_rate": 9.750809600145952e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 58
    },
    {
      "completion_length": 60.56250220537186,
      "epoch": 0.032,
      "grad_norm": 0.0015647291182938304,
      "kl": 0.35205078125,
      "learning_rate": 9.7278534704283e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 60
    },
    {
      "completion_length": 63.45312720537186,
      "epoch": 0.03306666666666667,
      "grad_norm": 0.017107550850573614,
      "kl": 0.3740234375,
      "learning_rate": 9.703915494993213e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 62
    },
    {
      "completion_length": 59.20312708616257,
      "epoch": 0.034133333333333335,
      "grad_norm": 0.002029246366701105,
      "kl": 0.344482421875,
      "learning_rate": 9.67900064510277e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 64
    },
    {
      "completion_length": 49.76041799783707,
      "epoch": 0.0352,
      "grad_norm": 0.1707108937849788,
      "kl": 0.3961181640625,
      "learning_rate": 9.653114094889126e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 66
    },
    {
      "completion_length": 66.42448157072067,
      "epoch": 0.03626666666666667,
      "grad_norm": 0.004363766057327376,
      "kl": 0.3525390625,
      "learning_rate": 9.626261220279987e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 68
    },
    {
      "completion_length": 79.07031440734863,
      "epoch": 0.037333333333333336,
      "grad_norm": 0.002295566859094707,
      "kl": 0.3387451171875,
      "learning_rate": 9.598447597882179e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 70
    },
    {
      "completion_length": 85.9947943687439,
      "epoch": 0.0384,
      "grad_norm": 0.0028772515347710507,
      "kl": 0.3404541015625,
      "learning_rate": 9.56967900382354e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 72
    },
    {
      "completion_length": 118.54166984558105,
      "epoch": 0.039466666666666664,
      "grad_norm": 0.003804107622681421,
      "kl": 0.35693359375,
      "learning_rate": 9.539961412553374e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 74
    },
    {
      "completion_length": 132.24739825725555,
      "epoch": 0.04053333333333333,
      "grad_norm": 0.0026976286800890377,
      "kl": 0.35357666015625,
      "learning_rate": 9.509300995601719e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 76
    },
    {
      "completion_length": 150.5286495089531,
      "epoch": 0.0416,
      "grad_norm": 0.0029703461859595446,
      "kl": 0.3365478515625,
      "learning_rate": 9.477704120297696e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 78
    },
    {
      "completion_length": 132.16927361488342,
      "epoch": 0.042666666666666665,
      "grad_norm": 0.0042900161521961236,
      "kl": 0.347900390625,
      "learning_rate": 9.445177348447186e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 80
    },
    {
      "completion_length": 148.73958611488342,
      "epoch": 0.04373333333333333,
      "grad_norm": 0.009973192234001046,
      "kl": 0.3470458984375,
      "learning_rate": 9.41172743497012e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 82
    },
    {
      "completion_length": 160.5599009990692,
      "epoch": 0.0448,
      "grad_norm": 0.021317337320502894,
      "kl": 0.34527587890625,
      "learning_rate": 9.377361326497673e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 84
    },
    {
      "completion_length": 214.65365314483643,
      "epoch": 0.04586666666666667,
      "grad_norm": 0.004309834843107017,
      "kl": 0.30181884765625,
      "learning_rate": 9.342086159929629e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 86
    },
    {
      "completion_length": 297.28125953674316,
      "epoch": 0.046933333333333334,
      "grad_norm": 0.004971116117055498,
      "kl": 0.263671875,
      "learning_rate": 9.305909260952254e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 88
    },
    {
      "completion_length": 306.1666736602783,
      "epoch": 0.048,
      "grad_norm": 0.00330346016739013,
      "kl": 0.24566650390625,
      "learning_rate": 9.268838142516943e-07,
      "loss": 0.0002,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 90
    },
    {
      "completion_length": 279.96355056762695,
      "epoch": 0.04906666666666667,
      "grad_norm": 0.00318035450007201,
      "kl": 0.2532958984375,
      "learning_rate": 9.23088050327999e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 92
    },
    {
      "completion_length": 261.7994842529297,
      "epoch": 0.050133333333333335,
      "grad_norm": 0.0036946086220320164,
      "kl": 0.273681640625,
      "learning_rate": 9.192044226003788e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 94
    },
    {
      "completion_length": 275.30209159851074,
      "epoch": 0.0512,
      "grad_norm": 0.004650714378941982,
      "kl": 0.27423095703125,
      "learning_rate": 9.15233737591979e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 96
    },
    {
      "completion_length": 275.5442762374878,
      "epoch": 0.05226666666666667,
      "grad_norm": 0.005874752003273854,
      "kl": 0.283935546875,
      "learning_rate": 9.111768199053586e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 98
    },
    {
      "completion_length": 224.13542079925537,
      "epoch": 0.05333333333333334,
      "grad_norm": 0.003767361371489657,
      "kl": 0.29339599609375,
      "learning_rate": 9.070345120512435e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 100
    },
    {
      "completion_length": 247.3671932220459,
      "epoch": 0.0544,
      "grad_norm": 0.21204929441134815,
      "kl": 0.5673828125,
      "learning_rate": 9.028076742735582e-07,
      "loss": 0.0006,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 102
    },
    {
      "completion_length": 213.47917222976685,
      "epoch": 0.055466666666666664,
      "grad_norm": 0.003470673455225982,
      "kl": 0.3101806640625,
      "learning_rate": 8.984971843707787e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 104
    },
    {
      "completion_length": 175.9947967529297,
      "epoch": 0.05653333333333333,
      "grad_norm": 0.005812568326490043,
      "kl": 0.32379150390625,
      "learning_rate": 8.94103937513637e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 106
    },
    {
      "completion_length": 117.411461353302,
      "epoch": 0.0576,
      "grad_norm": 0.00354722170703675,
      "kl": 0.3551025390625,
      "learning_rate": 8.896288460592185e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 108
    },
    {
      "completion_length": 110.28125357627869,
      "epoch": 0.058666666666666666,
      "grad_norm": 0.003013321323384688,
      "kl": 0.339599609375,
      "learning_rate": 8.850728393614901e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 110
    },
    {
      "completion_length": 106.96875274181366,
      "epoch": 0.05973333333333333,
      "grad_norm": 0.003463116039679215,
      "kl": 0.35595703125,
      "learning_rate": 8.804368635783002e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 112
    },
    {
      "completion_length": 85.82552325725555,
      "epoch": 0.0608,
      "grad_norm": 0.03677716489910903,
      "kl": 0.3817138671875,
      "learning_rate": 8.75721881474886e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 114
    },
    {
      "completion_length": 69.90885663032532,
      "epoch": 0.06186666666666667,
      "grad_norm": 0.014502649071450845,
      "kl": 0.4462890625,
      "learning_rate": 8.709288722239342e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 116
    },
    {
      "completion_length": 60.21093875169754,
      "epoch": 0.06293333333333333,
      "grad_norm": 0.018319766426722457,
      "kl": 0.4246826171875,
      "learning_rate": 8.660588312022343e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 118
    },
    {
      "completion_length": 48.75000184774399,
      "epoch": 0.064,
      "grad_norm": 0.002819178233147015,
      "kl": 0.3572998046875,
      "learning_rate": 8.611127697839647e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 120
    },
    {
      "completion_length": 29.817708730697632,
      "epoch": 0.06506666666666666,
      "grad_norm": 0.048219241978314446,
      "kl": 0.6165771484375,
      "learning_rate": 8.560917151306592e-07,
      "loss": 0.0006,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 122
    },
    {
      "completion_length": 29.747396767139435,
      "epoch": 0.06613333333333334,
      "grad_norm": 0.002491973715723316,
      "kl": 0.394287109375,
      "learning_rate": 8.509967099778933e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 124
    },
    {
      "completion_length": 22.74479252099991,
      "epoch": 0.0672,
      "grad_norm": 0.03159552463178033,
      "kl": 0.519287109375,
      "learning_rate": 8.458288124187358e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 126
    },
    {
      "completion_length": 18.752604603767395,
      "epoch": 0.06826666666666667,
      "grad_norm": 0.0047626328583877,
      "kl": 0.4564208984375,
      "learning_rate": 8.405890956840135e-07,
      "loss": 0.0005,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 128
    },
    {
      "completion_length": 25.395833671092987,
      "epoch": 0.06933333333333333,
      "grad_norm": 0.007120551314141992,
      "kl": 0.4727783203125,
      "learning_rate": 8.352786479194287e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 130
    },
    {
      "completion_length": 18.677083611488342,
      "epoch": 0.0704,
      "grad_norm": 0.007749349171876843,
      "kl": 0.4617919921875,
      "learning_rate": 8.298985719595823e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 132
    },
    {
      "completion_length": 28.72395944595337,
      "epoch": 0.07146666666666666,
      "grad_norm": 0.01725236239408364,
      "kl": 0.512451171875,
      "learning_rate": 8.244499850989451e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 134
    },
    {
      "completion_length": 32.2265630364418,
      "epoch": 0.07253333333333334,
      "grad_norm": 0.05304546123188986,
      "kl": 0.5926513671875,
      "learning_rate": 8.189340188598262e-07,
      "loss": 0.0006,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 136
    },
    {
      "completion_length": 23.41145896911621,
      "epoch": 0.0736,
      "grad_norm": 0.013190548453262672,
      "kl": 0.4632568359375,
      "learning_rate": 8.133518187573862e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 138
    },
    {
      "completion_length": 24.82552134990692,
      "epoch": 0.07466666666666667,
      "grad_norm": 0.012046997929078053,
      "kl": 0.4591064453125,
      "learning_rate": 8.077045440617464e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 140
    },
    {
      "completion_length": 17.638021290302277,
      "epoch": 0.07573333333333333,
      "grad_norm": 0.0070439803543669265,
      "kl": 0.425048828125,
      "learning_rate": 8.019933675572388e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 142
    },
    {
      "completion_length": 19.304688096046448,
      "epoch": 0.0768,
      "grad_norm": 0.007119537656182656,
      "kl": 0.952880859375,
      "learning_rate": 7.962194752988518e-07,
      "loss": 0.001,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 144
    },
    {
      "completion_length": 23.434896171092987,
      "epoch": 0.07786666666666667,
      "grad_norm": 0.004351846417110043,
      "kl": 0.39892578125,
      "learning_rate": 7.903840663659184e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 146
    },
    {
      "completion_length": 17.36458384990692,
      "epoch": 0.07893333333333333,
      "grad_norm": 0.014464883726819155,
      "kl": 0.427490234375,
      "learning_rate": 7.844883526131013e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 148
    },
    {
      "completion_length": 23.164062976837158,
      "epoch": 0.08,
      "grad_norm": 0.003214313828585045,
      "kl": 0.38671875,
      "learning_rate": 7.785335584187219e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 150
    },
    {
      "completion_length": 17.333333790302277,
      "epoch": 0.08106666666666666,
      "grad_norm": 0.005708191232659279,
      "kl": 0.44482421875,
      "learning_rate": 7.725209204304928e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 152
    },
    {
      "completion_length": 18.481771171092987,
      "epoch": 0.08213333333333334,
      "grad_norm": 0.18097704136374604,
      "kl": 0.6190185546875,
      "learning_rate": 7.664516873086987e-07,
      "loss": 0.0006,
      "reward": 0.9947916716337204,
      "reward_std": 0.014731391333043575,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9947916716337204,
      "step": 154
    },
    {
      "completion_length": 20.242188155651093,
      "epoch": 0.0832,
      "grad_norm": 0.004715553205403204,
      "kl": 0.42333984375,
      "learning_rate": 7.603271194668835e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 156
    },
    {
      "completion_length": 18.898438036441803,
      "epoch": 0.08426666666666667,
      "grad_norm": 0.003966040759048578,
      "kl": 0.403076171875,
      "learning_rate": 7.541484888100973e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 158
    },
    {
      "completion_length": 16.60416716337204,
      "epoch": 0.08533333333333333,
      "grad_norm": 0.002003261027905268,
      "kl": 0.34228515625,
      "learning_rate": 7.479170784707574e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 160
    },
    {
      "completion_length": 24.88802134990692,
      "epoch": 0.0864,
      "grad_norm": 0.009211378308009072,
      "kl": 0.384521484375,
      "learning_rate": 7.416341825421753e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 162
    },
    {
      "completion_length": 16.401042103767395,
      "epoch": 0.08746666666666666,
      "grad_norm": 0.005184187636080286,
      "kl": 0.3958740234375,
      "learning_rate": 7.353011058098103e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 164
    },
    {
      "completion_length": 21.299479722976685,
      "epoch": 0.08853333333333334,
      "grad_norm": 0.07832411386720319,
      "kl": 0.3946533203125,
      "learning_rate": 7.289191634803002e-07,
      "loss": 0.0004,
      "reward": 0.9947916716337204,
      "reward_std": 0.014731391333043575,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9947916716337204,
      "step": 166
    },
    {
      "completion_length": 21.127604603767395,
      "epoch": 0.0896,
      "grad_norm": 0.006189211303564496,
      "kl": 0.3651123046875,
      "learning_rate": 7.224896809083297e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 168
    },
    {
      "completion_length": 18.304687917232513,
      "epoch": 0.09066666666666667,
      "grad_norm": 0.002963052569393672,
      "kl": 0.345458984375,
      "learning_rate": 7.160139933213898e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 170
    },
    {
      "completion_length": 24.544271409511566,
      "epoch": 0.09173333333333333,
      "grad_norm": 0.006809551131670742,
      "kl": 0.376708984375,
      "learning_rate": 7.094934455424888e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 172
    },
    {
      "completion_length": 23.06510454416275,
      "epoch": 0.0928,
      "grad_norm": 0.007169853628551621,
      "kl": 0.377685546875,
      "learning_rate": 7.029293917108677e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 174
    },
    {
      "completion_length": 29.372397005558014,
      "epoch": 0.09386666666666667,
      "grad_norm": 0.0049609216072808585,
      "kl": 0.38330078125,
      "learning_rate": 6.963231950007844e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 176
    },
    {
      "completion_length": 22.596354842185974,
      "epoch": 0.09493333333333333,
      "grad_norm": 0.0033493670757974436,
      "kl": 0.3634033203125,
      "learning_rate": 6.896762273384178e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 178
    },
    {
      "completion_length": 21.070313334465027,
      "epoch": 0.096,
      "grad_norm": 0.007358426048838388,
      "kl": 0.374267578125,
      "learning_rate": 6.829898691169579e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 180
    },
    {
      "completion_length": 25.106771528720856,
      "epoch": 0.09706666666666666,
      "grad_norm": 0.002673392643388894,
      "kl": 0.3927001953125,
      "learning_rate": 6.762655089099353e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 182
    },
    {
      "completion_length": 23.947917222976685,
      "epoch": 0.09813333333333334,
      "grad_norm": 0.009888137802143636,
      "kl": 0.4693603515625,
      "learning_rate": 6.695045431828524e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 184
    },
    {
      "completion_length": 20.893229484558105,
      "epoch": 0.0992,
      "grad_norm": 0.0022607662791007638,
      "kl": 0.3218994140625,
      "learning_rate": 6.627083760031754e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 186
    },
    {
      "completion_length": 19.361979722976685,
      "epoch": 0.10026666666666667,
      "grad_norm": 0.0032574710279835964,
      "kl": 0.3485107421875,
      "learning_rate": 6.558784187487494e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 188
    },
    {
      "completion_length": 20.19270896911621,
      "epoch": 0.10133333333333333,
      "grad_norm": 0.003981197482172737,
      "kl": 0.3470458984375,
      "learning_rate": 6.490160898146918e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 190
    },
    {
      "completion_length": 19.901042342185974,
      "epoch": 0.1024,
      "grad_norm": 0.002514384073907443,
      "kl": 0.3548583984375,
      "learning_rate": 6.421228143188324e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 192
    },
    {
      "completion_length": 24.843750834465027,
      "epoch": 0.10346666666666667,
      "grad_norm": 0.007842781472841063,
      "kl": 0.3717041015625,
      "learning_rate": 6.352000238057539e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 194
    },
    {
      "completion_length": 21.859375715255737,
      "epoch": 0.10453333333333334,
      "grad_norm": 0.0060397124228908665,
      "kl": 0.357666015625,
      "learning_rate": 6.282491559495004e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 196
    },
    {
      "completion_length": 22.700521767139435,
      "epoch": 0.1056,
      "grad_norm": 0.005604073044775416,
      "kl": 0.4561767578125,
      "learning_rate": 6.212716542550112e-07,
      "loss": 0.0005,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 198
    },
    {
      "completion_length": 22.901042222976685,
      "epoch": 0.10666666666666667,
      "grad_norm": 0.0026562431855837636,
      "kl": 0.3526611328125,
      "learning_rate": 6.142689677583445e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 200
    },
    {
      "completion_length": 24.648437798023224,
      "epoch": 0.10773333333333333,
      "grad_norm": 0.002588639622294546,
      "kl": 0.3214111328125,
      "learning_rate": 6.072425507257527e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 202
    },
    {
      "completion_length": 22.382813036441803,
      "epoch": 0.1088,
      "grad_norm": 0.00323664281124744,
      "kl": 0.36767578125,
      "learning_rate": 6.001938623516705e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 204
    },
    {
      "completion_length": 23.174479961395264,
      "epoch": 0.10986666666666667,
      "grad_norm": 0.004194373433624107,
      "kl": 0.361572265625,
      "learning_rate": 5.931243664556802e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 206
    },
    {
      "completion_length": 22.494792342185974,
      "epoch": 0.11093333333333333,
      "grad_norm": 0.0635254172279242,
      "kl": 0.389404296875,
      "learning_rate": 5.860355311785175e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 208
    },
    {
      "completion_length": 24.593750715255737,
      "epoch": 0.112,
      "grad_norm": 0.0040160843770763,
      "kl": 0.381103515625,
      "learning_rate": 5.78928828677177e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 210
    },
    {
      "completion_length": 19.640625715255737,
      "epoch": 0.11306666666666666,
      "grad_norm": 0.004056894485336028,
      "kl": 0.3604736328125,
      "learning_rate": 5.718057348191874e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 212
    },
    {
      "completion_length": 26.078125655651093,
      "epoch": 0.11413333333333334,
      "grad_norm": 0.001426529705853739,
      "kl": 0.3330078125,
      "learning_rate": 5.646677288761132e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 214
    },
    {
      "completion_length": 18.17447954416275,
      "epoch": 0.1152,
      "grad_norm": 0.00410254063002437,
      "kl": 0.360107421875,
      "learning_rate": 5.575162932163501e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 216
    },
    {
      "completion_length": 19.231771647930145,
      "epoch": 0.11626666666666667,
      "grad_norm": 0.004454685182350467,
      "kl": 0.3358154296875,
      "learning_rate": 5.503529129972792e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 218
    },
    {
      "completion_length": 22.81250023841858,
      "epoch": 0.11733333333333333,
      "grad_norm": 0.0033331903371956913,
      "kl": 0.3734130859375,
      "learning_rate": 5.431790758568388e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 220
    },
    {
      "completion_length": 33.567709386348724,
      "epoch": 0.1184,
      "grad_norm": 0.15156820624659995,
      "kl": 0.3978271484375,
      "learning_rate": 5.359962716045835e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 222
    },
    {
      "completion_length": 23.721354722976685,
      "epoch": 0.11946666666666667,
      "grad_norm": 0.1302812080137746,
      "kl": 0.3497314453125,
      "learning_rate": 5.288059919122921e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 224
    },
    {
      "completion_length": 23.40104216337204,
      "epoch": 0.12053333333333334,
      "grad_norm": 0.10213989334337155,
      "kl": 0.3587646484375,
      "learning_rate": 5.216097300041869e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 226
    },
    {
      "completion_length": 28.752604722976685,
      "epoch": 0.1216,
      "grad_norm": 0.00405575423941108,
      "kl": 0.356689453125,
      "learning_rate": 5.144089803468332e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 228
    },
    {
      "completion_length": 21.56770896911621,
      "epoch": 0.12266666666666666,
      "grad_norm": 0.006688124816156981,
      "kl": 0.38134765625,
      "learning_rate": 5.072052383387786e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 230
    },
    {
      "completion_length": 30.83333396911621,
      "epoch": 0.12373333333333333,
      "grad_norm": 0.003894854329196007,
      "kl": 0.36962890625,
      "learning_rate": 5e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 232
    },
    {
      "completion_length": 38.3385426402092,
      "epoch": 0.1248,
      "grad_norm": 0.003321341352419865,
      "kl": 0.339111328125,
      "learning_rate": 4.927947616612215e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 234
    },
    {
      "completion_length": 25.40104252099991,
      "epoch": 0.12586666666666665,
      "grad_norm": 0.003665106813183815,
      "kl": 0.347900390625,
      "learning_rate": 4.855910196531669e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 236
    },
    {
      "completion_length": 32.97656351327896,
      "epoch": 0.12693333333333334,
      "grad_norm": 0.0029113458934489096,
      "kl": 0.3333740234375,
      "learning_rate": 4.783902699958129e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 238
    },
    {
      "completion_length": 32.789063453674316,
      "epoch": 0.128,
      "grad_norm": 0.2144030619978656,
      "kl": 0.98974609375,
      "learning_rate": 4.711940080877079e-07,
      "loss": 0.001,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 240
    },
    {
      "completion_length": 27.617188334465027,
      "epoch": 0.12906666666666666,
      "grad_norm": 0.004925288676087789,
      "kl": 0.355712890625,
      "learning_rate": 4.6400372839541647e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 242
    },
    {
      "completion_length": 27.859375417232513,
      "epoch": 0.13013333333333332,
      "grad_norm": 0.004078504522837724,
      "kl": 0.3455810546875,
      "learning_rate": 4.568209241431614e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 244
    },
    {
      "completion_length": 38.55729287862778,
      "epoch": 0.1312,
      "grad_norm": 0.003946730384617455,
      "kl": 0.3524169921875,
      "learning_rate": 4.4964708700272086e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 246
    },
    {
      "completion_length": 36.60937589406967,
      "epoch": 0.13226666666666667,
      "grad_norm": 0.0025911319683286625,
      "kl": 0.3311767578125,
      "learning_rate": 4.424837067836499e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 248
    },
    {
      "completion_length": 34.45312637090683,
      "epoch": 0.13333333333333333,
      "grad_norm": 0.02082559848620119,
      "kl": 0.3614501953125,
      "learning_rate": 4.353322711238869e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 250
    },
    {
      "completion_length": 44.54427218437195,
      "epoch": 0.1344,
      "grad_norm": 0.004583784786433975,
      "kl": 0.3564453125,
      "learning_rate": 4.2819426518081256e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 252
    },
    {
      "completion_length": 32.47656333446503,
      "epoch": 0.13546666666666668,
      "grad_norm": 0.2516579496285295,
      "kl": 0.348388671875,
      "learning_rate": 4.21071171322823e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 254
    },
    {
      "completion_length": 33.247396886348724,
      "epoch": 0.13653333333333334,
      "grad_norm": 0.003750844211497512,
      "kl": 0.336669921875,
      "learning_rate": 4.139644688214826e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 256
    },
    {
      "completion_length": 40.09375071525574,
      "epoch": 0.1376,
      "grad_norm": 0.0056285121180003365,
      "kl": 0.3736572265625,
      "learning_rate": 4.068756335443198e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 258
    },
    {
      "completion_length": 32.55468821525574,
      "epoch": 0.13866666666666666,
      "grad_norm": 0.19004745287506442,
      "kl": 0.3536376953125,
      "learning_rate": 3.998061376483297e-07,
      "loss": 0.0004,
      "reward": 0.9947916716337204,
      "reward_std": 0.014731391333043575,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9947916716337204,
      "step": 260
    },
    {
      "completion_length": 41.53125083446503,
      "epoch": 0.13973333333333332,
      "grad_norm": 0.005613467195859889,
      "kl": 0.3878173828125,
      "learning_rate": 3.9275744927424723e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 262
    },
    {
      "completion_length": 41.14323008060455,
      "epoch": 0.1408,
      "grad_norm": 0.004079597325625193,
      "kl": 0.341552734375,
      "learning_rate": 3.8573103224165547e-07,
      "loss": 0.0003,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 264
    },
    {
      "completion_length": 45.320313930511475,
      "epoch": 0.14186666666666667,
      "grad_norm": 0.004450670587730382,
      "kl": 0.3409423828125,
      "learning_rate": 3.787283457449889e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 266
    },
    {
      "completion_length": 34.77083444595337,
      "epoch": 0.14293333333333333,
      "grad_norm": 0.005059899644254497,
      "kl": 0.3505859375,
      "learning_rate": 3.717508440504997e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 268
    },
    {
      "completion_length": 34.54948043823242,
      "epoch": 0.144,
      "grad_norm": 0.23991193127694865,
      "kl": 0.3560791015625,
      "learning_rate": 3.64799976194246e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 270
    },
    {
      "completion_length": 25.007812976837158,
      "epoch": 0.14506666666666668,
      "grad_norm": 0.004586704746066067,
      "kl": 0.342529296875,
      "learning_rate": 3.5787718568116757e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 272
    },
    {
      "completion_length": 33.093751192092896,
      "epoch": 0.14613333333333334,
      "grad_norm": 0.07458441934892074,
      "kl": 0.3690185546875,
      "learning_rate": 3.5098391018530813e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 274
    },
    {
      "completion_length": 26.036458909511566,
      "epoch": 0.1472,
      "grad_norm": 0.0018656461831637952,
      "kl": 0.3399658203125,
      "learning_rate": 3.4412158125125073e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 276
    },
    {
      "completion_length": 27.08854240179062,
      "epoch": 0.14826666666666666,
      "grad_norm": 0.008897084345492313,
      "kl": 0.3919677734375,
      "learning_rate": 3.372916239968245e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 278
    },
    {
      "completion_length": 29.460938274860382,
      "epoch": 0.14933333333333335,
      "grad_norm": 0.0038377037869509346,
      "kl": 0.3658447265625,
      "learning_rate": 3.3049545681714775e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 280
    },
    {
      "completion_length": 20.976563274860382,
      "epoch": 0.1504,
      "grad_norm": 0.005935196775123734,
      "kl": 0.365234375,
      "learning_rate": 3.2373449109006474e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 282
    },
    {
      "completion_length": 28.750000536441803,
      "epoch": 0.15146666666666667,
      "grad_norm": 0.006185835967833735,
      "kl": 0.3516845703125,
      "learning_rate": 3.1701013088304206e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 284
    },
    {
      "completion_length": 33.2838551402092,
      "epoch": 0.15253333333333333,
      "grad_norm": 0.004000349539922501,
      "kl": 0.3592529296875,
      "learning_rate": 3.1032377266158214e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 286
    },
    {
      "completion_length": 25.153646647930145,
      "epoch": 0.1536,
      "grad_norm": 0.006473305213296596,
      "kl": 0.3848876953125,
      "learning_rate": 3.036768049992157e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 288
    },
    {
      "completion_length": 29.770834028720856,
      "epoch": 0.15466666666666667,
      "grad_norm": 0.003973211467908967,
      "kl": 0.3634033203125,
      "learning_rate": 2.9707060828913224e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 290
    },
    {
      "completion_length": 22.49479216337204,
      "epoch": 0.15573333333333333,
      "grad_norm": 0.00485962798897511,
      "kl": 0.3812255859375,
      "learning_rate": 2.9050655445751137e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 292
    },
    {
      "completion_length": 20.432292222976685,
      "epoch": 0.1568,
      "grad_norm": 0.0763280002987439,
      "kl": 0.359619140625,
      "learning_rate": 2.839860066786103e-07,
      "loss": 0.0004,
      "reward": 0.9973958358168602,
      "reward_std": 0.007365695666521788,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 294
    },
    {
      "completion_length": 22.606771171092987,
      "epoch": 0.15786666666666666,
      "grad_norm": 0.007131272098647573,
      "kl": 0.407958984375,
      "learning_rate": 2.7751031909167045e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 296
    },
    {
      "completion_length": 26.085938036441803,
      "epoch": 0.15893333333333334,
      "grad_norm": 0.006348176701408371,
      "kl": 0.374755859375,
      "learning_rate": 2.710808365197e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 298
    },
    {
      "completion_length": 28.302084028720856,
      "epoch": 0.16,
      "grad_norm": 0.00329994798018086,
      "kl": 0.357421875,
      "learning_rate": 2.646988941901898e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 300
    },
    {
      "completion_length": 24.33333396911621,
      "epoch": 0.16106666666666666,
      "grad_norm": 0.0041357777851463405,
      "kl": 0.3416748046875,
      "learning_rate": 2.583658174578247e-07,
      "loss": 0.0003,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 302
    },
    {
      "completion_length": 21.351562976837158,
      "epoch": 0.16213333333333332,
      "grad_norm": 0.004744062593804727,
      "kl": 0.3682861328125,
      "learning_rate": 2.520829215292426e-07,
      "loss": 0.0004,
      "reward": 1.0,
      "reward_std": 0.0,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 1.0,
      "step": 304
    },
    {
      "completion_length": 17.56510454416275,
      "epoch": 0.1632,
      "grad_norm": 0.004071588499335152,
      "kl": 0.3665771484375,
| "learning_rate": 2.4585151118990285e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 306 | |
| }, | |
| { | |
| "completion_length": 25.093750953674316, | |
| "epoch": 0.16426666666666667, | |
| "grad_norm": 0.003447178186189921, | |
| "kl": 0.3665771484375, | |
| "learning_rate": 2.396728805331167e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 308 | |
| }, | |
| { | |
| "completion_length": 39.61198019981384, | |
| "epoch": 0.16533333333333333, | |
| "grad_norm": 0.15778923141355317, | |
| "kl": 0.3824462890625, | |
| "learning_rate": 2.3354831269130132e-07, | |
| "loss": 0.0004, | |
| "reward": 0.9947916716337204, | |
| "reward_std": 0.014731391333043575, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9947916716337204, | |
| "step": 310 | |
| }, | |
| { | |
| "completion_length": 20.757813096046448, | |
| "epoch": 0.1664, | |
| "grad_norm": 0.006322854264740659, | |
| "kl": 0.3919677734375, | |
| "learning_rate": 2.2747907956950707e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 312 | |
| }, | |
| { | |
| "completion_length": 18.778646290302277, | |
| "epoch": 0.16746666666666668, | |
| "grad_norm": 0.004277715160452868, | |
| "kl": 0.3692626953125, | |
| "learning_rate": 2.2146644158127826e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 314 | |
| }, | |
| { | |
| "completion_length": 22.914063036441803, | |
| "epoch": 0.16853333333333334, | |
| "grad_norm": 0.005925839314806503, | |
| "kl": 0.3868408203125, | |
| "learning_rate": 2.1551164738689892e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 316 | |
| }, | |
| { | |
| "completion_length": 19.528646647930145, | |
| "epoch": 0.1696, | |
| "grad_norm": 0.004034463075140326, | |
| "kl": 0.3629150390625, | |
| "learning_rate": 2.0961593363408154e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 318 | |
| }, | |
| { | |
| "completion_length": 26.062500655651093, | |
| "epoch": 0.17066666666666666, | |
| "grad_norm": 0.003874147172074953, | |
| "kl": 0.3551025390625, | |
| "learning_rate": 2.037805247011482e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 320 | |
| }, | |
| { | |
| "completion_length": 22.450521290302277, | |
| "epoch": 0.17173333333333332, | |
| "grad_norm": 0.0036545323789303995, | |
| "kl": 0.3665771484375, | |
| "learning_rate": 1.9800663244276127e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 322 | |
| }, | |
| { | |
| "completion_length": 33.1458340883255, | |
| "epoch": 0.1728, | |
| "grad_norm": 0.006124961025482053, | |
| "kl": 0.358642578125, | |
| "learning_rate": 1.9229545593825363e-07, | |
| "loss": 0.0004, | |
| "reward": 0.9973958358168602, | |
| "reward_std": 0.007365695666521788, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9973958358168602, | |
| "step": 324 | |
| }, | |
| { | |
| "completion_length": 25.63541728258133, | |
| "epoch": 0.17386666666666667, | |
| "grad_norm": 0.004330409449251065, | |
| "kl": 0.38134765625, | |
| "learning_rate": 1.8664818124261373e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 326 | |
| }, | |
| { | |
| "completion_length": 30.52343863248825, | |
| "epoch": 0.17493333333333333, | |
| "grad_norm": 0.004429155190479566, | |
| "kl": 0.3779296875, | |
| "learning_rate": 1.8106598114017397e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 328 | |
| }, | |
| { | |
| "completion_length": 29.343750715255737, | |
| "epoch": 0.176, | |
| "grad_norm": 0.004794753409205765, | |
| "kl": 0.361083984375, | |
| "learning_rate": 1.7555001490105486e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 330 | |
| }, | |
| { | |
| "completion_length": 28.692708909511566, | |
| "epoch": 0.17706666666666668, | |
| "grad_norm": 0.005714723884457156, | |
| "kl": 0.3719482421875, | |
| "learning_rate": 1.7010142804041783e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 332 | |
| }, | |
| { | |
| "completion_length": 24.77864646911621, | |
| "epoch": 0.17813333333333334, | |
| "grad_norm": 0.004745602217539158, | |
| "kl": 0.35888671875, | |
| "learning_rate": 1.6472135208057125e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 334 | |
| }, | |
| { | |
| "completion_length": 30.609375655651093, | |
| "epoch": 0.1792, | |
| "grad_norm": 0.004920986126883945, | |
| "kl": 0.380126953125, | |
| "learning_rate": 1.5941090431598653e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 336 | |
| }, | |
| { | |
| "completion_length": 32.19010490179062, | |
| "epoch": 0.18026666666666666, | |
| "grad_norm": 0.004022967836188304, | |
| "kl": 0.3558349609375, | |
| "learning_rate": 1.5417118758126408e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 338 | |
| }, | |
| { | |
| "completion_length": 28.093750953674316, | |
| "epoch": 0.18133333333333335, | |
| "grad_norm": 0.003832900467363359, | |
| "kl": 0.3629150390625, | |
| "learning_rate": 1.490032900221068e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 340 | |
| }, | |
| { | |
| "completion_length": 28.463542699813843, | |
| "epoch": 0.1824, | |
| "grad_norm": 0.005487525138346315, | |
| "kl": 0.3526611328125, | |
| "learning_rate": 1.4390828486934058e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 342 | |
| }, | |
| { | |
| "completion_length": 24.825521528720856, | |
| "epoch": 0.18346666666666667, | |
| "grad_norm": 0.0031850458946458297, | |
| "kl": 0.36962890625, | |
| "learning_rate": 1.3888723021603526e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 344 | |
| }, | |
| { | |
| "completion_length": 42.6744801402092, | |
| "epoch": 0.18453333333333333, | |
| "grad_norm": 0.0035741821948086553, | |
| "kl": 0.34521484375, | |
| "learning_rate": 1.3394116879776567e-07, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 346 | |
| }, | |
| { | |
| "completion_length": 32.55729299783707, | |
| "epoch": 0.1856, | |
| "grad_norm": 0.0047430778934828615, | |
| "kl": 0.3699951171875, | |
| "learning_rate": 1.2907112777606576e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 348 | |
| }, | |
| { | |
| "completion_length": 32.25260508060455, | |
| "epoch": 0.18666666666666668, | |
| "grad_norm": 0.003936651676485424, | |
| "kl": 0.3486328125, | |
| "learning_rate": 1.2427811852511395e-07, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 350 | |
| }, | |
| { | |
| "completion_length": 30.427084147930145, | |
| "epoch": 0.18773333333333334, | |
| "grad_norm": 0.0023807576550031397, | |
| "kl": 0.3360595703125, | |
| "learning_rate": 1.1956313642169973e-07, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 352 | |
| }, | |
| { | |
| "completion_length": 34.77864670753479, | |
| "epoch": 0.1888, | |
| "grad_norm": 0.15089837821656296, | |
| "kl": 0.34228515625, | |
| "learning_rate": 1.1492716063850971e-07, | |
| "loss": 0.0003, | |
| "reward": 0.9973958358168602, | |
| "reward_std": 0.007365695666521788, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9973958358168602, | |
| "step": 354 | |
| }, | |
| { | |
| "completion_length": 25.59895896911621, | |
| "epoch": 0.18986666666666666, | |
| "grad_norm": 0.004901087575209092, | |
| "kl": 0.3863525390625, | |
| "learning_rate": 1.1037115394078162e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 356 | |
| }, | |
| { | |
| "completion_length": 27.411459028720856, | |
| "epoch": 0.19093333333333334, | |
| "grad_norm": 0.2515997335919245, | |
| "kl": 0.415283203125, | |
| "learning_rate": 1.058960624863629e-07, | |
| "loss": 0.0004, | |
| "reward": 0.9947916716337204, | |
| "reward_std": 0.014731391333043575, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9947916716337204, | |
| "step": 358 | |
| }, | |
| { | |
| "completion_length": 24.864584028720856, | |
| "epoch": 0.192, | |
| "grad_norm": 0.0046814573951274975, | |
| "kl": 0.3677978515625, | |
| "learning_rate": 1.015028156292212e-07, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 360 | |
| }, | |
| { | |
| "completion_length": 26.3255215883255, | |
| "epoch": 0.19306666666666666, | |
| "grad_norm": 0.003523171914321313, | |
| "kl": 0.3526611328125, | |
| "learning_rate": 9.719232572644187e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 362 | |
| }, | |
| { | |
| "completion_length": 17.119792222976685, | |
| "epoch": 0.19413333333333332, | |
| "grad_norm": 0.0033418329191399116, | |
| "kl": 0.3404541015625, | |
| "learning_rate": 9.296548794875658e-08, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 364 | |
| }, | |
| { | |
| "completion_length": 23.22135478258133, | |
| "epoch": 0.1952, | |
| "grad_norm": 0.003827547965209838, | |
| "kl": 0.3699951171875, | |
| "learning_rate": 8.882318009464123e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 366 | |
| }, | |
| { | |
| "completion_length": 25.372396528720856, | |
| "epoch": 0.19626666666666667, | |
| "grad_norm": 0.005771301327474302, | |
| "kl": 0.390869140625, | |
| "learning_rate": 8.476626240802099e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 368 | |
| }, | |
| { | |
| "completion_length": 28.466146528720856, | |
| "epoch": 0.19733333333333333, | |
| "grad_norm": 0.005304014054373385, | |
| "kl": 0.3919677734375, | |
| "learning_rate": 8.079557739962128e-08, | |
| "loss": 0.0004, | |
| "reward": 0.9973958358168602, | |
| "reward_std": 0.007365695666521788, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9973958358168602, | |
| "step": 370 | |
| }, | |
| { | |
| "completion_length": 28.109375596046448, | |
| "epoch": 0.1984, | |
| "grad_norm": 0.008746822168976117, | |
| "kl": 0.381591796875, | |
| "learning_rate": 7.691194967200098e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 372 | |
| }, | |
| { | |
| "completion_length": 29.783854722976685, | |
| "epoch": 0.19946666666666665, | |
| "grad_norm": 0.003852702379103747, | |
| "kl": 0.3544921875, | |
| "learning_rate": 7.311618574830569e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 374 | |
| }, | |
| { | |
| "completion_length": 37.625001072883606, | |
| "epoch": 0.20053333333333334, | |
| "grad_norm": 0.006486759121452462, | |
| "kl": 0.382568359375, | |
| "learning_rate": 6.940907390477457e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 376 | |
| }, | |
| { | |
| "completion_length": 22.640625834465027, | |
| "epoch": 0.2016, | |
| "grad_norm": 0.005855790645544301, | |
| "kl": 0.3614501953125, | |
| "learning_rate": 6.579138400703715e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 378 | |
| }, | |
| { | |
| "completion_length": 33.35937559604645, | |
| "epoch": 0.20266666666666666, | |
| "grad_norm": 0.0045352649165850195, | |
| "kl": 0.3572998046875, | |
| "learning_rate": 6.22638673502327e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 380 | |
| }, | |
| { | |
| "completion_length": 37.419271528720856, | |
| "epoch": 0.20373333333333332, | |
| "grad_norm": 0.0055192004314618985, | |
| "kl": 0.3720703125, | |
| "learning_rate": 5.882725650298787e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 382 | |
| }, | |
| { | |
| "completion_length": 35.075521647930145, | |
| "epoch": 0.2048, | |
| "grad_norm": 0.00906841179693692, | |
| "kl": 0.388671875, | |
| "learning_rate": 5.548226515528132e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 384 | |
| }, | |
| { | |
| "completion_length": 44.68489694595337, | |
| "epoch": 0.20586666666666667, | |
| "grad_norm": 0.005997242826384812, | |
| "kl": 0.363037109375, | |
| "learning_rate": 5.222958797023036e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 386 | |
| }, | |
| { | |
| "completion_length": 36.622397005558014, | |
| "epoch": 0.20693333333333333, | |
| "grad_norm": 0.10183351032451872, | |
| "kl": 0.375244140625, | |
| "learning_rate": 4.9069900439828115e-08, | |
| "loss": 0.0004, | |
| "reward": 0.9973958358168602, | |
| "reward_std": 0.007365695666521788, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9973958358168602, | |
| "step": 388 | |
| }, | |
| { | |
| "completion_length": 27.940105140209198, | |
| "epoch": 0.208, | |
| "grad_norm": 0.006355436428529637, | |
| "kl": 0.3638916015625, | |
| "learning_rate": 4.600385874466256e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 390 | |
| }, | |
| { | |
| "completion_length": 26.825521230697632, | |
| "epoch": 0.20906666666666668, | |
| "grad_norm": 0.005474495697015851, | |
| "kl": 0.3861083984375, | |
| "learning_rate": 4.303209961764587e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 392 | |
| }, | |
| { | |
| "completion_length": 43.97395968437195, | |
| "epoch": 0.21013333333333334, | |
| "grad_norm": 0.0034248967357224185, | |
| "kl": 0.3330078125, | |
| "learning_rate": 4.015524021178196e-08, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 394 | |
| }, | |
| { | |
| "completion_length": 33.33333444595337, | |
| "epoch": 0.2112, | |
| "grad_norm": 0.006434693223212768, | |
| "kl": 0.3935546875, | |
| "learning_rate": 3.7373877972001255e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 396 | |
| }, | |
| { | |
| "completion_length": 32.841146647930145, | |
| "epoch": 0.21226666666666666, | |
| "grad_norm": 0.004929606396670415, | |
| "kl": 0.3687744140625, | |
| "learning_rate": 3.46885905110873e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 398 | |
| }, | |
| { | |
| "completion_length": 25.093750596046448, | |
| "epoch": 0.21333333333333335, | |
| "grad_norm": 0.005791877214539501, | |
| "kl": 0.3695068359375, | |
| "learning_rate": 3.20999354897229e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 400 | |
| }, | |
| { | |
| "completion_length": 32.02604269981384, | |
| "epoch": 0.2144, | |
| "grad_norm": 0.005202107306989119, | |
| "kl": 0.3798828125, | |
| "learning_rate": 2.9608450500678562e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 402 | |
| }, | |
| { | |
| "completion_length": 27.648438274860382, | |
| "epoch": 0.21546666666666667, | |
| "grad_norm": 0.005371398662773084, | |
| "kl": 0.3856201171875, | |
| "learning_rate": 2.721465295716996e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 404 | |
| }, | |
| { | |
| "completion_length": 27.742188096046448, | |
| "epoch": 0.21653333333333333, | |
| "grad_norm": 0.006378137301419084, | |
| "kl": 0.3594970703125, | |
| "learning_rate": 2.4919039985404622e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 406 | |
| }, | |
| { | |
| "completion_length": 24.539063334465027, | |
| "epoch": 0.2176, | |
| "grad_norm": 0.006280450050690689, | |
| "kl": 0.3812255859375, | |
| "learning_rate": 2.2722088321343258e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 408 | |
| }, | |
| { | |
| "completion_length": 29.023438096046448, | |
| "epoch": 0.21866666666666668, | |
| "grad_norm": 0.007309421154996187, | |
| "kl": 0.390380859375, | |
| "learning_rate": 2.0624254211693894e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 410 | |
| }, | |
| { | |
| "completion_length": 27.057292222976685, | |
| "epoch": 0.21973333333333334, | |
| "grad_norm": 0.004269729933642793, | |
| "kl": 0.3533935546875, | |
| "learning_rate": 1.8625973319162602e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 412 | |
| }, | |
| { | |
| "completion_length": 29.026042342185974, | |
| "epoch": 0.2208, | |
| "grad_norm": 0.0076460713410529055, | |
| "kl": 0.4102783203125, | |
| "learning_rate": 1.672766063197789e-08, | |
| "loss": 0.0004, | |
| "reward": 0.9973958358168602, | |
| "reward_std": 0.007365695666521788, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 0.9973958358168602, | |
| "step": 414 | |
| }, | |
| { | |
| "completion_length": 39.75000101327896, | |
| "epoch": 0.22186666666666666, | |
| "grad_norm": 0.00506495764972648, | |
| "kl": 0.365234375, | |
| "learning_rate": 1.492971037770924e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 416 | |
| }, | |
| { | |
| "completion_length": 24.554688572883606, | |
| "epoch": 0.22293333333333334, | |
| "grad_norm": 0.003635383901655059, | |
| "kl": 0.356689453125, | |
| "learning_rate": 1.3232495941396637e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 418 | |
| }, | |
| { | |
| "completion_length": 27.872396171092987, | |
| "epoch": 0.224, | |
| "grad_norm": 0.005264483567820246, | |
| "kl": 0.374755859375, | |
| "learning_rate": 1.1636369788008971e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 420 | |
| }, | |
| { | |
| "completion_length": 26.59114670753479, | |
| "epoch": 0.22506666666666666, | |
| "grad_norm": 0.0037470230331634537, | |
| "kl": 0.3553466796875, | |
| "learning_rate": 1.014166338924627e-08, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 422 | |
| }, | |
| { | |
| "completion_length": 26.52343863248825, | |
| "epoch": 0.22613333333333333, | |
| "grad_norm": 0.0050309925811448716, | |
| "kl": 0.371826171875, | |
| "learning_rate": 8.748687154702672e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 424 | |
| }, | |
| { | |
| "completion_length": 42.177085161209106, | |
| "epoch": 0.2272, | |
| "grad_norm": 0.004690685442083978, | |
| "kl": 0.3511962890625, | |
| "learning_rate": 7.457730367402549e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 426 | |
| }, | |
| { | |
| "completion_length": 33.70052194595337, | |
| "epoch": 0.22826666666666667, | |
| "grad_norm": 0.0037686388761955696, | |
| "kl": 0.35205078125, | |
| "learning_rate": 6.269061123724162e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 428 | |
| }, | |
| { | |
| "completion_length": 27.914063453674316, | |
| "epoch": 0.22933333333333333, | |
| "grad_norm": 0.003330651432955464, | |
| "kl": 0.354736328125, | |
| "learning_rate": 5.182926277723821e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 430 | |
| }, | |
| { | |
| "completion_length": 34.72656321525574, | |
| "epoch": 0.2304, | |
| "grad_norm": 0.004691669166337136, | |
| "kl": 0.3543701171875, | |
| "learning_rate": 4.199551389870659e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 432 | |
| }, | |
| { | |
| "completion_length": 21.466146230697632, | |
| "epoch": 0.23146666666666665, | |
| "grad_norm": 0.006847054731283313, | |
| "kl": 0.3829345703125, | |
| "learning_rate": 3.3191406802041688e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 434 | |
| }, | |
| { | |
| "completion_length": 24.791667580604553, | |
| "epoch": 0.23253333333333334, | |
| "grad_norm": 0.004002274972123922, | |
| "kl": 0.4224853515625, | |
| "learning_rate": 2.541876985923119e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 436 | |
| }, | |
| { | |
| "completion_length": 26.74479252099991, | |
| "epoch": 0.2336, | |
| "grad_norm": 0.004841844168761783, | |
| "kl": 0.3985595703125, | |
| "learning_rate": 1.867921723415433e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 438 | |
| }, | |
| { | |
| "completion_length": 30.26562613248825, | |
| "epoch": 0.23466666666666666, | |
| "grad_norm": 0.0037147860959137894, | |
| "kl": 0.3653564453125, | |
| "learning_rate": 1.2974148547362228e-09, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 440 | |
| }, | |
| { | |
| "completion_length": 39.00781399011612, | |
| "epoch": 0.23573333333333332, | |
| "grad_norm": 0.0036142159951702123, | |
| "kl": 0.3477783203125, | |
| "learning_rate": 8.304748585417076e-10, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 442 | |
| }, | |
| { | |
| "completion_length": 22.03385466337204, | |
| "epoch": 0.2368, | |
| "grad_norm": 0.004359450323671437, | |
| "kl": 0.3602294921875, | |
| "learning_rate": 4.671987054842841e-10, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 444 | |
| }, | |
| { | |
| "completion_length": 25.75520932674408, | |
| "epoch": 0.23786666666666667, | |
| "grad_norm": 0.0055438604826414635, | |
| "kl": 0.366455078125, | |
| "learning_rate": 2.076618380744133e-10, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 446 | |
| }, | |
| { | |
| "completion_length": 29.468751192092896, | |
| "epoch": 0.23893333333333333, | |
| "grad_norm": 0.004686764994007478, | |
| "kl": 0.3499755859375, | |
| "learning_rate": 5.191815501343066e-11, | |
| "loss": 0.0003, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 448 | |
| }, | |
| { | |
| "completion_length": 23.330729842185974, | |
| "epoch": 0.24, | |
| "grad_norm": 0.002691527304301135, | |
| "kl": 0.359375, | |
| "learning_rate": 0.0, | |
| "loss": 0.0004, | |
| "reward": 1.0, | |
| "reward_std": 0.0, | |
| "rewards/equation_reward_func": 0.0, | |
| "rewards/format_reward_func": 1.0, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "step": 450, | |
| "total_flos": 0.0, | |
| "train_loss": 0.00035568679777363087, | |
| "train_runtime": 8958.7654, | |
| "train_samples_per_second": 1.206, | |
| "train_steps_per_second": 0.05 | |
| } | |
| ], | |
| "logging_steps": 2, | |
| "max_steps": 450, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 25, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |