{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9956108266276518,
  "eval_steps": 500,
  "global_step": 1023,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029261155815654718,
      "grad_norm": 63.33159396582597,
      "learning_rate": 5e-06,
      "loss": 0.7981,
      "step": 10
    },
    {
      "epoch": 0.058522311631309436,
      "grad_norm": 1.2985053942795166,
      "learning_rate": 5e-06,
      "loss": 0.7394,
      "step": 20
    },
    {
      "epoch": 0.08778346744696415,
      "grad_norm": 1.2994972414920418,
      "learning_rate": 5e-06,
      "loss": 0.7076,
      "step": 30
    },
    {
      "epoch": 0.11704462326261887,
      "grad_norm": 1.09154200562291,
      "learning_rate": 5e-06,
      "loss": 0.6974,
      "step": 40
    },
    {
      "epoch": 0.14630577907827358,
      "grad_norm": 1.1635059184964134,
      "learning_rate": 5e-06,
      "loss": 0.6882,
      "step": 50
    },
    {
      "epoch": 0.1755669348939283,
      "grad_norm": 0.9663792155845926,
      "learning_rate": 5e-06,
      "loss": 0.6781,
      "step": 60
    },
    {
      "epoch": 0.20482809070958302,
      "grad_norm": 0.6939099177117586,
      "learning_rate": 5e-06,
      "loss": 0.6713,
      "step": 70
    },
    {
      "epoch": 0.23408924652523774,
      "grad_norm": 0.4554483136685933,
      "learning_rate": 5e-06,
      "loss": 0.6655,
      "step": 80
    },
    {
      "epoch": 0.26335040234089246,
      "grad_norm": 0.43998686977438867,
      "learning_rate": 5e-06,
      "loss": 0.6624,
      "step": 90
    },
    {
      "epoch": 0.29261155815654716,
      "grad_norm": 0.4681462289171194,
      "learning_rate": 5e-06,
      "loss": 0.6547,
      "step": 100
    },
    {
      "epoch": 0.3218727139722019,
      "grad_norm": 0.4088427127266188,
      "learning_rate": 5e-06,
      "loss": 0.6635,
      "step": 110
    },
    {
      "epoch": 0.3511338697878566,
      "grad_norm": 0.36705252012743494,
      "learning_rate": 5e-06,
      "loss": 0.6683,
      "step": 120
    },
    {
      "epoch": 0.38039502560351135,
      "grad_norm": 0.374209969368988,
      "learning_rate": 5e-06,
      "loss": 0.6522,
      "step": 130
    },
    {
      "epoch": 0.40965618141916604,
      "grad_norm": 0.3565722412862572,
      "learning_rate": 5e-06,
      "loss": 0.6529,
      "step": 140
    },
    {
      "epoch": 0.4389173372348208,
      "grad_norm": 0.34204106396539435,
      "learning_rate": 5e-06,
      "loss": 0.6483,
      "step": 150
    },
    {
      "epoch": 0.4681784930504755,
      "grad_norm": 0.38615414130675385,
      "learning_rate": 5e-06,
      "loss": 0.6502,
      "step": 160
    },
    {
      "epoch": 0.49743964886613024,
      "grad_norm": 0.3634230078003702,
      "learning_rate": 5e-06,
      "loss": 0.6552,
      "step": 170
    },
    {
      "epoch": 0.5267008046817849,
      "grad_norm": 0.3340266332324882,
      "learning_rate": 5e-06,
      "loss": 0.6549,
      "step": 180
    },
    {
      "epoch": 0.5559619604974396,
      "grad_norm": 0.35590307178082214,
      "learning_rate": 5e-06,
      "loss": 0.6434,
      "step": 190
    },
    {
      "epoch": 0.5852231163130943,
      "grad_norm": 0.3448405016272299,
      "learning_rate": 5e-06,
      "loss": 0.6446,
      "step": 200
    },
    {
      "epoch": 0.6144842721287491,
      "grad_norm": 0.3385609115523372,
      "learning_rate": 5e-06,
      "loss": 0.6511,
      "step": 210
    },
    {
      "epoch": 0.6437454279444038,
      "grad_norm": 0.3534390098067362,
      "learning_rate": 5e-06,
      "loss": 0.6392,
      "step": 220
    },
    {
      "epoch": 0.6730065837600585,
      "grad_norm": 0.3619673231162035,
      "learning_rate": 5e-06,
      "loss": 0.6391,
      "step": 230
    },
    {
      "epoch": 0.7022677395757132,
      "grad_norm": 0.3367671267006255,
      "learning_rate": 5e-06,
      "loss": 0.646,
      "step": 240
    },
    {
      "epoch": 0.731528895391368,
      "grad_norm": 0.34227548100303745,
      "learning_rate": 5e-06,
      "loss": 0.6443,
      "step": 250
    },
    {
      "epoch": 0.7607900512070227,
      "grad_norm": 0.3408993257912956,
      "learning_rate": 5e-06,
      "loss": 0.6469,
      "step": 260
    },
    {
      "epoch": 0.7900512070226774,
      "grad_norm": 0.3790106895262509,
      "learning_rate": 5e-06,
      "loss": 0.6492,
      "step": 270
    },
    {
      "epoch": 0.8193123628383321,
      "grad_norm": 0.3429230463611281,
      "learning_rate": 5e-06,
      "loss": 0.648,
      "step": 280
    },
    {
      "epoch": 0.8485735186539868,
      "grad_norm": 0.33672564322042764,
      "learning_rate": 5e-06,
      "loss": 0.6402,
      "step": 290
    },
    {
      "epoch": 0.8778346744696416,
      "grad_norm": 0.34609131513685526,
      "learning_rate": 5e-06,
      "loss": 0.6321,
      "step": 300
    },
    {
      "epoch": 0.9070958302852963,
      "grad_norm": 0.34957045568901657,
      "learning_rate": 5e-06,
      "loss": 0.6452,
      "step": 310
    },
    {
      "epoch": 0.936356986100951,
      "grad_norm": 0.33437406171376016,
      "learning_rate": 5e-06,
      "loss": 0.6372,
      "step": 320
    },
    {
      "epoch": 0.9656181419166057,
      "grad_norm": 0.371533848542589,
      "learning_rate": 5e-06,
      "loss": 0.6404,
      "step": 330
    },
    {
      "epoch": 0.9948792977322605,
      "grad_norm": 0.35130336452701355,
      "learning_rate": 5e-06,
      "loss": 0.6364,
      "step": 340
    },
    {
      "epoch": 0.9978054133138259,
      "eval_loss": 0.640914797782898,
      "eval_runtime": 345.6005,
      "eval_samples_per_second": 26.646,
      "eval_steps_per_second": 0.417,
      "step": 341
    },
    {
      "epoch": 1.025237746891002,
      "grad_norm": 0.3692130458298124,
      "learning_rate": 5e-06,
      "loss": 0.6524,
      "step": 350
    },
    {
      "epoch": 1.054498902706657,
      "grad_norm": 0.3553119811115944,
      "learning_rate": 5e-06,
      "loss": 0.6067,
      "step": 360
    },
    {
      "epoch": 1.0837600585223117,
      "grad_norm": 0.40346799763343727,
      "learning_rate": 5e-06,
      "loss": 0.6101,
      "step": 370
    },
    {
      "epoch": 1.1130212143379663,
      "grad_norm": 0.3197358067569843,
      "learning_rate": 5e-06,
      "loss": 0.6075,
      "step": 380
    },
    {
      "epoch": 1.142282370153621,
      "grad_norm": 0.40727251989734975,
      "learning_rate": 5e-06,
      "loss": 0.6127,
      "step": 390
    },
    {
      "epoch": 1.1715435259692757,
      "grad_norm": 0.3623259217407185,
      "learning_rate": 5e-06,
      "loss": 0.6142,
      "step": 400
    },
    {
      "epoch": 1.2008046817849305,
      "grad_norm": 0.3581625285849574,
      "learning_rate": 5e-06,
      "loss": 0.6139,
      "step": 410
    },
    {
      "epoch": 1.2300658376005853,
      "grad_norm": 0.36695340881733157,
      "learning_rate": 5e-06,
      "loss": 0.6021,
      "step": 420
    },
    {
      "epoch": 1.2593269934162399,
      "grad_norm": 0.3572031445012923,
      "learning_rate": 5e-06,
      "loss": 0.6122,
      "step": 430
    },
    {
      "epoch": 1.2885881492318947,
      "grad_norm": 0.34962802000050813,
      "learning_rate": 5e-06,
      "loss": 0.6048,
      "step": 440
    },
    {
      "epoch": 1.3178493050475493,
      "grad_norm": 0.35755536354001544,
      "learning_rate": 5e-06,
      "loss": 0.6086,
      "step": 450
    },
    {
      "epoch": 1.347110460863204,
      "grad_norm": 0.37570399727715037,
      "learning_rate": 5e-06,
      "loss": 0.6062,
      "step": 460
    },
    {
      "epoch": 1.3763716166788589,
      "grad_norm": 0.3228789388457264,
      "learning_rate": 5e-06,
      "loss": 0.6035,
      "step": 470
    },
    {
      "epoch": 1.4056327724945135,
      "grad_norm": 0.3634169062758459,
      "learning_rate": 5e-06,
      "loss": 0.6044,
      "step": 480
    },
    {
      "epoch": 1.4348939283101683,
      "grad_norm": 0.3502861798040734,
      "learning_rate": 5e-06,
      "loss": 0.6108,
      "step": 490
    },
    {
      "epoch": 1.464155084125823,
      "grad_norm": 0.34093533114824826,
      "learning_rate": 5e-06,
      "loss": 0.5991,
      "step": 500
    },
    {
      "epoch": 1.4934162399414777,
      "grad_norm": 0.3463701037893924,
      "learning_rate": 5e-06,
      "loss": 0.6071,
      "step": 510
    },
    {
      "epoch": 1.5226773957571325,
      "grad_norm": 0.38931672754338176,
      "learning_rate": 5e-06,
      "loss": 0.6095,
      "step": 520
    },
    {
      "epoch": 1.5519385515727873,
      "grad_norm": 0.34906068590167777,
      "learning_rate": 5e-06,
      "loss": 0.608,
      "step": 530
    },
    {
      "epoch": 1.5811997073884418,
      "grad_norm": 0.3701456606885589,
      "learning_rate": 5e-06,
      "loss": 0.6146,
      "step": 540
    },
    {
      "epoch": 1.6104608632040964,
      "grad_norm": 0.3506849233715239,
      "learning_rate": 5e-06,
      "loss": 0.6056,
      "step": 550
    },
    {
      "epoch": 1.6397220190197512,
      "grad_norm": 0.35747637775205837,
      "learning_rate": 5e-06,
      "loss": 0.6074,
      "step": 560
    },
    {
      "epoch": 1.668983174835406,
      "grad_norm": 0.32158800583674974,
      "learning_rate": 5e-06,
      "loss": 0.5979,
      "step": 570
    },
    {
      "epoch": 1.6982443306510606,
      "grad_norm": 0.3528344876208815,
      "learning_rate": 5e-06,
      "loss": 0.6072,
      "step": 580
    },
    {
      "epoch": 1.7275054864667154,
      "grad_norm": 0.41985622969098957,
      "learning_rate": 5e-06,
      "loss": 0.6075,
      "step": 590
    },
    {
      "epoch": 1.7567666422823702,
      "grad_norm": 0.34105125625368893,
      "learning_rate": 5e-06,
      "loss": 0.6044,
      "step": 600
    },
    {
      "epoch": 1.7860277980980248,
      "grad_norm": 0.34589478838024207,
      "learning_rate": 5e-06,
      "loss": 0.6069,
      "step": 610
    },
    {
      "epoch": 1.8152889539136796,
      "grad_norm": 0.32553452148702333,
      "learning_rate": 5e-06,
      "loss": 0.6073,
      "step": 620
    },
    {
      "epoch": 1.8445501097293344,
      "grad_norm": 0.3583629997971789,
      "learning_rate": 5e-06,
      "loss": 0.6063,
      "step": 630
    },
    {
      "epoch": 1.873811265544989,
      "grad_norm": 0.3258762553733896,
      "learning_rate": 5e-06,
      "loss": 0.6012,
      "step": 640
    },
    {
      "epoch": 1.9030724213606436,
      "grad_norm": 0.36542172455522237,
      "learning_rate": 5e-06,
      "loss": 0.6068,
      "step": 650
    },
    {
      "epoch": 1.9323335771762986,
      "grad_norm": 0.3468540827688793,
      "learning_rate": 5e-06,
      "loss": 0.6074,
      "step": 660
    },
    {
      "epoch": 1.9615947329919532,
      "grad_norm": 0.3301547297982186,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 670
    },
    {
      "epoch": 1.9908558888076078,
      "grad_norm": 0.353549889665131,
      "learning_rate": 5e-06,
      "loss": 0.6107,
      "step": 680
    },
    {
      "epoch": 1.9967081199707388,
      "eval_loss": 0.6333796381950378,
      "eval_runtime": 344.9128,
      "eval_samples_per_second": 26.699,
      "eval_steps_per_second": 0.417,
      "step": 682
    },
    {
      "epoch": 2.0212143379663496,
      "grad_norm": 0.38522584628732043,
      "learning_rate": 5e-06,
      "loss": 0.6214,
      "step": 690
    },
    {
      "epoch": 2.050475493782004,
      "grad_norm": 0.4092650769853219,
      "learning_rate": 5e-06,
      "loss": 0.5693,
      "step": 700
    },
    {
      "epoch": 2.0797366495976592,
      "grad_norm": 0.36395160858693937,
      "learning_rate": 5e-06,
      "loss": 0.5712,
      "step": 710
    },
    {
      "epoch": 2.108997805413314,
      "grad_norm": 0.36987210556495065,
      "learning_rate": 5e-06,
      "loss": 0.5723,
      "step": 720
    },
    {
      "epoch": 2.1382589612289684,
      "grad_norm": 0.3530004884029566,
      "learning_rate": 5e-06,
      "loss": 0.5741,
      "step": 730
    },
    {
      "epoch": 2.1675201170446234,
      "grad_norm": 0.3474755062723768,
      "learning_rate": 5e-06,
      "loss": 0.5794,
      "step": 740
    },
    {
      "epoch": 2.196781272860278,
      "grad_norm": 0.3203177683782767,
      "learning_rate": 5e-06,
      "loss": 0.5781,
      "step": 750
    },
    {
      "epoch": 2.2260424286759326,
      "grad_norm": 0.3661319457012291,
      "learning_rate": 5e-06,
      "loss": 0.5755,
      "step": 760
    },
    {
      "epoch": 2.255303584491587,
      "grad_norm": 0.3359421587293624,
      "learning_rate": 5e-06,
      "loss": 0.5742,
      "step": 770
    },
    {
      "epoch": 2.284564740307242,
      "grad_norm": 0.3332624185731929,
      "learning_rate": 5e-06,
      "loss": 0.5733,
      "step": 780
    },
    {
      "epoch": 2.313825896122897,
      "grad_norm": 0.3326965713448184,
      "learning_rate": 5e-06,
      "loss": 0.5716,
      "step": 790
    },
    {
      "epoch": 2.3430870519385514,
      "grad_norm": 0.3617619945788122,
      "learning_rate": 5e-06,
      "loss": 0.5794,
      "step": 800
    },
    {
      "epoch": 2.3723482077542064,
      "grad_norm": 0.3298127671828895,
      "learning_rate": 5e-06,
      "loss": 0.5746,
      "step": 810
    },
    {
      "epoch": 2.401609363569861,
      "grad_norm": 0.332422697888563,
      "learning_rate": 5e-06,
      "loss": 0.5808,
      "step": 820
    },
    {
      "epoch": 2.4308705193855156,
      "grad_norm": 0.3196015454900321,
      "learning_rate": 5e-06,
      "loss": 0.5731,
      "step": 830
    },
    {
      "epoch": 2.4601316752011706,
      "grad_norm": 0.35331559179835964,
      "learning_rate": 5e-06,
      "loss": 0.5736,
      "step": 840
    },
    {
      "epoch": 2.489392831016825,
      "grad_norm": 0.3392297184730635,
      "learning_rate": 5e-06,
      "loss": 0.5758,
      "step": 850
    },
    {
      "epoch": 2.5186539868324798,
      "grad_norm": 0.34620746539815944,
      "learning_rate": 5e-06,
      "loss": 0.5688,
      "step": 860
    },
    {
      "epoch": 2.547915142648135,
      "grad_norm": 0.4023501023219655,
      "learning_rate": 5e-06,
      "loss": 0.5712,
      "step": 870
    },
    {
      "epoch": 2.5771762984637894,
      "grad_norm": 0.34088303079875626,
      "learning_rate": 5e-06,
      "loss": 0.5754,
      "step": 880
    },
    {
      "epoch": 2.606437454279444,
      "grad_norm": 0.36405494880778144,
      "learning_rate": 5e-06,
      "loss": 0.5748,
      "step": 890
    },
    {
      "epoch": 2.6356986100950985,
      "grad_norm": 0.34721347207329656,
      "learning_rate": 5e-06,
      "loss": 0.5794,
      "step": 900
    },
    {
      "epoch": 2.6649597659107536,
      "grad_norm": 0.3546551945507394,
      "learning_rate": 5e-06,
      "loss": 0.5795,
      "step": 910
    },
    {
      "epoch": 2.694220921726408,
      "grad_norm": 0.36027986998179906,
      "learning_rate": 5e-06,
      "loss": 0.5738,
      "step": 920
    },
    {
      "epoch": 2.723482077542063,
      "grad_norm": 0.35159220496145294,
      "learning_rate": 5e-06,
      "loss": 0.5775,
      "step": 930
    },
    {
      "epoch": 2.7527432333577178,
      "grad_norm": 0.3504714185096239,
      "learning_rate": 5e-06,
      "loss": 0.5764,
      "step": 940
    },
    {
      "epoch": 2.7820043891733723,
      "grad_norm": 0.3284737703409318,
      "learning_rate": 5e-06,
      "loss": 0.5751,
      "step": 950
    },
    {
      "epoch": 2.811265544989027,
      "grad_norm": 0.37305249319315187,
      "learning_rate": 5e-06,
      "loss": 0.5789,
      "step": 960
    },
    {
      "epoch": 2.840526700804682,
      "grad_norm": 0.33884604095107257,
      "learning_rate": 5e-06,
      "loss": 0.5747,
      "step": 970
    },
    {
      "epoch": 2.8697878566203365,
      "grad_norm": 0.37958678053577166,
      "learning_rate": 5e-06,
      "loss": 0.5745,
      "step": 980
    },
    {
      "epoch": 2.899049012435991,
      "grad_norm": 0.34410472212682797,
      "learning_rate": 5e-06,
      "loss": 0.5675,
      "step": 990
    },
    {
      "epoch": 2.928310168251646,
      "grad_norm": 0.3222728501974356,
      "learning_rate": 5e-06,
      "loss": 0.5753,
      "step": 1000
    },
    {
      "epoch": 2.9575713240673007,
      "grad_norm": 0.3391773666930332,
      "learning_rate": 5e-06,
      "loss": 0.567,
      "step": 1010
    },
    {
      "epoch": 2.9868324798829553,
      "grad_norm": 0.33304266357077267,
      "learning_rate": 5e-06,
      "loss": 0.5787,
      "step": 1020
    },
    {
      "epoch": 2.9956108266276518,
      "eval_loss": 0.6330468654632568,
      "eval_runtime": 344.0095,
      "eval_samples_per_second": 26.77,
      "eval_steps_per_second": 0.419,
      "step": 1023
    },
    {
      "epoch": 2.9956108266276518,
      "step": 1023,
      "total_flos": 2144987064041472.0,
      "train_loss": 0.6152277645360923,
      "train_runtime": 54853.139,
      "train_samples_per_second": 9.569,
      "train_steps_per_second": 0.019
    }
  ],
  "logging_steps": 10,
  "max_steps": 1023,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2144987064041472.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}