{
  "best_metric": 0.9330014224751066,
  "best_model_checkpoint": "mobilenet_v2_1.0_224-plant-disease/checkpoint-474",
  "epoch": 2.995260663507109,
  "eval_steps": 500,
  "global_step": 474,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 3.7019,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 3.6465,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 3.125e-05,
      "loss": 3.5559,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.166666666666667e-05,
      "loss": 3.39,
      "step": 40
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.976525821596245e-05,
      "loss": 3.1978,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.8591549295774653e-05,
      "loss": 2.9395,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.741784037558686e-05,
      "loss": 2.6478,
      "step": 70
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.624413145539906e-05,
      "loss": 2.3708,
      "step": 80
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.507042253521127e-05,
      "loss": 2.1101,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.389671361502348e-05,
      "loss": 1.8644,
      "step": 100
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.2723004694835684e-05,
      "loss": 1.6206,
      "step": 110
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.154929577464789e-05,
      "loss": 1.4544,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.037558685446009e-05,
      "loss": 1.2805,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.9201877934272305e-05,
      "loss": 1.1652,
      "step": 140
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.802816901408451e-05,
      "loss": 1.0369,
      "step": 150
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.841678520625889,
      "eval_loss": 0.911638617515564,
      "eval_runtime": 29.0643,
      "eval_samples_per_second": 241.878,
      "eval_steps_per_second": 2.443,
      "step": 158
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.6854460093896714e-05,
      "loss": 0.9371,
      "step": 160
    },
    {
      "epoch": 1.07,
      "learning_rate": 3.568075117370892e-05,
      "loss": 0.8671,
      "step": 170
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.450704225352113e-05,
      "loss": 0.8125,
      "step": 180
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.7496,
      "step": 190
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.215962441314554e-05,
      "loss": 0.7056,
      "step": 200
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.0985915492957744e-05,
      "loss": 0.6473,
      "step": 210
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.9812206572769952e-05,
      "loss": 0.6367,
      "step": 220
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.8638497652582164e-05,
      "loss": 0.6065,
      "step": 230
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.746478873239437e-05,
      "loss": 0.5866,
      "step": 240
    },
    {
      "epoch": 1.58,
      "learning_rate": 2.6291079812206577e-05,
      "loss": 0.5493,
      "step": 250
    },
    {
      "epoch": 1.64,
      "learning_rate": 2.511737089201878e-05,
      "loss": 0.517,
      "step": 260
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.3943661971830986e-05,
      "loss": 0.5116,
      "step": 270
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.2769953051643194e-05,
      "loss": 0.484,
      "step": 280
    },
    {
      "epoch": 1.83,
      "learning_rate": 2.1596244131455402e-05,
      "loss": 0.4749,
      "step": 290
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.0422535211267607e-05,
      "loss": 0.4907,
      "step": 300
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.9248826291079812e-05,
      "loss": 0.4523,
      "step": 310
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9038406827880512,
      "eval_loss": 0.4555762708187103,
      "eval_runtime": 28.2946,
      "eval_samples_per_second": 248.458,
      "eval_steps_per_second": 2.509,
      "step": 316
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.807511737089202e-05,
      "loss": 0.445,
      "step": 320
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.6901408450704224e-05,
      "loss": 0.4667,
      "step": 330
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.5727699530516433e-05,
      "loss": 0.4412,
      "step": 340
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.4553990610328639e-05,
      "loss": 0.441,
      "step": 350
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.3380281690140845e-05,
      "loss": 0.4042,
      "step": 360
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.2206572769953052e-05,
      "loss": 0.3967,
      "step": 370
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.1032863849765258e-05,
      "loss": 0.4057,
      "step": 380
    },
    {
      "epoch": 2.46,
      "learning_rate": 9.859154929577465e-06,
      "loss": 0.4034,
      "step": 390
    },
    {
      "epoch": 2.53,
      "learning_rate": 8.685446009389673e-06,
      "loss": 0.3852,
      "step": 400
    },
    {
      "epoch": 2.59,
      "learning_rate": 7.511737089201878e-06,
      "loss": 0.4029,
      "step": 410
    },
    {
      "epoch": 2.65,
      "learning_rate": 6.338028169014085e-06,
      "loss": 0.3827,
      "step": 420
    },
    {
      "epoch": 2.72,
      "learning_rate": 5.164319248826292e-06,
      "loss": 0.3949,
      "step": 430
    },
    {
      "epoch": 2.78,
      "learning_rate": 3.990610328638498e-06,
      "loss": 0.3889,
      "step": 440
    },
    {
      "epoch": 2.84,
      "learning_rate": 2.8169014084507042e-06,
      "loss": 0.3895,
      "step": 450
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.643192488262911e-06,
      "loss": 0.3807,
      "step": 460
    },
    {
      "epoch": 2.97,
      "learning_rate": 4.694835680751174e-07,
      "loss": 0.3848,
      "step": 470
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9330014224751066,
      "eval_loss": 0.3579290211200714,
      "eval_runtime": 28.182,
      "eval_samples_per_second": 249.45,
      "eval_steps_per_second": 2.519,
      "step": 474
    },
    {
      "epoch": 3.0,
      "step": 474,
      "total_flos": 5.08093741596672e+17,
      "train_loss": 1.111411559682355,
      "train_runtime": 1716.5107,
      "train_samples_per_second": 110.57,
      "train_steps_per_second": 0.276
    }
  ],
  "logging_steps": 10,
  "max_steps": 474,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 5.08093741596672e+17,
  "trial_name": null,
  "trial_params": null
}