{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3960790634155273,
"min": 1.3960790634155273,
"max": 1.4228882789611816,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69299.96875,
"min": 68327.421875,
"max": 78425.953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.49040511727078,
"min": 82.33166666666666,
"max": 400.23809523809524,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49475.0,
"min": 49069.0,
"max": 50430.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999976.0,
"min": 49806.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999976.0,
"min": 49806.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.332348346710205,
"min": -0.02482997439801693,
"max": 2.45263934135437,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1093.871337890625,
"min": -3.1037468910217285,
"max": 1401.760009765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4832045424467464,
"min": 1.9119500403404235,
"max": 3.9235784834231473,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1633.622930407524,
"min": 238.99375504255295,
"max": 2267.828363418579,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4832045424467464,
"min": 1.9119500403404235,
"max": 3.9235784834231473,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1633.622930407524,
"min": 238.99375504255295,
"max": 2267.828363418579,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015039324720985153,
"min": 0.012809203745564445,
"max": 0.021040869372599344,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04511797416295546,
"min": 0.02561840749112889,
"max": 0.06312260811779803,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.047835016912884186,
"min": 0.022919661737978457,
"max": 0.1534860470642646,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14350505073865255,
"min": 0.045839323475956914,
"max": 0.3069720941285292,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5279988240333395e-06,
"min": 3.5279988240333395e-06,
"max": 0.00029532502655832497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0583996472100018e-05,
"min": 1.0583996472100018e-05,
"max": 0.0008443338185554,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117596666666669,
"min": 0.10117596666666669,
"max": 0.19844167499999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035279000000001,
"min": 0.2074935,
"max": 0.5814445999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.868073666666678e-05,
"min": 6.868073666666678e-05,
"max": 0.0049222395825,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020604221000000036,
"min": 0.00020604221000000036,
"max": 0.014074085540000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682476049",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682478465"
},
"total": 2416.442502388,
"count": 1,
"self": 0.759403311999904,
"children": {
"run_training.setup": {
"total": 0.19771993699998802,
"count": 1,
"self": 0.19771993699998802
},
"TrainerController.start_learning": {
"total": 2415.485379139,
"count": 1,
"self": 4.423246079998989,
"children": {
"TrainerController._reset_env": {
"total": 4.686489497000025,
"count": 1,
"self": 4.686489497000025
},
"TrainerController.advance": {
"total": 2406.251209567001,
"count": 231893,
"self": 5.018179593027071,
"children": {
"env_step": {
"total": 1880.3876431369872,
"count": 231893,
"self": 1592.07582907095,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.3538245529714,
"count": 231893,
"self": 17.408280314920376,
"children": {
"TorchPolicy.evaluate": {
"total": 267.945544238051,
"count": 222943,
"self": 267.945544238051
}
}
},
"workers": {
"total": 2.9579895130657974,
"count": 231893,
"self": 0.0,
"children": {
"worker_root": {
"total": 2407.3772240460303,
"count": 231893,
"is_parallel": true,
"self": 1105.4579545099955,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001099018999980217,
"count": 1,
"is_parallel": true,
"self": 0.00036172699992675916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007372920000534577,
"count": 2,
"is_parallel": true,
"self": 0.0007372920000534577
}
}
},
"UnityEnvironment.step": {
"total": 0.030142233999981727,
"count": 1,
"is_parallel": true,
"self": 0.0003001829999789152,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002510219999862784,
"count": 1,
"is_parallel": true,
"self": 0.0002510219999862784
},
"communicator.exchange": {
"total": 0.028853115999993406,
"count": 1,
"is_parallel": true,
"self": 0.028853115999993406
},
"steps_from_proto": {
"total": 0.0007379130000231271,
"count": 1,
"is_parallel": true,
"self": 0.00023319800004628632,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005047149999768408,
"count": 2,
"is_parallel": true,
"self": 0.0005047149999768408
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1301.9192695360348,
"count": 231892,
"is_parallel": true,
"self": 38.68974188197194,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.64802933709075,
"count": 231892,
"is_parallel": true,
"self": 81.64802933709075
},
"communicator.exchange": {
"total": 1091.7474967920093,
"count": 231892,
"is_parallel": true,
"self": 1091.7474967920093
},
"steps_from_proto": {
"total": 89.83400152496273,
"count": 231892,
"is_parallel": true,
"self": 33.74604899587905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.08795252908368,
"count": 463784,
"is_parallel": true,
"self": 56.08795252908368
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 520.8453868369866,
"count": 231893,
"self": 6.745547907071341,
"children": {
"process_trajectory": {
"total": 134.66724047691565,
"count": 231893,
"self": 133.2273397719154,
"children": {
"RLTrainer._checkpoint": {
"total": 1.439900705000241,
"count": 10,
"self": 1.439900705000241
}
}
},
"_update_policy": {
"total": 379.43259845299957,
"count": 97,
"self": 319.37727546699455,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.055322986005024,
"count": 2910,
"self": 60.055322986005024
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0650001058820635e-06,
"count": 1,
"self": 1.0650001058820635e-06
},
"TrainerController._save_models": {
"total": 0.12443293000023914,
"count": 1,
"self": 0.0021581150003839866,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12227481499985515,
"count": 1,
"self": 0.12227481499985515
}
}
}
}
}
}
}