ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7408730387687683,
"min": 0.7256978154182434,
"max": 1.4059088230133057,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 22012.8203125,
"min": 21678.044921875,
"max": 42649.6484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.08190051466226578,
"min": -0.11029922962188721,
"max": 0.1130589172244072,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 20.147525787353516,
"min": -26.582115173339844,
"max": 28.264728546142578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.005988653749227524,
"min": -0.005988653749227524,
"max": 0.2272021472454071,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.4732087850570679,
"min": -1.4732087850570679,
"max": 53.84690856933594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06862332884901194,
"min": 0.06351672026971308,
"max": 0.07364696889957859,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9607266038861673,
"min": 0.48982802991492363,
"max": 1.0330682859348599,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008528645520632242,
"min": 0.000161567621761852,
"max": 0.008958733257410735,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11940103728885137,
"min": 0.002067075791322369,
"max": 0.13438099886116103,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.646583165457138e-06,
"min": 7.646583165457138e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010705216431639993,
"min": 0.00010705216431639993,
"max": 0.0031441271519577,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254882857142858,
"min": 0.10254882857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356836000000002,
"min": 1.3886848,
"max": 2.3480423,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026462797428571414,
"min": 0.00026462797428571414,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003704791639999998,
"min": 0.003704791639999998,
"max": 0.10482942577,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007577203214168549,
"min": 0.007577203214168549,
"max": 0.26895540952682495,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10608084499835968,
"min": 0.10608084499835968,
"max": 1.8826878070831299,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 762.2051282051282,
"min": 742.439024390244,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29726.0,
"min": 15984.0,
"max": 32664.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.2631127859155337,
"min": -1.0000000521540642,
"max": 0.5256682574385549,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 10.261398650705814,
"min": -31.99920167028904,
"max": 21.552398554980755,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.2631127859155337,
"min": -1.0000000521540642,
"max": 0.5256682574385549,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 10.261398650705814,
"min": -31.99920167028904,
"max": 21.552398554980755,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.060054309810743105,
"min": 0.060054309810743105,
"max": 5.330888848286122,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.342118082618981,
"min": 2.342118082618981,
"max": 85.29422157257795,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692270301",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692272419"
},
"total": 2117.5034729599997,
"count": 1,
"self": 0.4874810289998095,
"children": {
"run_training.setup": {
"total": 0.06359278600029938,
"count": 1,
"self": 0.06359278600029938
},
"TrainerController.start_learning": {
"total": 2116.9523991449996,
"count": 1,
"self": 1.3452333810960226,
"children": {
"TrainerController._reset_env": {
"total": 6.48626550400013,
"count": 1,
"self": 6.48626550400013
},
"TrainerController.advance": {
"total": 2109.016387582903,
"count": 63175,
"self": 1.3950699109063862,
"children": {
"env_step": {
"total": 1446.0760474240046,
"count": 63175,
"self": 1336.5401860150891,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.69718923088203,
"count": 63175,
"self": 4.654513970873268,
"children": {
"TorchPolicy.evaluate": {
"total": 104.04267526000876,
"count": 62555,
"self": 104.04267526000876
}
}
},
"workers": {
"total": 0.8386721780334483,
"count": 63175,
"self": 0.0,
"children": {
"worker_root": {
"total": 2112.3840987389763,
"count": 63175,
"is_parallel": true,
"self": 887.9632015380353,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002764348999789945,
"count": 1,
"is_parallel": true,
"self": 0.0007742509992567648,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019900980005331803,
"count": 8,
"is_parallel": true,
"self": 0.0019900980005331803
}
}
},
"UnityEnvironment.step": {
"total": 0.09372274599991215,
"count": 1,
"is_parallel": true,
"self": 0.0006201639998835162,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005425370000011753,
"count": 1,
"is_parallel": true,
"self": 0.0005425370000011753
},
"communicator.exchange": {
"total": 0.09024433200011117,
"count": 1,
"is_parallel": true,
"self": 0.09024433200011117
},
"steps_from_proto": {
"total": 0.002315712999916286,
"count": 1,
"is_parallel": true,
"self": 0.0004095479994248308,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019061650004914554,
"count": 8,
"is_parallel": true,
"self": 0.0019061650004914554
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1224.420897200941,
"count": 63174,
"is_parallel": true,
"self": 33.890451529978236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.072896974105788,
"count": 63174,
"is_parallel": true,
"self": 23.072896974105788
},
"communicator.exchange": {
"total": 1063.0539335889266,
"count": 63174,
"is_parallel": true,
"self": 1063.0539335889266
},
"steps_from_proto": {
"total": 104.40361510793036,
"count": 63174,
"is_parallel": true,
"self": 20.32481236993499,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.07880273799537,
"count": 505392,
"is_parallel": true,
"self": 84.07880273799537
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 661.545270247992,
"count": 63175,
"self": 2.481973563979409,
"children": {
"process_trajectory": {
"total": 108.6138666520169,
"count": 63175,
"self": 108.38428368001723,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22958297199966182,
"count": 2,
"self": 0.22958297199966182
}
}
},
"_update_policy": {
"total": 550.4494300319957,
"count": 441,
"self": 359.19104461902634,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.25838541296935,
"count": 22731,
"self": 191.25838541296935
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.370006980840117e-07,
"count": 1,
"self": 8.370006980840117e-07
},
"TrainerController._save_models": {
"total": 0.1045118399997591,
"count": 1,
"self": 0.0013871490000383346,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10312469099972077,
"count": 1,
"self": 0.10312469099972077
}
}
}
}
}
}
}
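
The JSON above is an ML-Agents run log: "gauges" holds per-metric summaries (value/min/max/count collected over the run), and the nested "children" blocks form a hierarchical timer tree whose "total" fields are wall-clock seconds. Below is a minimal sketch, using only the Python standard library and assuming the file sits at run_logs/timers.json, of how one might load it, read a gauge, and print the timer hierarchy. The walk helper is an illustrative name, not an ML-Agents API.

# Minimal sketch (not part of the run artifacts): inspect timers.json
# with the standard library. The path and helper name are assumptions.
import json

with open("run_logs/timers.json") as f:
    root = json.load(f)

# Gauges: per-metric value/min/max/count summaries logged during training.
reward = root["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print(f"mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, n={reward['count']})")

def walk(name, node, depth=0):
    """Recursively print the timer tree with each block's total seconds."""
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"({node.get('count', 0)} calls)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(root.get("name", "root"), root)

Run against this file, the timer tree shows that most of the roughly 2117 s total is spent in communicator.exchange (about 1063 s), i.e., stepping the Unity Pyramids environment, with policy updates (TorchPPOOptimizer.update) accounting for about 191 s.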