{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5274935960769653,
"min": 0.5231553912162781,
"max": 1.5002342462539673,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15757.2890625,
"min": 15753.255859375,
"max": 45511.10546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29916.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29916.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5230244398117065,
"min": -0.1941283941268921,
"max": 0.5664151310920715,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 141.2165985107422,
"min": -46.00843048095703,
"max": 154.06491088867188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013818386010825634,
"min": -0.015613694675266743,
"max": 0.39116039872169495,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.730964183807373,
"min": -4.0283331871032715,
"max": 92.70501708984375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06905909625589012,
"min": 0.06488851732540164,
"max": 0.07568647477151012,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9668273475824617,
"min": 0.5298053234005708,
"max": 1.0552841023080892,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01563128663261365,
"min": 0.000941762691836455,
"max": 0.016589948806258865,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2188380128565911,
"min": 0.012242914993873914,
"max": 0.248849232093883,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.242140443128573e-06,
"min": 7.242140443128573e-06,
"max": 0.0002952344587313714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010138996620380002,
"min": 0.00010138996620380002,
"max": 0.0036086640971119996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241401428571431,
"min": 0.10241401428571431,
"max": 0.1984114857142857,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4337962000000004,
"min": 1.3888804,
"max": 2.5698872,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002511600271428572,
"min": 0.0002511600271428572,
"max": 0.009841307422857142,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003516240380000001,
"min": 0.003516240380000001,
"max": 0.12029851120000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007215626537799835,
"min": 0.007215626537799835,
"max": 0.3713221848011017,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10101877152919769,
"min": 0.10101877152919769,
"max": 2.599255323410034,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 347.6666666666667,
"min": 341.6219512195122,
"max": 988.1666666666666,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27118.0,
"min": 16715.0,
"max": 33313.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5240743399048462,
"min": -0.922380052258571,
"max": 1.6095829104504935,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 118.87779851257801,
"min": -29.34320167452097,
"max": 138.01419849693775,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5240743399048462,
"min": -0.922380052258571,
"max": 1.6095829104504935,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 118.87779851257801,
"min": -29.34320167452097,
"max": 138.01419849693775,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026286155850707125,
"min": 0.026286155850707125,
"max": 7.310455685152727,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.0503201563551556,
"min": 2.0503201563551556,
"max": 124.27774664759636,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688727210",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688729398"
},
"total": 2187.9549706649996,
"count": 1,
"self": 0.4801150809989849,
"children": {
"run_training.setup": {
"total": 0.04267251800001759,
"count": 1,
"self": 0.04267251800001759
},
"TrainerController.start_learning": {
"total": 2187.4321830660006,
"count": 1,
"self": 1.3012374480417748,
"children": {
"TrainerController._reset_env": {
"total": 5.397220674999971,
"count": 1,
"self": 5.397220674999971
},
"TrainerController.advance": {
"total": 2180.642382080959,
"count": 63899,
"self": 1.3870829129891717,
"children": {
"env_step": {
"total": 1525.7196155530196,
"count": 63899,
"self": 1418.204149398885,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.72786078104537,
"count": 63899,
"self": 4.752492408052603,
"children": {
"TorchPolicy.evaluate": {
"total": 101.97536837299276,
"count": 62547,
"self": 101.97536837299276
}
}
},
"workers": {
"total": 0.7876053730892636,
"count": 63899,
"self": 0.0,
"children": {
"worker_root": {
"total": 2182.338344664983,
"count": 63899,
"is_parallel": true,
"self": 877.0894966749515,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025569929998709995,
"count": 1,
"is_parallel": true,
"self": 0.00069399199946929,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018630010004017095,
"count": 8,
"is_parallel": true,
"self": 0.0018630010004017095
}
}
},
"UnityEnvironment.step": {
"total": 0.051140864999979385,
"count": 1,
"is_parallel": true,
"self": 0.000587977000122919,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005024120000598487,
"count": 1,
"is_parallel": true,
"self": 0.0005024120000598487
},
"communicator.exchange": {
"total": 0.048174875999848155,
"count": 1,
"is_parallel": true,
"self": 0.048174875999848155
},
"steps_from_proto": {
"total": 0.001875599999948463,
"count": 1,
"is_parallel": true,
"self": 0.0003715299994837551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015040700004647078,
"count": 8,
"is_parallel": true,
"self": 0.0015040700004647078
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1305.2488479900317,
"count": 63898,
"is_parallel": true,
"self": 33.85216935807102,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.693452283007446,
"count": 63898,
"is_parallel": true,
"self": 22.693452283007446
},
"communicator.exchange": {
"total": 1144.4233290389366,
"count": 63898,
"is_parallel": true,
"self": 1144.4233290389366
},
"steps_from_proto": {
"total": 104.27989731001662,
"count": 63898,
"is_parallel": true,
"self": 19.899585997902705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.38031131211392,
"count": 511184,
"is_parallel": true,
"self": 84.38031131211392
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 653.5356836149504,
"count": 63899,
"self": 2.6334499289787345,
"children": {
"process_trajectory": {
"total": 111.22049522896987,
"count": 63899,
"self": 111.0158897339702,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20460549499966874,
"count": 2,
"self": 0.20460549499966874
}
}
},
"_update_policy": {
"total": 539.6817384570018,
"count": 458,
"self": 349.26658824291553,
"children": {
"TorchPPOOptimizer.update": {
"total": 190.41515021408622,
"count": 22761,
"self": 190.41515021408622
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0099993232870474e-06,
"count": 1,
"self": 1.0099993232870474e-06
},
"TrainerController._save_models": {
"total": 0.09134185200036882,
"count": 1,
"self": 0.0013465530000758008,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08999529900029302,
"count": 1,
"self": 0.08999529900029302
}
}
}
}
}
}
}