{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5058273077011108,
"min": 0.5058273077011108,
"max": 1.3987865447998047,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15182.912109375,
"min": 15106.75390625,
"max": 42433.58984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6168438196182251,
"min": -0.08122702687978745,
"max": 0.6168438196182251,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 173.949951171875,
"min": -19.656940460205078,
"max": 173.949951171875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.019638346508145332,
"min": -0.019638346508145332,
"max": 0.25809887051582336,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -5.538013458251953,
"min": -5.538013458251953,
"max": 61.9437255859375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07186779868719721,
"min": 0.06388730737488293,
"max": 0.07220879639943334,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.006149181620761,
"min": 0.4976628560200869,
"max": 1.0677849066560157,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014940973777681521,
"min": 0.000823871044255936,
"max": 0.016928747193918268,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2091736328875413,
"min": 0.007625903233992145,
"max": 0.253931207908774,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3395975534999975e-06,
"min": 7.3395975534999975e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010275436574899996,
"min": 0.00010275436574899996,
"max": 0.0035075363308213,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024465,
"min": 0.1024465,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434251,
"min": 1.3691136000000002,
"max": 2.5691787,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002544053499999999,
"min": 0.0002544053499999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035616748999999985,
"min": 0.0035616748999999985,
"max": 0.11694095213000004,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009017963893711567,
"min": 0.009017963893711567,
"max": 0.46099159121513367,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1262514889240265,
"min": 0.1262514889240265,
"max": 3.2269411087036133,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 312.1914893617021,
"min": 302.68,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29346.0,
"min": 15984.0,
"max": 32600.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.666529764044792,
"min": -1.0000000521540642,
"max": 1.6992323023803306,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.65379782021046,
"min": -32.000001668930054,
"max": 168.22399793565273,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.666529764044792,
"min": -1.0000000521540642,
"max": 1.6992323023803306,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.65379782021046,
"min": -32.000001668930054,
"max": 168.22399793565273,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029204576949036602,
"min": 0.029204576949036602,
"max": 8.814843412488699,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7452302332094405,
"min": 2.7398777788621373,
"max": 141.03749459981918,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689721713",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689724120"
},
"total": 2406.9827441939997,
"count": 1,
"self": 0.47909773599985783,
"children": {
"run_training.setup": {
"total": 0.041664409999839336,
"count": 1,
"self": 0.041664409999839336
},
"TrainerController.start_learning": {
"total": 2406.461982048,
"count": 1,
"self": 1.4672575149024851,
"children": {
"TrainerController._reset_env": {
"total": 4.111460921999878,
"count": 1,
"self": 4.111460921999878
},
"TrainerController.advance": {
"total": 2400.7782376250975,
"count": 63858,
"self": 1.512160880105057,
"children": {
"env_step": {
"total": 1706.7683907799974,
"count": 63858,
"self": 1586.265506789116,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.64083582196372,
"count": 63858,
"self": 5.115557743981526,
"children": {
"TorchPolicy.evaluate": {
"total": 114.5252780779822,
"count": 62559,
"self": 114.5252780779822
}
}
},
"workers": {
"total": 0.8620481689176813,
"count": 63858,
"self": 0.0,
"children": {
"worker_root": {
"total": 2400.6435004579494,
"count": 63858,
"is_parallel": true,
"self": 938.967263501959,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002155462000018815,
"count": 1,
"is_parallel": true,
"self": 0.0006303840002601646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015250779997586505,
"count": 8,
"is_parallel": true,
"self": 0.0015250779997586505
}
}
},
"UnityEnvironment.step": {
"total": 0.053580919000069116,
"count": 1,
"is_parallel": true,
"self": 0.0005619740002202889,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004973469999640656,
"count": 1,
"is_parallel": true,
"self": 0.0004973469999640656
},
"communicator.exchange": {
"total": 0.05053480099991248,
"count": 1,
"is_parallel": true,
"self": 0.05053480099991248
},
"steps_from_proto": {
"total": 0.0019867969999722845,
"count": 1,
"is_parallel": true,
"self": 0.0003928549999727693,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015939419999995152,
"count": 8,
"is_parallel": true,
"self": 0.0015939419999995152
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1461.6762369559904,
"count": 63857,
"is_parallel": true,
"self": 35.34888076200582,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.098713445080648,
"count": 63857,
"is_parallel": true,
"self": 25.098713445080648
},
"communicator.exchange": {
"total": 1285.4651235589042,
"count": 63857,
"is_parallel": true,
"self": 1285.4651235589042
},
"steps_from_proto": {
"total": 115.76351918999967,
"count": 63857,
"is_parallel": true,
"self": 22.71590084413492,
"children": {
"_process_rank_one_or_two_observation": {
"total": 93.04761834586475,
"count": 510856,
"is_parallel": true,
"self": 93.04761834586475
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 692.4976859649951,
"count": 63858,
"self": 2.744200517986883,
"children": {
"process_trajectory": {
"total": 121.81872224700919,
"count": 63858,
"self": 121.53939231100912,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2793299360000674,
"count": 2,
"self": 0.2793299360000674
}
}
},
"_update_policy": {
"total": 567.934763199999,
"count": 451,
"self": 367.22143492799,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.713328272009,
"count": 22851,
"self": 200.713328272009
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.73000169324223e-07,
"count": 1,
"self": 9.73000169324223e-07
},
"TrainerController._save_models": {
"total": 0.10502501300015865,
"count": 1,
"self": 0.001445975000024191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10357903800013446,
"count": 1,
"self": 0.10357903800013446
}
}
}
}
}
}
}