{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35682806372642517,
"min": 0.3346475064754486,
"max": 1.5512598752975464,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10721.9697265625,
"min": 9980.52734375,
"max": 47059.01953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989912.0,
"min": 29952.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989912.0,
"min": 29952.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.45596837997436523,
"min": -0.16926656663417816,
"max": 0.53659987449646,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 123.56742858886719,
"min": -40.793243408203125,
"max": 147.56497192382812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006324374116957188,
"min": 0.0007141218520700932,
"max": 0.3233257532119751,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7139053344726562,
"min": 0.17995870113372803,
"max": 76.62820434570312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06850312618238406,
"min": 0.06446809135506756,
"max": 0.0735797237087455,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9590437665533769,
"min": 0.4635333114433445,
"max": 1.1036958556311827,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01484861157461805,
"min": 0.0009923496977111712,
"max": 0.01583887216192023,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20788056204465272,
"min": 0.009820871781770641,
"max": 0.23758308242880347,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.50946178259286e-06,
"min": 7.50946178259286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010513246495630003,
"min": 0.00010513246495630003,
"max": 0.0036327256890915003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250312142857143,
"min": 0.10250312142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350437,
"min": 1.3691136000000002,
"max": 2.6109084999999994,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026006183071428584,
"min": 0.00026006183071428584,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003640865630000002,
"min": 0.003640865630000002,
"max": 0.12110975915000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009745752438902855,
"min": 0.009745752438902855,
"max": 0.3064081072807312,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13644053041934967,
"min": 0.13644053041934967,
"max": 2.1448566913604736,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 378.4578313253012,
"min": 361.5357142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31412.0,
"min": 15984.0,
"max": 32674.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4527999835919185,
"min": -1.0000000521540642,
"max": 1.5391132360301822,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 120.58239863812923,
"min": -32.000001668930054,
"max": 127.74639859050512,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4527999835919185,
"min": -1.0000000521540642,
"max": 1.5391132360301822,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 120.58239863812923,
"min": -32.000001668930054,
"max": 127.74639859050512,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03833495598449091,
"min": 0.03833495598449091,
"max": 6.116671589203179,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1818013467127457,
"min": 3.0807472558808513,
"max": 97.86674542725086,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678385758",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678388019"
},
"total": 2260.1201356200004,
"count": 1,
"self": 0.4896350649996748,
"children": {
"run_training.setup": {
"total": 0.11159209700008432,
"count": 1,
"self": 0.11159209700008432
},
"TrainerController.start_learning": {
"total": 2259.5189084580006,
"count": 1,
"self": 1.2800288019188883,
"children": {
"TrainerController._reset_env": {
"total": 7.9300925560000906,
"count": 1,
"self": 7.9300925560000906
},
"TrainerController.advance": {
"total": 2250.2212998970817,
"count": 63752,
"self": 1.383713854985217,
"children": {
"env_step": {
"total": 1594.1237894040169,
"count": 63752,
"self": 1481.520211151087,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.80907257999797,
"count": 63752,
"self": 5.272573593028028,
"children": {
"TorchPolicy.evaluate": {
"total": 106.53649898696995,
"count": 62573,
"self": 106.53649898696995
}
}
},
"workers": {
"total": 0.7945056729317912,
"count": 63752,
"self": 0.0,
"children": {
"worker_root": {
"total": 2254.5585236978845,
"count": 63752,
"is_parallel": true,
"self": 888.9097621558703,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017153399999187968,
"count": 1,
"is_parallel": true,
"self": 0.0005261480000626761,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011891919998561207,
"count": 8,
"is_parallel": true,
"self": 0.0011891919998561207
}
}
},
"UnityEnvironment.step": {
"total": 0.06701120400020955,
"count": 1,
"is_parallel": true,
"self": 0.0005011110001760244,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004635539999071625,
"count": 1,
"is_parallel": true,
"self": 0.0004635539999071625
},
"communicator.exchange": {
"total": 0.06450139399976251,
"count": 1,
"is_parallel": true,
"self": 0.06450139399976251
},
"steps_from_proto": {
"total": 0.0015451450003638456,
"count": 1,
"is_parallel": true,
"self": 0.000391345000025467,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011538000003383786,
"count": 8,
"is_parallel": true,
"self": 0.0011538000003383786
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1365.6487615420142,
"count": 63751,
"is_parallel": true,
"self": 31.706104959838285,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.044795234070534,
"count": 63751,
"is_parallel": true,
"self": 23.044795234070534
},
"communicator.exchange": {
"total": 1217.0899062100789,
"count": 63751,
"is_parallel": true,
"self": 1217.0899062100789
},
"steps_from_proto": {
"total": 93.8079551380265,
"count": 63751,
"is_parallel": true,
"self": 19.55370439799708,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.25425074002942,
"count": 510008,
"is_parallel": true,
"self": 74.25425074002942
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 654.7137966380797,
"count": 63752,
"self": 2.482613568177385,
"children": {
"process_trajectory": {
"total": 121.36867368890171,
"count": 63752,
"self": 121.12749515190262,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24117853699908665,
"count": 2,
"self": 0.24117853699908665
}
}
},
"_update_policy": {
"total": 530.8625093810006,
"count": 450,
"self": 335.8394470780263,
"children": {
"TorchPPOOptimizer.update": {
"total": 195.02306230297427,
"count": 22839,
"self": 195.02306230297427
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1500005712150596e-06,
"count": 1,
"self": 1.1500005712150596e-06
},
"TrainerController._save_models": {
"total": 0.08748605299933843,
"count": 1,
"self": 0.001473029999033315,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08601302300030511,
"count": 1,
"self": 0.08601302300030511
}
}
}
}
}
}
}