{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1382765769958496,
"min": 0.12111399322748184,
"max": 1.4385684728622437,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4104.048828125,
"min": 3631.48193359375,
"max": 43640.4140625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999887.0,
"min": 29894.0,
"max": 2999887.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999887.0,
"min": 29894.0,
"max": 2999887.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8311744332313538,
"min": -0.09597834944725037,
"max": 0.8873137831687927,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 251.84585571289062,
"min": -23.130783081054688,
"max": 270.6307067871094,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.01873818412423134,
"min": -0.04131808131933212,
"max": 0.35275834798812866,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -5.677669525146484,
"min": -10.990610122680664,
"max": 83.60372924804688,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0688515912763597,
"min": 0.06465385610550603,
"max": 0.07358755116140292,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9639222778690358,
"min": 0.4911521585014563,
"max": 1.056314365375632,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016004676943079436,
"min": 0.00011662974973861488,
"max": 0.017746448900739085,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2240654772031121,
"min": 0.0015161867466019934,
"max": 0.24845028461034718,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5728209043309504e-06,
"min": 1.5728209043309504e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2019492660633304e-05,
"min": 2.2019492660633304e-05,
"max": 0.003969096276967933,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052424047619046,
"min": 0.10052424047619046,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4073393666666665,
"min": 1.3962282666666668,
"max": 2.7674941666666664,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.237162357142852e-05,
"min": 6.237162357142852e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008732027299999993,
"min": 0.0008732027299999993,
"max": 0.13231090346000002,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005259473342448473,
"min": 0.005012875888496637,
"max": 0.33718761801719666,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0736326277256012,
"min": 0.0701802596449852,
"max": 2.3603134155273438,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 220.83703703703705,
"min": 202.2123287671233,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29813.0,
"min": 16821.0,
"max": 32466.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7791629473368327,
"min": -0.9998645676720527,
"max": 1.7900349525930164,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 240.1869978904724,
"min": -30.995801597833633,
"max": 260.47619891166687,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7791629473368327,
"min": -0.9998645676720527,
"max": 1.7900349525930164,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 240.1869978904724,
"min": -30.995801597833633,
"max": 260.47619891166687,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01199025677442141,
"min": 0.010976438742533266,
"max": 6.778153168804505,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.6186846645468904,
"min": 1.541343480872456,
"max": 115.22860386967659,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685970784",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685979328"
},
"total": 8544.076399173,
"count": 1,
"self": 0.5269769370006543,
"children": {
"run_training.setup": {
"total": 0.04207163000000946,
"count": 1,
"self": 0.04207163000000946
},
"TrainerController.start_learning": {
"total": 8543.507350606,
"count": 1,
"self": 5.722510671981581,
"children": {
"TrainerController._reset_env": {
"total": 3.9737581080000837,
"count": 1,
"self": 3.9737581080000837
},
"TrainerController.advance": {
"total": 8533.707507334018,
"count": 195630,
"self": 5.53773860558249,
"children": {
"env_step": {
"total": 6439.287286941061,
"count": 195630,
"self": 6029.159110495299,
"children": {
"SubprocessEnvManager._take_step": {
"total": 406.6687730009826,
"count": 195630,
"self": 17.480813359135254,
"children": {
"TorchPolicy.evaluate": {
"total": 389.1879596418473,
"count": 187542,
"self": 389.1879596418473
}
}
},
"workers": {
"total": 3.459403444779582,
"count": 195630,
"self": 0.0,
"children": {
"worker_root": {
"total": 8522.463237560174,
"count": 195630,
"is_parallel": true,
"self": 2920.9714966299525,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002031845000146859,
"count": 1,
"is_parallel": true,
"self": 0.0006689830001960217,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013628619999508373,
"count": 8,
"is_parallel": true,
"self": 0.0013628619999508373
}
}
},
"UnityEnvironment.step": {
"total": 0.05311747300015668,
"count": 1,
"is_parallel": true,
"self": 0.0006409000000076048,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006198799999310722,
"count": 1,
"is_parallel": true,
"self": 0.0006198799999310722
},
"communicator.exchange": {
"total": 0.04978134500015585,
"count": 1,
"is_parallel": true,
"self": 0.04978134500015585
},
"steps_from_proto": {
"total": 0.0020753480000621494,
"count": 1,
"is_parallel": true,
"self": 0.000415476999933162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016598710001289874,
"count": 8,
"is_parallel": true,
"self": 0.0016598710001289874
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5601.491740930222,
"count": 195629,
"is_parallel": true,
"self": 111.8233970777419,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.60930292363855,
"count": 195629,
"is_parallel": true,
"self": 83.60930292363855
},
"communicator.exchange": {
"total": 5033.057042800834,
"count": 195629,
"is_parallel": true,
"self": 5033.057042800834
},
"steps_from_proto": {
"total": 373.00199812800724,
"count": 195629,
"is_parallel": true,
"self": 80.48771559415036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 292.5142825338569,
"count": 1565032,
"is_parallel": true,
"self": 292.5142825338569
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2088.882481787375,
"count": 195630,
"self": 10.658780530512104,
"children": {
"process_trajectory": {
"total": 393.08952418386093,
"count": 195630,
"self": 392.3703999118609,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7191242720000446,
"count": 6,
"self": 0.7191242720000446
}
}
},
"_update_policy": {
"total": 1685.1341770730023,
"count": 1401,
"self": 1089.4799498760062,
"children": {
"TorchPPOOptimizer.update": {
"total": 595.6542271969961,
"count": 68379,
"self": 595.6542271969961
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2620002962648869e-06,
"count": 1,
"self": 1.2620002962648869e-06
},
"TrainerController._save_models": {
"total": 0.10357322999880125,
"count": 1,
"self": 0.001511102998847491,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10206212699995376,
"count": 1,
"self": 0.10206212699995376
}
}
}
}
}
}
}