ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4041839838027954,
"min": 1.4041839838027954,
"max": 1.4300827980041504,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72733.921875,
"min": 67676.5390625,
"max": 77147.0859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 117.49640287769785,
"min": 82.65551839464882,
"max": 396.21259842519686,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48996.0,
"min": 48835.0,
"max": 50319.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999397.0,
"min": 49847.0,
"max": 1999397.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999397.0,
"min": 49847.0,
"max": 1999397.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.324444055557251,
"min": 0.11477182060480118,
"max": 2.452869176864624,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 969.293212890625,
"min": 14.461249351501465,
"max": 1414.686767578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4767408077951245,
"min": 1.6305028855327577,
"max": 3.947500212914353,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1449.8009168505669,
"min": 205.44336357712746,
"max": 2278.053837776184,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4767408077951245,
"min": 1.6305028855327577,
"max": 3.947500212914353,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1449.8009168505669,
"min": 205.44336357712746,
"max": 2278.053837776184,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017321069450958425,
"min": 0.014449412931571714,
"max": 0.02058575337254701,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051963208352875274,
"min": 0.028898825863143428,
"max": 0.06175726011764103,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.042278398945927616,
"min": 0.022997312620282172,
"max": 0.06116506850553884,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.12683519683778285,
"min": 0.045994625240564344,
"max": 0.18349520551661652,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1999989333666663e-06,
"min": 3.1999989333666663e-06,
"max": 0.000295326676557775,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.599996800099999e-06,
"min": 9.599996800099999e-06,
"max": 0.0008441226186258,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106663333333334,
"min": 0.10106663333333334,
"max": 0.198442225,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031999,
"min": 0.20727880000000004,
"max": 0.5813742,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.322500333333334e-05,
"min": 6.322500333333334e-05,
"max": 0.0049222670275000015,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018967501000000003,
"min": 0.00018967501000000003,
"max": 0.014070572580000006,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685708110",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685710410"
},
"total": 2300.171253732,
"count": 1,
"self": 0.38333991800027434,
"children": {
"run_training.setup": {
"total": 0.04964124200000697,
"count": 1,
"self": 0.04964124200000697
},
"TrainerController.start_learning": {
"total": 2299.738272572,
"count": 1,
"self": 4.059408375935618,
"children": {
"TrainerController._reset_env": {
"total": 4.825521287000015,
"count": 1,
"self": 4.825521287000015
},
"TrainerController.advance": {
"total": 2290.738774717064,
"count": 231913,
"self": 4.211630250041253,
"children": {
"env_step": {
"total": 1796.9954654300607,
"count": 231913,
"self": 1522.4076295400912,
"children": {
"SubprocessEnvManager._take_step": {
"total": 271.92853249597806,
"count": 231913,
"self": 16.071227766023185,
"children": {
"TorchPolicy.evaluate": {
"total": 255.85730472995488,
"count": 223090,
"self": 255.85730472995488
}
}
},
"workers": {
"total": 2.659303393991422,
"count": 231913,
"self": 0.0,
"children": {
"worker_root": {
"total": 2292.3439300399973,
"count": 231913,
"is_parallel": true,
"self": 1047.2635425269602,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009737679999943794,
"count": 1,
"is_parallel": true,
"self": 0.00026710200006618834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000706665999928191,
"count": 2,
"is_parallel": true,
"self": 0.000706665999928191
}
}
},
"UnityEnvironment.step": {
"total": 0.03977237700007663,
"count": 1,
"is_parallel": true,
"self": 0.00036021100027028297,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000242374999970707,
"count": 1,
"is_parallel": true,
"self": 0.000242374999970707
},
"communicator.exchange": {
"total": 0.0383854329999167,
"count": 1,
"is_parallel": true,
"self": 0.0383854329999167
},
"steps_from_proto": {
"total": 0.0007843579999189387,
"count": 1,
"is_parallel": true,
"self": 0.00031395799999245355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004703999999264852,
"count": 2,
"is_parallel": true,
"self": 0.0004703999999264852
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1245.080387513037,
"count": 231912,
"is_parallel": true,
"self": 37.71537839596772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.40436739207212,
"count": 231912,
"is_parallel": true,
"self": 76.40436739207212
},
"communicator.exchange": {
"total": 1040.3637982379123,
"count": 231912,
"is_parallel": true,
"self": 1040.3637982379123
},
"steps_from_proto": {
"total": 90.59684348708481,
"count": 231912,
"is_parallel": true,
"self": 32.824546676975274,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.77229681010954,
"count": 463824,
"is_parallel": true,
"self": 57.77229681010954
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 489.53167903696226,
"count": 231913,
"self": 6.386943553939545,
"children": {
"process_trajectory": {
"total": 126.36569445502255,
"count": 231913,
"self": 125.15064308502247,
"children": {
"RLTrainer._checkpoint": {
"total": 1.215051370000083,
"count": 10,
"self": 1.215051370000083
}
}
},
"_update_policy": {
"total": 356.77904102800017,
"count": 97,
"self": 299.5614548209926,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.21758620700757,
"count": 2910,
"self": 57.21758620700757
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.19000285648508e-07,
"count": 1,
"self": 9.19000285648508e-07
},
"TrainerController._save_models": {
"total": 0.11456727299992053,
"count": 1,
"self": 0.0019230130001233192,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11264425999979721,
"count": 1,
"self": 0.11264425999979721
}
}
}
}
}
}
}
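
For reference, a minimal sketch of how the timer data above could be inspected, assuming the file is saved locally under the path shown in the header (run_logs/timers.json). It relies only on keys that actually appear in this file: gauge entries with value/min/max/count, and timer nodes with total/count/self and an optional children map.

# Minimal sketch (assumption: the JSON above is saved as run_logs/timers.json).
# Prints the recorded gauges, then walks the hierarchical timer tree.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: each entry carries value / min / max / count.
for name, g in timers["gauges"].items():
    print(f"{name}: value={g['value']:.4g} min={g['min']:.4g} "
          f"max={g['max']:.4g} count={g['count']}")

# Timer tree: every node has total / count / self and optional children.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.2f}s "
          f"count={node['count']} self={node['self']:.2f}s")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)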