Acrobot-v1 - first (bad) attempt
- Acrobot-v1.zip +2 -2
- Acrobot-v1/data +20 -20
- Acrobot-v1/policy.optimizer.pth +2 -2
- Acrobot-v1/policy.pth +2 -2
- Acrobot-v1/system_info.txt +1 -1
- README.md +1 -1
- config.json +1 -1
- replay.mp4 +0 -0
- results.json +1 -1
Acrobot-v1.zip
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size

version https://git-lfs.github.com/spec/v1
+oid sha256:d5b382259f15b572597c625b3bae21700f7989a7860ce414b1eafa9bc15a31e0
+size 143156
Acrobot-v1/data
CHANGED
@@ -4,34 +4,34 @@
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function ActorCriticPolicy.__init__ at
-"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at
-"reset_noise": "<function ActorCriticPolicy.reset_noise at
-"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at
-"_build": "<function ActorCriticPolicy._build at
-"forward": "<function ActorCriticPolicy.forward at
-"extract_features": "<function ActorCriticPolicy.extract_features at
-"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at
-"_predict": "<function ActorCriticPolicy._predict at
-"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at
-"get_distribution": "<function ActorCriticPolicy.get_distribution at
-"predict_values": "<function ActorCriticPolicy.predict_values at
"__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at
},
"verbose": 1,
"policy_kwargs": {},
-"num_timesteps":
-"_total_timesteps":
"_num_timesteps_at_start": 0,
"seed": null,
"action_noise": null,
-"start_time":
"learning_rate": 0.0003,
"tensorboard_log": null,
"_last_obs": {
":type:": "<class 'numpy.ndarray'>",
-":serialized:": "
},
"_last_episode_starts": {
":type:": "<class 'numpy.ndarray'>",
@@ -41,17 +41,17 @@
"_episode_num": 0,
"use_sde": false,
"sde_sample_freq": -1,
-"_current_progress_remaining": -
"_stats_window_size": 100,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
-":serialized:": "
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",
":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
},
-"_n_updates":
"observation_space": {
":type:": "<class 'gymnasium.spaces.box.Box'>",
":serialized:": "gAWVNAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBgAAAAAAAAABAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBoWUjAFDlHSUUpSMDWJvdW5kZWRfYWJvdmWUaBEolgYAAAAAAAAAAQEBAQEBlGgVSwaFlGgZdJRSlIwGX3NoYXBllEsGhZSMA2xvd5RoESiWGAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/2w9JwdYx4sGUaAtLBoWUaBl0lFKUjARoaWdolGgRKJYYAAAAAAAAAAAAgD8AAIA/AACAPwAAgD/bD0lB1jHiQZRoC0sGhZRoGXSUUpSMCGxvd19yZXBylIxDWyAtMS4gICAgICAgIC0xLiAgICAgICAgLTEuICAgICAgICAtMS4gICAgICAgLTEyLjU2NjM3MSAtMjguMjc0MzM0XZSMCWhpZ2hfcmVwcpSMPVsgMS4gICAgICAgIDEuICAgICAgICAxLiAgICAgICAgMS4gICAgICAgMTIuNTY2MzcxIDI4LjI3NDMzNF2UjApfbnBfcmFuZG9tlE51Yi4=",

":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
"__module__": "stable_baselines3.common.policies",
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+"__init__": "<function ActorCriticPolicy.__init__ at 0x7a2c62e77760>",
+"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7a2c62e777f0>",
+"reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7a2c62e77880>",
+"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7a2c62e77910>",
+"_build": "<function ActorCriticPolicy._build at 0x7a2c62e779a0>",
+"forward": "<function ActorCriticPolicy.forward at 0x7a2c62e77a30>",
+"extract_features": "<function ActorCriticPolicy.extract_features at 0x7a2c62e77ac0>",
+"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7a2c62e77b50>",
+"_predict": "<function ActorCriticPolicy._predict at 0x7a2c62e77be0>",
+"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7a2c62e77c70>",
+"get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7a2c62e77d00>",
+"predict_values": "<function ActorCriticPolicy.predict_values at 0x7a2c62e77d90>",
"__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc._abc_data object at 0x7a2c62e1d580>"
},
"verbose": 1,
"policy_kwargs": {},
+"num_timesteps": 1015808,
+"_total_timesteps": 1000000,
"_num_timesteps_at_start": 0,
"seed": null,
"action_noise": null,
+"start_time": 1731776890927993344,
"learning_rate": 0.0003,
"tensorboard_log": null,
"_last_obs": {
":type:": "<class 'numpy.ndarray'>",
+
":serialized:": "gAWV9QEAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaAAQAAAAAAANeIFT93yU+/22hAP87cKD+g3Ku/SOwuQPSReL7XV3g/v5sLvwiVVr+ah2m/BG43wPOzej8hO08+62dsP2xzxD7RCyNAk7aCwHalXz8lJfk+u/1/v+taCLyvRY3AD5y6P4aEbT/3Ar8+DsPEPlxXbD/ptXhA/NfJwLIzxbwC7X8/3IVlPw3D4r6iA0NA9CytwLeZ2j2aiX4/Sn4fPi7gfL8tTwHABLu9vxRqTz8KDRY/WYhNPlHKej+zD4FASgSVwJjNIz8VvEQ/2JYqP2DhPj9SKkVAz9GvwLNXRT58M3s/HPSbvtLVc78Ek0O+Cfi3v0UIm75o+3O/Z6JCvxRLJj/j6GfAEB0YvlVGbb8qN8C+bbHKPT2+fj8KwGc/9bh/wKIwez+3kUW+19hSPxowET+3HeG9j/DSPnmUez+Udj2+jRBQP94lFT8oJzO+3XPtPr0Fsr3wB3+/7IRpP6rO0T4nw8y/lpGVQEnYPz/7gCk/X2NTvx5mEL/6fV/AxbdEQJSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGJLEEsGhpSMAUOUdJRSlC4="
},
"_last_episode_starts": {
":type:": "<class 'numpy.ndarray'>",

"_episode_num": 0,
"use_sde": false,
"sde_sample_freq": -1,
+"_current_progress_remaining": -0.015808000000000044,
"_stats_window_size": 100,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
+
":serialized:": "gAWV4AsAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHwFEAAAAAAACMAWyUS0WMAXSUR0CP3B+AmReUdX2UKGgGR8BYQAAAAAAAaAdLYmgIR0CP3BTjvNNbdX2UKGgGR8BSgAAAAAAAaAdLS2gIR0CP3C1+AmRedX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP3GatLcsUdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP3NaTOgQIdX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP3MLYwqRVdX2UKGgGR8BYwAAAAAAAaAdLZGgIR0CP3OSi/O+qdX2UKGgGR8BUgAAAAAAAaAdLU2gIR0CP3Slgtvn9dX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3TKr7wazdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP3SjWTX8PdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3Uk690zTdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3awUxmCidX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3ZBF/hESdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3a5vtMPCdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3gqaw2VFdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP3jmozeoDdX2UKGgGR8BUgAAAAAAAaAdLU2gIR0CP3munMt9QdX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP3pDQ7cO9dX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP3oA9V3lkdX2UKGgGR8BWwAAAAAAAaAdLXGgIR0CP3qK8+RozdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP3tHzYmLMdX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP3uxRl6JJdX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP3uUgSvkjdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP3wsQNCqqdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3y+C9RJmdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3ydf9gnddX2UKGgGR8BVQAAAAAAAaAdLVmgIR0CP335SFXaKdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP35/z8P4EdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP36gh8pkPdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP38+0w8GLdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP4AFA3T/idX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP4Dd5Y5ktdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP4ISQo1DTdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP4K7JW/8EdX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP4NC6Ymb9dX2UKGgGR8BPgAAAAAAAaAdLQGgIR0CP4Nh86V+rdX2UKGgGR8BYwAAAAAAAaAdLZGgIR0CP4OxDb8FZdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP4OrXlKbsdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP4P8stkFwdX2UKGgGR8BUgAAAAAAAaAdLU2gIR0CP4Qa1kUbldX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP4W0cfeUIdX2UKGgGR8BWAAAAAAAAaAdLWWgIR0CP4WR2bG3ndX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP4ZdWyTpxdX2UKGgGR8BPgAAAAAAAaAdLQGgIR0CP4bGb1AZ9dX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP4ejwhGH6dX2UKGgGR8BUAAAAAAAAaAdLUWgIR0CP4cf0VafSdX2UKGgGR8BVwAAAAAAAaAdLWGgIR0CP4e4YJmdzdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP4nA1vVEvdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP4rWvKU3XdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP4siFCb+cdX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP4wAR02cbdX2UKGgGR8BUQAAAAAAAaAdLUmgIR0CP4vdonKGMdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP4uxrSE13dX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP4vj/dZaFdX2UKGgGR8BZQAAAAAAAaAdLZmgIR0CP4uQp4KQadX2UKGgGR8BWgAAAAAAAaAdLW2gIR0CP4xAbADaHdX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP41CiRGMGdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP48GD+R5kdX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP47Y5DJEIdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP47h2nsLOdX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP4+uoxYaHdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP5BJ4jbBXdX2UKGgGR8BXgAAAAAAAaAdLX2gIR0CP5IgzP8htdX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP5MiiZfD2dX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP5K5QxesxdX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP5QcTakAQdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP5PUoa1kUdX2UKGgGR8BUAAAAAAAAaAdLUWgIR0CP5TyTY/VzdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP5UhY/3WXdX2UKGgGR8BVwAAAAAAAaAdLWGgIR0CP5XHxz7uVdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP5YtthuwYdX2UKGgGR8BWwAAAAAAAaAdLXGgIR0CP5X/n4fwJdX2UKGgGR8BZgAAAAAAAaAdLZ2gIR0CP5aJ0nw5OdX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP5ckv9LpSdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP5bZdOZb7dX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP5dNwBHTadX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP5eBI4EOidX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP5if/3nIRdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP5lp0wJw9dX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP5u7rcCYDdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP5ve40/GEdX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP5yj+rELqdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP5z6ol2NedX2UKGgGR8BVgAAAAAAAaAdLV2gIR0CP55mZmZmadX2UKGgGR8BUAAAAAAAAaAdLUWgIR0CP5/5aePJadX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP6BpblijMdX2UKGgGR8BWAAAAAAAAaAdLWWgIR0CP6E
bADaGpdX2UKGgGR8BaAAAAAAAAaAdLaWgIR0CP6GosI3R5dX2UKGgGR8BWgAAAAAAAaAdLW2gIR0CP6G4OtnwodX2UKGgGR8BVgAAAAAAAaAdLV2gIR0CP6H1wHZ9NdX2UKGgGR8BWgAAAAAAAaAdLW2gIR0CP6JCBwuM/dX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP6HdWyTpxdX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP6J+uvECOdX2UKGgGR8BaAAAAAAAAaAdLaWgIR0CP6J7laKUFdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP6I2bXpW4dX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP6SFDfFaTdX2UKGgGR8BUwAAAAAAAaAdLVGgIR0CP6XQTmGM5dX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP6Xnq3VkMdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP6gLuQZGbdX2UKGgGR8BUQAAAAAAAaAdLUmgIR0CP6gE7nxJ/dWUu"
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",
":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
},
+"_n_updates": 310,
"observation_space": {
":type:": "<class 'gymnasium.spaces.box.Box'>",
":serialized:": "gAWVNAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBgAAAAAAAAABAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBoWUjAFDlHSUUpSMDWJvdW5kZWRfYWJvdmWUaBEolgYAAAAAAAAAAQEBAQEBlGgVSwaFlGgZdJRSlIwGX3NoYXBllEsGhZSMA2xvd5RoESiWGAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/2w9JwdYx4sGUaAtLBoWUaBl0lFKUjARoaWdolGgRKJYYAAAAAAAAAAAAgD8AAIA/AACAPwAAgD/bD0lB1jHiQZRoC0sGhZRoGXSUUpSMCGxvd19yZXBylIxDWyAtMS4gICAgICAgIC0xLiAgICAgICAgLTEuICAgICAgICAtMS4gICAgICAgLTEyLjU2NjM3MSAtMjguMjc0MzM0XZSMCWhpZ2hfcmVwcpSMPVsgMS4gICAgICAgIDEuICAgICAgICAxLiAgICAgICAgMS4gICAgICAgMTIuNTY2MzcxIDI4LjI3NDMzNF2UjApfbnBfcmFuZG9tlE51Yi4=",

Acrobot-v1/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size

version https://git-lfs.github.com/spec/v1
+oid sha256:5eb1f285af7176efcf8684202a05616470353b298955d726785a12dd9d6e8805
+size 85418
Acrobot-v1/policy.pth
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size

version https://git-lfs.github.com/spec/v1
+oid sha256:94ce21048ff9cb76f4d280df34785808ff9c2fa5ad097f83a73fd6d24505d4a8
+size 42354
Acrobot-v1/system_info.txt
CHANGED
@@ -2,7 +2,7 @@
- Python: 3.10.12
- Stable-Baselines3: 2.0.0a5
- PyTorch: 2.5.1+cu121
-- GPU Enabled:
- Numpy: 1.26.4
- Cloudpickle: 3.1.0
- Gymnasium: 0.28.1

- Python: 3.10.12
- Stable-Baselines3: 2.0.0a5
- PyTorch: 2.5.1+cu121
+- GPU Enabled: False
- Numpy: 1.26.4
- Cloudpickle: 3.1.0
- Gymnasium: 0.28.1
README.md
CHANGED
@@ -16,7 +16,7 @@ model-index:
type: Acrobot-v1
metrics:
- type: mean_reward
-value: -
name: mean_reward
verified: false
---

type: Acrobot-v1
metrics:
- type: mean_reward
+value: -78.10 +/- 12.89
name: mean_reward
verified: false
---
config.json
CHANGED
@@ -1 +1 @@
-
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x78af0c95d630>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x78af0c95d6c0>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x78af0c95d750>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x78af0c95d7e0>", "_build": "<function ActorCriticPolicy._build at 0x78af0c95d870>", "forward": "<function ActorCriticPolicy.forward at 0x78af0c95d900>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x78af0c95d990>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x78af0c95da20>", "_predict": "<function ActorCriticPolicy._predict at 0x78af0c95dab0>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x78af0c95db40>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x78af0c95dbd0>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x78af0c95dc60>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x78af0c90acc0>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 32768, "_total_timesteps": 10000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1731776505599120364, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWV9QEAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaAAQAAAAAAAHpvcj+Fc6Q+r8JnP8x32b7efkg+CBqvvdOFfj9astu90H10P9bJlz6nEgW/LRaEP2JefD/M3Cs+4EB9P+iYFT7V2By+YjtDvK33fz84jYK8zTtxPwJgq746i5O/u1QeQGsJfj++Jf09kuo5P3b8L7/9hEq+PS5UPhv/eD/P4W2+jk5UP3ULDz/dqAW/Zw2Mvs/udz/3CX8+uaZ7PzrxO74sFQU/kkuqvgh2fj86OeA9CvJ9PwJ5Ab7Wh2m+rzO/PYclVT9byg0//rHpPhDHY78WRRw++pAkPniJbz/FpLS+lYVsP2Pkwz6v0PE+xxHHv6MVej8q3Fq+rjhyP0O1pT6z6Iy8jlHUPSvpfz88ONg8ycl2P4MciD7mt6C+pebFP4Z1fz+hEIW93GJ/PwO9jT3SChu/HCkrPxOFeD9sunU+9Nl0Px5zlb4V8zS+Im14vvRxfT8ZTxC+MUF/P6QqnL13CF8/S4vSv5/Hez8BKzk++FNUP2oDD7/EGr4+VxDGvpSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGJLEEsGhpSMAUOUdJRSlC4="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -2.2768, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWV6AcAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHwH7AAAAAAACMAWyUTe0BjAF0lEdAKDx9G7SRbXV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQChOicoYvWZ1fZQoaAZHwH9AAAAAAABoB030AWgIR0AoTkMkQf6odX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAKE4PXkHUt3V9lChoBkfAf0AAAAAAAGgHTfQBaAhHQChN2Pkq+al1fZQoaAZHwH9AAAAAAABoB030AWgIR0AoTanJkoWpdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAKE10tAcDKnV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQChNRaX8fmt1fZQoaAZHwH9AAAAAAABoB030AWgIR0AoSVk+X7cgdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAKEkPczqKQHV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQChIyGi5/b11fZQoaAZHwH9AAAAAAABoB030AWgIR0AoSJTER8MNdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAKEhkZrHlwXV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQChIMjNY8uB1fZQoaAZHwH9AAAAAAABoB030AWgIR0AoSAQxvegtdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAKEfViF0xM3V9lChoBkfAf0AAAAAAAGgHTfQBaAhHQCs9KXfIjnp1fZQoaAZHwH9AAAAAAABoB030AWgIR0ArT0uDjBEbdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAK08Cgbp/w3V9lChoBkfAf0AAAAAAAGgHTfQBaAhHQCtOzlcQiA51fZQoaAZHwH9AAAAAAABoB030AWgIR0ArTplBhQWOdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAK05p8F6iTXV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQCtONvOyE+R1fZQoaAZHwH9AAAAAAABoB030AWgIR0ArTgb6xgRcdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAK0oacZtNz3V9lChoBkfAf0AAAAAAAGgHTfQBaAhHQCtJ0OmR/3F1fZQoaAZHwH9AAAAAAABoB030AWgIR0ArSYfnwG4adX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAK0lVLi++NHV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQCtJJNCZ4Od1fZQoaAZHwH9AAAAAAABoB030AWgIR0ArSPKdQO4HdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAK0jGtITXa3V9lChoBkfAf0AAAAAAAGgHTfQBaAhHQCtIl4TsY2t1fZQoaAZHwH9AAAAAAABoB030AWgIR0AuQKhtcfNidX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdALlLdWQwK0HV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQC5SlDWsijd1fZQoaAZHwH9AAAAAAABoB030AWgIR0AuUmAskIHDdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdALlIrOJLuhXV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQC5R/Aj6eoV1fZQoaAZHwH9AAAAAAABoB030AWgIR0AuUclPacqfdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdALlGZeAuqWHV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQC5NrKvFFUh1fZQoaAZHwH9AAAAAAABoB030AWgIR0AuTWMCLdeqdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdALk0aAFxGUnV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQC5M50bLlmx1fZQoaAZHwH9AAAAAAABoB030AWgIR0AuTLcsUZeidX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdALkyE+PikwnV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQC5MWTHKfWd1fZQoaAZHwH9AAAAAAABoB030AWgIR0AuTCoCMglodX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAMKFObiIcinV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQDCqXjU/fO51fZQoaAZHwH9AAAAAAABoB030AWgIR0AwqjsUqQRxdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAMKohMajveHV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQDCqBd2PkrB1fZQoaAZHwH9AAAAAAABoB030AWgIR0Awqe40/GEPdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAMKnUH6dlNHV9lChoBkfAf0AAAAAAAGg
HTfQBaAhHQDCpvBJqZc91fZQoaAZHwH9AAAAAAABoB030AWgIR0Awp8WsRxtIdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAMKeh0yP+43V9lChoBkfAf0AAAAAAAGgHTfQBaAhHQDCnfXPJJXh1fZQoaAZHwH9AAAAAAABoB030AWgIR0Awp2OQyRCAdX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAMKdLYf4h2XV9lChoBkfAf0AAAAAAAGgHTfQBaAhHQDCnMs6JZW91fZQoaAZHwH9AAAAAAABoB030AWgIR0AwpxxkupS8dX2UKGgGR8B/QAAAAAAAaAdN9AFoCEdAMKcE3bVSXXVlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 10, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVNAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBgAAAAAAAAABAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBoWUjAFDlHSUUpSMDWJvdW5kZWRfYWJvdmWUaBEolgYAAAAAAAAAAQEBAQEBlGgVSwaFlGgZdJRSlIwGX3NoYXBllEsGhZSMA2xvd5RoESiWGAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/2w9JwdYx4sGUaAtLBoWUaBl0lFKUjARoaWdolGgRKJYYAAAAAAAAAAAAgD8AAIA/AACAPwAAgD/bD0lB1jHiQZRoC0sGhZRoGXSUUpSMCGxvd19yZXBylIxDWyAtMS4gICAgICAgIC0xLiAgICAgICAgLTEuICAgICAgICAtMS4gICAgICAgLTEyLjU2NjM3MSAtMjguMjc0MzM0XZSMCWhpZ2hfcmVwcpSMPVsgMS4gICAgICAgIDEuICAgICAgICAxLiAgICAgICAgMS4gICAgICAgMTIuNTY2MzcxIDI4LjI3NDMzNF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True]", "bounded_above": "[ True True True True True True]", "_shape": [6], "low": "[ -1. -1. -1. -1. -12.566371 -28.274334]", "high": "[ 1. 1. 1. 1. 12.566371 28.274334]", "low_repr": "[ -1. -1. -1. -1. -12.566371 -28.274334]", "high_repr": "[ 1. 1. 1. 1. 12.566371 28.274334]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV2wAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIAwAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgOjApfbnBfcmFuZG9tlE51Yi4=", "n": "3", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 16, "n_steps": 2048, "gamma": 0.99, "gae_lambda": 0.95, "ent_coef": 0.0, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 10, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVrQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFowEZnVuY5SMDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": 
"gAWVrQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFowEZnVuY5SMDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "system_info": {"OS": "Linux-6.1.85+-x86_64-with-glibc2.35 # 1 SMP PREEMPT_DYNAMIC Thu Jun 27 21:05:47 UTC 2024", "Python": "3.10.12", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.5.1+cu121", "GPU Enabled": "True", "Numpy": "1.26.4", "Cloudpickle": "3.1.0", "Gymnasium": "0.28.1", "OpenAI Gym": "0.25.2"}}

+
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7a2c62e77760>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7a2c62e777f0>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7a2c62e77880>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7a2c62e77910>", "_build": "<function ActorCriticPolicy._build at 0x7a2c62e779a0>", "forward": "<function ActorCriticPolicy.forward at 0x7a2c62e77a30>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7a2c62e77ac0>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7a2c62e77b50>", "_predict": "<function ActorCriticPolicy._predict at 0x7a2c62e77be0>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7a2c62e77c70>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7a2c62e77d00>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7a2c62e77d90>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7a2c62e1d580>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1731776890927993344, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWV9QEAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaAAQAAAAAAANeIFT93yU+/22hAP87cKD+g3Ku/SOwuQPSReL7XV3g/v5sLvwiVVr+ah2m/BG43wPOzej8hO08+62dsP2xzxD7RCyNAk7aCwHalXz8lJfk+u/1/v+taCLyvRY3AD5y6P4aEbT/3Ar8+DsPEPlxXbD/ptXhA/NfJwLIzxbwC7X8/3IVlPw3D4r6iA0NA9CytwLeZ2j2aiX4/Sn4fPi7gfL8tTwHABLu9vxRqTz8KDRY/WYhNPlHKej+zD4FASgSVwJjNIz8VvEQ/2JYqP2DhPj9SKkVAz9GvwLNXRT58M3s/HPSbvtLVc78Ek0O+Cfi3v0UIm75o+3O/Z6JCvxRLJj/j6GfAEB0YvlVGbb8qN8C+bbHKPT2+fj8KwGc/9bh/wKIwez+3kUW+19hSPxowET+3HeG9j/DSPnmUez+Udj2+jRBQP94lFT8oJzO+3XPtPr0Fsr3wB3+/7IRpP6rO0T4nw8y/lpGVQEnYPz/7gCk/X2NTvx5mEL/6fV/AxbdEQJSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGJLEEsGhpSMAUOUdJRSlC4="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWV4AsAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHwFEAAAAAAACMAWyUS0WMAXSUR0CP3B+AmReUdX2UKGgGR8BYQAAAAAAAaAdLYmgIR0CP3BTjvNNbdX2UKGgGR8BSgAAAAAAAaAdLS2gIR0CP3C1+AmRedX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP3GatLcsUdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP3NaTOgQIdX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP3MLYwqRVdX2UKGgGR8BYwAAAAAAAaAdLZGgIR0CP3OSi/O+qdX2UKGgGR8BUgAAAAAAAaAdLU2gIR0CP3Slgtvn9dX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3TKr7wazdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP3SjWTX8PdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3Uk690zTdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3awUxmCidX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3ZBF/hESdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP3a5vtMPCdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3gqaw2VFdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP3jmozeoDdX2UKGgGR8BUgAAAAAAAaAdLU2gIR0CP3munMt9QdX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP3pDQ7cO9dX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP3oA9V3lkdX2UKGgGR8BWwAAAAAAAaAdLXGgIR0CP3qK8+RozdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP3tHzYmLMdX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP3uxRl6JJdX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP3uUgSvkjdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP3wsQNCqqdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3y+C9RJmdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP3ydf9gnddX2UKGgGR8BVQAAAAAAAaAdLVmgIR0CP335SFXaKdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP35/z8P4EdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP36gh8pkPdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP38+0w8GLdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP4AFA3T/idX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP4Dd5Y5ktdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP4ISQo1DTdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP4K7JW/8EdX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP4NC6Ymb9dX2UKGgGR8BPgAAAAAAAaAdLQGgIR0CP4Nh86V+rdX2UKGgGR8BYwAAAAAAAaAdLZGgIR0CP4OxDb8FZdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP4OrXlKbsdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP4P8stkFwdX2UKGgGR8BUgAAAAAAAaAdLU2gIR0CP4Qa1kUbldX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP4W0cfeUIdX2UKGgGR8BWAAAAAAAAaAdLWWgIR0CP4WR2bG3ndX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP4ZdWyTpxdX2UKGgGR8BPgAAAAAAAaAdLQGgIR0CP4bGb1AZ9dX2UKGgGR8BTwAAAAAAAaAdLUGgIR0CP4ejwhGH6dX2UKGgGR8BUAAAAAAAAaAdLUWgIR0CP4cf0VafSdX2UKGgGR8BVwAAAAAAAaAdLWGgIR0CP4e4YJmdzdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP4nA1vVEvdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP4rWvKU3XdX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP4siFCb+cdX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP4wAR02cbdX2UKGgGR8BUQAAAAAAAaAdLUmgIR0CP4vdonKGMdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP4uxrSE13dX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP4vj/dZaFdX2UKGgGR8BZQAAAAAAAaAdLZmgIR0CP4uQp4KQadX2UKGgGR8BWgAAAAAAAaAdLW2gIR0CP4xAbADaHdX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP41CiRGMGd
X2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP48GD+R5kdX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP47Y5DJEIdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP47h2nsLOdX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP4+uoxYaHdX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP5BJ4jbBXdX2UKGgGR8BXgAAAAAAAaAdLX2gIR0CP5IgzP8htdX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP5MiiZfD2dX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP5K5QxesxdX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP5QcTakAQdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP5PUoa1kUdX2UKGgGR8BUAAAAAAAAaAdLUWgIR0CP5TyTY/VzdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP5UhY/3WXdX2UKGgGR8BVwAAAAAAAaAdLWGgIR0CP5XHxz7uVdX2UKGgGR8BTgAAAAAAAaAdLT2gIR0CP5YtthuwYdX2UKGgGR8BWwAAAAAAAaAdLXGgIR0CP5X/n4fwJdX2UKGgGR8BZgAAAAAAAaAdLZ2gIR0CP5aJ0nw5OdX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP5ckv9LpSdX2UKGgGR8BSAAAAAAAAaAdLSWgIR0CP5bZdOZb7dX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP5dNwBHTadX2UKGgGR8BRgAAAAAAAaAdLR2gIR0CP5eBI4EOidX2UKGgGR8BSwAAAAAAAaAdLTGgIR0CP5if/3nIRdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP5lp0wJw9dX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP5u7rcCYDdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP5ve40/GEdX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP5yj+rELqdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP5z6ol2NedX2UKGgGR8BVgAAAAAAAaAdLV2gIR0CP55mZmZmadX2UKGgGR8BUAAAAAAAAaAdLUWgIR0CP5/5aePJadX2UKGgGR8BTAAAAAAAAaAdLTWgIR0CP6BpblijMdX2UKGgGR8BWAAAAAAAAaAdLWWgIR0CP6EbADaGpdX2UKGgGR8BaAAAAAAAAaAdLaWgIR0CP6GosI3R5dX2UKGgGR8BWgAAAAAAAaAdLW2gIR0CP6G4OtnwodX2UKGgGR8BVgAAAAAAAaAdLV2gIR0CP6H1wHZ9NdX2UKGgGR8BWgAAAAAAAaAdLW2gIR0CP6JCBwuM/dX2UKGgGR8BVAAAAAAAAaAdLVWgIR0CP6HdWyTpxdX2UKGgGR8BSQAAAAAAAaAdLSmgIR0CP6J+uvECOdX2UKGgGR8BaAAAAAAAAaAdLaWgIR0CP6J7laKUFdX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP6I2bXpW4dX2UKGgGR8BRwAAAAAAAaAdLSGgIR0CP6SFDfFaTdX2UKGgGR8BUwAAAAAAAaAdLVGgIR0CP6XQTmGM5dX2UKGgGR8BTQAAAAAAAaAdLTmgIR0CP6Xnq3VkMdX2UKGgGR8BQAAAAAAAAaAdLQWgIR0CP6gLuQZGbdX2UKGgGR8BUQAAAAAAAaAdLUmgIR0CP6gE7nxJ/dWUu"}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 310, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVNAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBgAAAAAAAAABAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBoWUjAFDlHSUUpSMDWJvdW5kZWRfYWJvdmWUaBEolgYAAAAAAAAAAQEBAQEBlGgVSwaFlGgZdJRSlIwGX3NoYXBllEsGhZSMA2xvd5RoESiWGAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/2w9JwdYx4sGUaAtLBoWUaBl0lFKUjARoaWdolGgRKJYYAAAAAAAAAAAAgD8AAIA/AACAPwAAgD/bD0lB1jHiQZRoC0sGhZRoGXSUUpSMCGxvd19yZXBylIxDWyAtMS4gICAgICAgIC0xLiAgICAgICAgLTEuICAgICAgICAtMS4gICAgICAgLTEyLjU2NjM3MSAtMjguMjc0MzM0XZSMCWhpZ2hfcmVwcpSMPVsgMS4gICAgICAgIDEuICAgICAgICAxLiAgICAgICAgMS4gICAgICAgMTIuNTY2MzcxIDI4LjI3NDMzNF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True]", "bounded_above": "[ True True True True True True]", "_shape": [6], "low": "[ -1. -1. -1. -1. -12.566371 -28.274334]", "high": "[ 1. 1. 1. 1. 12.566371 28.274334]", "low_repr": "[ -1. -1. -1. -1. -12.566371 -28.274334]", "high_repr": "[ 1. 1. 1. 1. 
12.566371 28.274334]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV2wAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIAwAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgOjApfbnBfcmFuZG9tlE51Yi4=", "n": "3", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 16, "n_steps": 2048, "gamma": 0.99, "gae_lambda": 0.95, "ent_coef": 0.0, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 10, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVrQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFowEZnVuY5SMDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVrQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUaACMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFowEZnVuY5SMDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "system_info": {"OS": "Linux-6.1.85+-x86_64-with-glibc2.35 # 1 SMP PREEMPT_DYNAMIC Thu Jun 27 21:05:47 UTC 2024", "Python": "3.10.12", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.5.1+cu121", "GPU Enabled": "False", "Numpy": "1.26.4", "Cloudpickle": "3.1.0", "Gymnasium": "0.28.1", "OpenAI Gym": "0.25.2"}}
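The hyperparameters recorded in the new config.json (16 envs, n_steps 2048, batch_size 64, n_epochs 10, learning rate 0.0003, gamma 0.99, gae_lambda 0.95, a 1,000,000-step budget) match Stable-Baselines3's PPO defaults. The training script itself is not part of this commit, so the following is only a sketch of a run that would produce artifacts of this shape; the PPO class, the "MlpPolicy" alias and the local save name are assumptions, not something the diff states.

```python
# Sketch only -- reconstructed from the values in config.json; the actual
# training script is not part of this commit. PPO and "MlpPolicy" are
# assumptions (the config only records the serialized ActorCriticPolicy).
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env

# "n_envs": 16 -> 16 parallel Acrobot-v1 environments
vec_env = make_vec_env("Acrobot-v1", n_envs=16)

model = PPO(
    "MlpPolicy",
    vec_env,
    learning_rate=3e-4,   # "learning_rate": 0.0003
    n_steps=2048,         # "n_steps": 2048
    batch_size=64,        # "batch_size": 64
    n_epochs=10,          # "n_epochs": 10
    gamma=0.99,
    gae_lambda=0.95,
    ent_coef=0.0,
    vf_coef=0.5,
    max_grad_norm=0.5,
    verbose=1,
)

# 16 envs * 2048 steps = 32,768 steps per rollout; 31 rollouts overshoot the
# requested 1,000,000 steps to 1,015,808 ("num_timesteps") and give
# 31 * 10 epochs = 310 updates ("_n_updates": 310).
model.learn(total_timesteps=1_000_000)
model.save("Acrobot-v1")  # writes the Acrobot-v1.zip archive updated in this commit
```

The removed side of config.json shows the previous upload, which had collected a single 32,768-step rollout against a 10,000-step budget (num_timesteps 32768, _total_timesteps 10000, _n_updates 10).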
replay.mp4
CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward": -

+{"mean_reward": -78.1, "std_reward": 12.88759093081403, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-11-16T17:25:33.884731"}
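results.json records a 10-episode deterministic evaluation of the uploaded policy. Below is a minimal sketch of how such an entry is typically computed with Stable-Baselines3's evaluate_policy; loading the archive as PPO and the local file name are assumptions. (The overall repo layout, with replay.mp4, results.json and the README metric block, is the shape produced by packaging helpers such as huggingface_sb3's package_to_hub.)

```python
# Sketch only: how a results.json-style entry (mean_reward / std_reward over
# 10 deterministic episodes) is typically computed. Loading the archive as
# PPO is an assumption based on the PPO-style hyperparameters in config.json.
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor

model = PPO.load("Acrobot-v1")            # the Acrobot-v1.zip uploaded above
eval_env = Monitor(gym.make("Acrobot-v1"))

# "n_eval_episodes": 10 and "is_deterministic": true in results.json
mean_reward, std_reward = evaluate_policy(
    model, eval_env, n_eval_episodes=10, deterministic=True
)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")  # README reports -78.10 +/- 12.89
```

Despite the "first (bad) attempt" label, -78.10 +/- 12.89 is already above the -100 reward threshold commonly used as the solved criterion for Acrobot-v1.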