from .vec_env import VecEnvWrapper
import numpy as np
from gym import spaces
class VecFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.nstack, axis=-1)
high = np.repeat(wos.high, self.nstack, axis=-1)
self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)
for (i, new) in enumerate(news):
if new:
self.stackedobs[i] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs, rews, news, infos
def reset(self):
obs = self.venv.reset()
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
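# Editorial usage sketch, not part of the original baselines module (assumes
# gym's pre-0.26 API where step() returns 4 values): stacking 4 frames turns
# CartPole's (4,) observation into (16,) along the last axis.
if __name__ == '__main__':
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
    venv = VecFrameStack(DummyVecEnv([lambda: gym.make('CartPole-v0')]), nstack=4)
    obs = venv.reset()
    assert obs.shape == (1, 16)  # (num_envs, obs_dim * nstack)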
|
import contextlib
import os
from abc import ABC, abstractmethod
from baselines.common.tile_images import tile_images
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
class VecEnvWrapper(VecEnv):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
@abstractmethod
def process(self, obs):
pass
def reset(self):
obs = self.venv.reset()
return self.process(obs)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
@contextlib.contextmanager
def clear_mpi_env_vars():
"""
`from mpi4py import MPI` calls MPI_Init by default. If the child process inherits MPI environment variables, MPI thinks the child is an MPI process just like the parent and may do bad things such as hang.
This context manager is a hacky way to temporarily clear those environment variables, for example when we are starting multiprocessing
Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
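# Editorial sanity sketch, not part of the original baselines module:
# CloudpickleWrapper lets lambdas and closures cross process boundaries,
# which plain pickle refuses.
if __name__ == '__main__':
    import pickle
    wrapped = CloudpickleWrapper(lambda: 42)
    restored = pickle.loads(pickle.dumps(wrapped))  # __getstate__/__setstate__ round-trip
    assert restored.x() == 42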
|
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
def step_env(env, action):
ob, reward, done, info = env.step(action)
if done:
ob = env.reset()
return ob, reward, done, info
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
remote.send([step_env(env, action) for env, action in zip(envs, data)])
elif cmd == 'reset':
remote.send([env.reset() for env in envs])
elif cmd == 'render':
remote.send([env.render(mode='rgb_array') for env in envs])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces_spec':
remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
for env in envs:
env.close()
class SubprocVecEnv(VecEnv):
"""
VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, context='spawn', in_series=1):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
in_series: number of environments to run in series in a single process
(e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
"""
self.waiting = False
self.closed = False
self.in_series = in_series
nenvs = len(env_fns)
assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
self.nremotes = nenvs // in_series
env_fns = np.array_split(env_fns, self.nremotes)
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
observation_space, action_space, self.spec = self.remotes[0].recv().x
self.viewer = None
VecEnv.__init__(self, nenvs, observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
actions = np.array_split(actions, self.nremotes)
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
obs = _flatten_list(obs)
return _flatten_obs(obs)
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
imgs = _flatten_list(imgs)
return imgs
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def __del__(self):
if not self.closed:
self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
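# Editorial usage sketch, not part of the original baselines module (gym
# pre-0.26 API): four CartPole copies stepped across two worker processes,
# two envs in series per process. The __main__ guard matters with 'spawn'.
if __name__ == '__main__':
    import gym
    venv = SubprocVecEnv([lambda: gym.make('CartPole-v0')] * 4, in_series=2)
    obs = venv.reset()  # batch of shape (4, 4)
    obs, rews, dones, infos = venv.step([venv.action_space.sample() for _ in range(4)])
    venv.close()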
|
"""
Helpers for dealing with vectorized environments.
"""
from collections import OrderedDict
import gym
import numpy as np
def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()}
def dict_to_obs(obs_dict):
"""
Convert an observation dict into a raw array if the
original observation space was not a Dict space.
"""
if set(obs_dict.keys()) == {None}:
return obs_dict[None]
return obs_dict
def obs_space_info(obs_space):
"""
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
elif isinstance(obs_space, gym.spaces.Tuple):
assert isinstance(obs_space.spaces, tuple)
subspaces = {i: obs_space.spaces[i] for i in range(len(obs_space.spaces))}
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes
def obs_to_dict(obs):
"""
Convert an observation into a dict.
"""
if isinstance(obs, dict):
return obs
return {None: obs}
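# Editorial worked example, not part of the original baselines module: a Dict
# space maps to per-key shapes/dtypes, while a non-Dict space is filed under
# the key None.
if __name__ == '__main__':
    space = gym.spaces.Dict(OrderedDict([
        ('pos', gym.spaces.Box(low=-1., high=1., shape=(3,), dtype=np.float32)),
        ('id', gym.spaces.Discrete(5)),
    ]))
    keys, shapes, dtypes = obs_space_info(space)
    assert keys == ['pos', 'id'] and shapes['pos'] == (3,)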
|
import os
from baselines import logger
from baselines.common.vec_env import VecEnvWrapper
from gym.wrappers.monitoring import video_recorder
class VecVideoRecorder(VecEnvWrapper):
"""
Wrap VecEnv to record rendered image as mp4 video.
"""
def __init__(self, venv, directory, record_video_trigger, video_length=200):
"""
# Arguments
venv: VecEnv to wrap
directory: Where to save videos
record_video_trigger:
Function that defines when to start recording.
The function takes the current number of step,
and returns whether we should start recording or not.
video_length: Length of recorded video
"""
VecEnvWrapper.__init__(self, venv)
self.record_video_trigger = record_video_trigger
self.video_recorder = None
self.directory = os.path.abspath(directory)
if not os.path.exists(self.directory): os.mkdir(self.directory)
self.file_prefix = "vecenv"
self.file_infix = '{}'.format(os.getpid())
self.step_id = 0
self.video_length = video_length
self.recording = False
self.recorded_frames = 0
def reset(self):
obs = self.venv.reset()
self.start_video_recorder()
return obs
def start_video_recorder(self):
self.close_video_recorder()
base_path = os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.step_id))
self.video_recorder = video_recorder.VideoRecorder(
env=self.venv,
base_path=base_path,
metadata={'step_id': self.step_id}
)
self.video_recorder.capture_frame()
self.recorded_frames = 1
self.recording = True
def _video_enabled(self):
return self.record_video_trigger(self.step_id)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.step_id += 1
if self.recording:
self.video_recorder.capture_frame()
self.recorded_frames += 1
if self.recorded_frames > self.video_length:
logger.info("Saving video to ", self.video_recorder.path)
self.close_video_recorder()
elif self._video_enabled():
self.start_video_recorder()
return obs, rews, dones, infos
def close_video_recorder(self):
if self.recording:
self.video_recorder.close()
self.recording = False
self.recorded_frames = 0
def close(self):
VecEnvWrapper.close(self)
self.close_video_recorder()
def __del__(self):
self.close()
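# Editorial usage sketch, not part of the original baselines module (needs
# ffmpeg on PATH and a display or virtual framebuffer for rendering): record
# video_length frames each time the trigger fires.
if __name__ == '__main__':
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
    venv = DummyVecEnv([lambda: gym.make('CartPole-v0')])
    venv = VecVideoRecorder(venv, '/tmp/videos',
                            record_video_trigger=lambda step: step % 1000 == 0,
                            video_length=100)
    obs = venv.reset()
    for _ in range(50):
        obs, _, _, _ = venv.step([venv.action_space.sample()])
    venv.close()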
|
from .vec_env import VecEnvObservationWrapper
class VecExtractDictObs(VecEnvObservationWrapper):
def __init__(self, venv, key):
self.key = key
super().__init__(venv=venv,
observation_space=venv.observation_space.spaces[self.key])
def process(self, obs):
return obs[self.key]
|
from .vec_env import AlreadySteppingError, NotSteppingError, VecEnv, VecEnvWrapper, VecEnvObservationWrapper, CloudpickleWrapper
from .dummy_vec_env import DummyVecEnv
from .shmem_vec_env import ShmemVecEnv
from .subproc_vec_env import SubprocVecEnv
from .vec_frame_stack import VecFrameStack
from .vec_monitor import VecMonitor
from .vec_normalize import VecNormalize
from .vec_remove_dict_obs import VecExtractDictObs
__all__ = ['AlreadySteppingError', 'NotSteppingError', 'VecEnv', 'VecEnvWrapper', 'VecEnvObservationWrapper', 'CloudpickleWrapper', 'DummyVecEnv', 'ShmemVecEnv', 'SubprocVecEnv', 'VecFrameStack', 'VecMonitor', 'VecNormalize', 'VecExtractDictObs']
|
from . import VecEnvWrapper
from baselines.bench.monitor import ResultsWriter
import numpy as np
import time
from collections import deque
class VecMonitor(VecEnvWrapper):
def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()):
VecEnvWrapper.__init__(self, venv)
self.eprets = None
self.eplens = None
self.epcount = 0
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(filename, header={'t_start': self.tstart},
extra_keys=info_keywords)
else:
self.results_writer = None
self.info_keywords = info_keywords
self.keep_buf = keep_buf
if self.keep_buf:
self.epret_buf = deque([], maxlen=keep_buf)
self.eplen_buf = deque([], maxlen=keep_buf)
def reset(self):
obs = self.venv.reset()
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.eprets += rews
self.eplens += 1
newinfos = list(infos[:])
for i in range(len(dones)):
if dones[i]:
info = infos[i].copy()
ret = self.eprets[i]
eplen = self.eplens[i]
epinfo = {'r': ret, 'l': eplen, 't': round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
info['episode'] = epinfo
if self.keep_buf:
self.epret_buf.append(ret)
self.eplen_buf.append(eplen)
self.epcount += 1
self.eprets[i] = 0
self.eplens[i] = 0
if self.results_writer:
self.results_writer.write_row(epinfo)
newinfos[i] = info
return obs, rews, dones, newinfos
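# Editorial usage sketch, not part of the original baselines module: episode
# return and length show up under info['episode'] whenever an env reports done.
if __name__ == '__main__':
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
    venv = VecMonitor(DummyVecEnv([lambda: gym.make('CartPole-v0')]), keep_buf=10)
    obs = venv.reset()
    for _ in range(500):
        obs, rews, dones, infos = venv.step([venv.action_space.sample()])
        if dones[0]:
            print(infos[0]['episode'])  # e.g. {'r': 23.0, 'l': 23, 't': 0.01}
            break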
|
from . import VecEnvWrapper
import numpy as np
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, use_tf=False):
VecEnvWrapper.__init__(self, venv)
if use_tf:
from baselines.common.running_mean_std import TfRunningMeanStd
self.ob_rms = TfRunningMeanStd(shape=self.observation_space.shape, scope='ob_rms') if ob else None
self.ret_rms = TfRunningMeanStd(shape=(), scope='ret_rms') if ret else None
else:
from baselines.common.running_mean_std import RunningMeanStd
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
self.ret[news] = 0.
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset()
return self._obfilt(obs)
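# Editorial usage sketch, not part of the original baselines module:
# observations are normalized by a running mean/std; rewards are scaled by the
# running std of the discounted return, then clipped.
if __name__ == '__main__':
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
    venv = VecNormalize(DummyVecEnv([lambda: gym.make('CartPole-v0')]))
    obs = venv.reset()
    for _ in range(10):
        obs, rews, dones, infos = venv.step([venv.action_space.sample()])
    assert np.abs(obs).max() <= venv.clipob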
|
import numpy as np
from .vec_env import VecEnv
from .util import copy_obs_dict, dict_to_obs, obs_space_info
class DummyVecEnv(VecEnv):
"""
VecEnv that runs multiple environments sequentially, that is,
the step and reset commands are sent to one environment at a time.
Useful when debugging and when num_env == 1 (in the latter case,
avoids communication overhead)
"""
def __init__(self, env_fns):
"""
Arguments:
env_fns: iterable of callables, functions that build environments
"""
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
obs_space = env.observation_space
self.keys, shapes, dtypes = obs_space_info(obs_space)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
self.spec = self.envs[0].spec
def step_async(self, actions):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs)
self.actions = [actions]
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
# if isinstance(self.envs[e].action_space, spaces.Discrete):
# action = int(action)
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
return dict_to_obs(copy_obs_dict(self.buf_obs))
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self, mode='human'):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
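# Editorial usage sketch, not part of the original baselines module: the same
# interface as SubprocVecEnv, but everything runs in-process, which is easier
# to debug.
if __name__ == '__main__':
    import gym
    venv = DummyVecEnv([lambda: gym.make('CartPole-v0') for _ in range(2)])
    obs = venv.reset()  # batch of shape (2, 4)
    obs, rews, dones, infos = venv.step([venv.action_space.sample() for _ in range(2)])
    venv.close()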
|
import numpy as np
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
# Lambda used in GAE (Generalized Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
# Given observations, get action value and neglogpacs
# We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
# Take actions in env and look at the results
# Infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
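# Editorial worked example, not part of the original baselines module: sf01
# maps a time-major (nsteps, nenvs, ...) rollout into a flat
# (nsteps * nenvs, ...) batch in which each environment's steps stay contiguous.
if __name__ == '__main__':
    arr = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # (nsteps=2, nenvs=3, dim=4)
    assert sf01(arr).shape == (6, 4)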
|
import os
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.ppo2.runner import Runner
def constfn(val):
def f(_):
return val
return f
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the number of envs
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
if init_fn is not None:
init_fn()
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
# Here, for each minibatch, we calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frames per second)
fps = int(nbatch / (tnow - tstart))
if update_fn is not None:
update_fn(update)
if update % log_interval == 0 or update == 1:
# Calculates whether the value function is a good predictor of the returns (ev close to 1)
# or worse than just predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv('loss/' + lossname, lossval)
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root:
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
return model
# Avoid division error when calculating the mean (if xs is empty, return np.nan rather than raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
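# Editorial usage sketch, not part of the original baselines module (TF1-era
# baselines): train an MLP policy on CartPole for a few updates;
# nsteps * num_envs must be divisible by nminibatches.
if __name__ == '__main__':
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
    env = DummyVecEnv([lambda: gym.make('CartPole-v0')])
    model = learn(network='mlp', env=env, total_timesteps=2048,
                  nsteps=128, nminibatches=4, log_interval=1)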
|
import tensorflow as tf
import numpy as np
from baselines.ppo2.model import Model
class MicrobatchedModel(Model):
"""
Model that does training one microbatch at a time - useful when gradient
computation on the entire minibatch causes a memory overflow
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight, comm, microbatch_size):
self.nmicrobatches = nbatch_train // microbatch_size
self.microbatch_size = microbatch_size
assert nbatch_train % microbatch_size == 0, 'microbatch_size ({}) should divide nbatch_train ({}) evenly'.format(microbatch_size, nbatch_train)
super().__init__(
policy=policy,
ob_space=ob_space,
ac_space=ac_space,
nbatch_act=nbatch_act,
nbatch_train=microbatch_size,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
mpi_rank_weight=mpi_rank_weight,
comm=comm)
self.grads_ph = [tf.placeholder(dtype=g.dtype, shape=g.shape) for g in self.grads]
grads_ph_and_vars = list(zip(self.grads_ph, self.var))
self._apply_gradients_op = self.trainer.apply_gradients(grads_ph_and_vars)
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
assert states is None, "microbatches with recurrent models are not supported yet"
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
# Initialize empty list for per-microbatch stats like pg_loss, vf_loss, entropy, approxkl (whatever is in self.stats_list)
stats_vs = []
for microbatch_idx in range(self.nmicrobatches):
_sli = range(microbatch_idx * self.microbatch_size, (microbatch_idx+1) * self.microbatch_size)
td_map = {
self.train_model.X: obs[_sli],
self.A:actions[_sli],
self.ADV:advs[_sli],
self.R:returns[_sli],
self.CLIPRANGE:cliprange,
self.OLDNEGLOGPAC:neglogpacs[_sli],
self.OLDVPRED:values[_sli]
}
# Compute gradient on a microbatch (note that variables do not change here) ...
grad_v, stats_v = self.sess.run([self.grads, self.stats_list], td_map)
if microbatch_idx == 0:
sum_grad_v = grad_v
else:
# .. and add to the total of the gradients
for i, g in enumerate(grad_v):
sum_grad_v[i] += g
stats_vs.append(stats_v)
feed_dict = {ph: sum_g / self.nmicrobatches for ph, sum_g in zip(self.grads_ph, sum_grad_v)}
feed_dict[self.LR] = lr
# Update variables using average of the gradients
self.sess.run(self._apply_gradients_op, feed_dict)
# Return average of the stats
return np.mean(np.array(stats_vs), axis=0).tolist()
|
import gym
import tensorflow as tf
import numpy as np
from functools import partial
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.tf_util import make_session
from baselines.ppo2.ppo2 import learn
from baselines.ppo2.microbatched_model import MicrobatchedModel
def test_microbatches():
def env_fn():
env = gym.make('CartPole-v0')
env.seed(0)
return env
learn_fn = partial(learn, network='mlp', nsteps=32, total_timesteps=32, seed=0)
env_ref = DummyVecEnv([env_fn])
sess_ref = make_session(make_default=True, graph=tf.Graph())
learn_fn(env=env_ref)
vars_ref = {v.name: sess_ref.run(v) for v in tf.trainable_variables()}
env_test = DummyVecEnv([env_fn])
sess_test = make_session(make_default=True, graph=tf.Graph())
learn_fn(env=env_test, model_fn=partial(MicrobatchedModel, microbatch_size=2))
# learn_fn(env=env_test)
vars_test = {v.name: sess_test.run(v) for v in tf.trainable_variables()}
for v in vars_ref:
np.testing.assert_allclose(vars_ref[v], vars_test[v], atol=3e-3)
if __name__ == '__main__':
test_microbatches()
|
import tensorflow as tf
import functools
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.tf_util import initialize
try:
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from mpi4py import MPI
from baselines.common.mpi_util import sync_from_root
except ImportError:
MPI = None
class Model(object):
"""
We use this object to :
__init__:
- Creates the step_model
- Creates the train_model
train():
- Runs the training part (feedforward and backpropagation of gradients)
save/load():
- Save/load the model
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None):
self.sess = sess = get_session()
if MPI is not None and comm is None:
comm = MPI.COMM_WORLD
with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
# CREATE OUR TWO MODELS
# act_model that is used for sampling
act_model = policy(nbatch_act, 1, sess)
# Train model for training
if microbatch_size is None:
train_model = policy(nbatch_train, nsteps, sess)
else:
train_model = policy(microbatch_size, nsteps, sess)
# CREATE THE PLACEHOLDERS
self.A = A = train_model.pdtype.sample_placeholder([None])
self.ADV = ADV = tf.placeholder(tf.float32, [None])
self.R = R = tf.placeholder(tf.float32, [None])
# Keep track of old actor
self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
# Keep track of old critic
self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
self.LR = LR = tf.placeholder(tf.float32, [])
# Cliprange
self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
# Calculate the entropy
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# CALCULATE THE LOSS
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Clip the value to reduce variability during Critic training
# Get the predicted value
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
# Unclipped value
vf_losses1 = tf.square(vpred - R)
# Clipped value
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
# Calculate ratio (pi current policy / pi old policy)
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
# Defining Loss = - J is equivalent to max J
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
# Final PG loss
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
# Total loss
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
# UPDATE THE PARAMETERS USING LOSS
# 1. Get the model parameters
params = tf.trainable_variables('ppo2_model')
# 2. Build our trainer
if comm is not None and comm.Get_size() > 1:
self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5)
else:
self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
# 3. Calculate the gradients
grads_and_var = self.trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
# zip pairs each gradient with its associated parameter
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
self.grads = grads
self.var = var
self._train_op = self.trainer.apply_gradients(grads_and_var)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac]
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {
self.train_model.X : obs,
self.A : actions,
self.ADV : advs,
self.R : returns,
self.LR : lr,
self.CLIPRANGE : cliprange,
self.OLDNEGLOGPAC : neglogpacs,
self.OLDVPRED : values
}
if states is not None:
td_map[self.train_model.S] = states
td_map[self.train_model.M] = masks
return self.sess.run(
self.stats_list + [self._train_op],
td_map
)[:-1]
|
def mujoco():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.0,
lr=lambda f: 3e-4 * f,
cliprange=0.2,
value_network='copy'
)
def atari():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
)
def retro():
return atari()
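# Editorial usage sketch, not part of the original baselines module: these
# dicts are meant to be merged into ppo2.learn(...) keyword arguments for the
# matching environment type; e.g. the mujoco learning rate anneals linearly.
if __name__ == '__main__':
    kwargs = mujoco()
    print(kwargs['lr'](1.0), kwargs['lr'](0.0))  # 3e-4 at start of training, 0.0 at end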
|
import numpy as np
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
"""Creates a sample function that can be used for HER experience replay.
Args:
replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
regular DDPG experience replay is used
replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
as many HER replays as regular replays are used)
reward_fun (function): function to re-compute the reward with substituted goals
"""
if replay_strategy == 'future':
future_p = 1 - (1. / (1 + replay_k))
else: # 'replay_strategy' == 'none'
future_p = 0
def _sample_her_transitions(episode_batch, batch_size_in_transitions):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
# Select which episodes and time steps to use.
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
for key in episode_batch.keys()}
# Select which transitions get HER replay, each with probability future_p.
# These will have their goal substituted with a future achieved goal.
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
# Replace goal with achieved goal but only for the previously-selected
# HER transitions (as defined by her_indexes). For the other transitions,
# keep the original goal.
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
# Reconstruct info dictionary for reward computation.
info = {}
for key, value in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
# Re-compute reward since we may have substituted the goal.
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
for k in transitions.keys()}
assert(transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions
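# Editorial worked sketch, not part of the original baselines module: a toy
# buffer of 10 episodes with horizon T=5 and 1-D actions/goals, and a sparse
# reward that is 0 when the achieved goal matches the (possibly substituted)
# goal and -1 otherwise. The buffer needs 'ag' with T+1 entries and an 'ag_2'
# key holding the next achieved goals, as the real replay buffer stores.
if __name__ == '__main__':
    def reward_fun(ag_2, g, info):
        return -(np.abs(ag_2 - g).squeeze(-1) > 0.05).astype(np.float32)
    sampler = make_sample_her_transitions('future', replay_k=4, reward_fun=reward_fun)
    B, T = 10, 5
    batch = {'u': np.random.randn(B, T, 1),
             'g': np.random.randn(B, T, 1),
             'ag': np.random.randn(B, T + 1, 1)}
    batch['ag_2'] = batch['ag'][:, 1:, :]  # next achieved goals
    transitions = sampler(batch, batch_size_in_transitions=32)
    assert transitions['u'].shape == (32, 1) and transitions['r'].shape == (32,)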
|
import tensorflow as tf
from baselines.her.util import store_args, nn
class ActorCritic:
@store_args
def __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers,
**kwargs):
"""The actor-critic network and related training code.
Args:
inputs_tf (dict of tensors): all necessary inputs for the network: the
observation (o), the goal (g), and the action (u)
dimo (int): the dimension of the observations
dimg (int): the dimension of the goals
dimu (int): the dimension of the actions
max_u (float): the maximum magnitude of actions; action outputs will be scaled
accordingly
o_stats (baselines.her.Normalizer): normalizer for observations
g_stats (baselines.her.Normalizer): normalizer for goals
hidden (int): number of hidden units that should be used in hidden layers
layers (int): number of hidden layers
"""
self.o_tf = inputs_tf['o']
self.g_tf = inputs_tf['g']
self.u_tf = inputs_tf['u']
# Prepare inputs for actor and critic.
o = self.o_stats.normalize(self.o_tf)
g = self.g_stats.normalize(self.g_tf)
input_pi = tf.concat(axis=1, values=[o, g]) # for actor
# Networks.
with tf.variable_scope('pi'):
self.pi_tf = self.max_u * tf.tanh(nn(
input_pi, [self.hidden] * self.layers + [self.dimu]))
with tf.variable_scope('Q'):
# for policy training
input_Q = tf.concat(axis=1, values=[o, g, self.pi_tf / self.max_u])
self.Q_pi_tf = nn(input_Q, [self.hidden] * self.layers + [1])
# for critic training
input_Q = tf.concat(axis=1, values=[o, g, self.u_tf / self.max_u])
self._input_Q = input_Q # exposed for tests
self.Q_tf = nn(input_Q, [self.hidden] * self.layers + [1], reuse=True)
|
import os
import subprocess
import sys
import importlib
import inspect
import functools
import tensorflow as tf
import numpy as np
from baselines.common import tf_util as U
def store_args(method):
"""Stores provided method args as instance attributes.
"""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
for name, value in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper
def import_function(spec):
"""Import a function identified by a string like "pkg.module:fn_name".
"""
mod_name, fn_name = spec.split(':')
module = importlib.import_module(mod_name)
fn = getattr(module, fn_name)
return fn
def flatten_grads(var_list, grads):
"""Flattens a variables and their gradients.
"""
return tf.concat([tf.reshape(grad, [U.numel(v)])
for (v, grad) in zip(var_list, grads)], 0)
def nn(input, layers_sizes, reuse=None, flatten=False, name=""):
"""Creates a simple neural network
"""
for i, size in enumerate(layers_sizes):
activation = tf.nn.relu if i < len(layers_sizes) - 1 else None
input = tf.layers.dense(inputs=input,
units=size,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse,
name=name + '_' + str(i))
if activation:
input = activation(input)
if flatten:
assert layers_sizes[-1] == 1
input = tf.reshape(input, [-1])
return input
def install_mpi_excepthook():
import sys
from mpi4py import MPI
old_hook = sys.excepthook
def new_hook(a, b, c):
old_hook(a, b, c)
sys.stdout.flush()
sys.stderr.flush()
MPI.COMM_WORLD.Abort()
sys.excepthook = new_hook
def mpi_fork(n, extra_mpi_args=[]):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n <= 1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
# "-bind-to core" is crucial for good performance
args = ["mpirun", "-np", str(n)] + \
extra_mpi_args + \
[sys.executable]
args += sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
install_mpi_excepthook()
return "child"
def convert_episode_to_batch_major(episode):
"""Converts an episode to have the batch dimension in the major (first)
dimension.
"""
episode_batch = {}
for key in episode.keys():
val = np.array(episode[key]).copy()
# make inputs batch-major instead of time-major
episode_batch[key] = val.swapaxes(0, 1)
return episode_batch
def transitions_in_episode_batch(episode_batch):
"""Number of transitions in a given episode batch.
"""
shape = episode_batch['u'].shape
return shape[0] * shape[1]
def reshape_for_broadcasting(source, target):
"""Reshapes a tensor (source) to have the correct shape and dtype of the target
before broadcasting it with MPI.
"""
dim = len(target.get_shape())
shape = ([1] * (dim - 1)) + [-1]
return tf.reshape(tf.cast(source, target.dtype), shape)
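# Editorial worked sketch, not part of the original baselines module:
# store_args copies the constructor's arguments (including defaults and
# keyword arguments) onto the instance before the method body runs.
if __name__ == '__main__':
    class Point:
        @store_args
        def __init__(self, x, y=2, **kwargs):
            pass
    p = Point(1, z=3)
    assert (p.x, p.y, p.z) == (1, 2, 3)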
|
from collections import deque
import numpy as np
import pickle
from baselines.her.util import convert_episode_to_batch_major, store_args
class RolloutWorker:
@store_args
def __init__(self, venv, policy, dims, logger, T, rollout_batch_size=1,
exploit=False, use_target_net=False, compute_Q=False, noise_eps=0,
random_eps=0, history_len=100, render=False, monitor=False, **kwargs):
"""Rollout worker generates experience by interacting with one or many environments.
Args:
venv: vectorized gym environments.
policy (object): the policy that is used to act
dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)
logger (object): the logger that is used by the rollout worker
rollout_batch_size (int): the number of parallel rollouts that should be used
exploit (boolean): whether or not to exploit, i.e. to act optimally according to the
current policy without any exploration
use_target_net (boolean): whether or not to use the target net for rollouts
compute_Q (boolean): whether or not to compute the Q values alongside the actions
noise_eps (float): scale of the additive Gaussian noise
random_eps (float): probability of selecting a completely random action
history_len (int): length of history for statistics smoothing
render (boolean): whether or not to render the rollouts
"""
assert self.T > 0
self.info_keys = [key.replace('info_', '') for key in dims.keys() if key.startswith('info_')]
self.success_history = deque(maxlen=history_len)
self.Q_history = deque(maxlen=history_len)
self.n_episodes = 0
self.reset_all_rollouts()
self.clear_history()
def reset_all_rollouts(self):
self.obs_dict = self.venv.reset()
self.initial_o = self.obs_dict['observation']
self.initial_ag = self.obs_dict['achieved_goal']
self.g = self.obs_dict['desired_goal']
def generate_rollouts(self):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.reset_all_rollouts()
# compute observations
o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
o[:] = self.initial_o
ag[:] = self.initial_ag
# generate episodes
obs, achieved_goals, acts, goals, successes = [], [], [], [], []
dones = []
info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
Qs = []
for t in range(self.T):
policy_output = self.policy.get_actions(
o, ag, self.g,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
if u.ndim == 1:
# The non-batched case should still have a reasonable shape.
u = u.reshape(1, -1)
o_new = np.empty((self.rollout_batch_size, self.dims['o']))
ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
success = np.zeros(self.rollout_batch_size)
# compute new states and observations
obs_dict_new, _, done, info = self.venv.step(u)
o_new = obs_dict_new['observation']
ag_new = obs_dict_new['achieved_goal']
success = np.array([i.get('is_success', 0.0) for i in info])
if any(done):
# here we assume all environments are done in ~the same number of steps, so we terminate rollouts whenever any of the envs returns done
# the trick with using vecenvs is to not add the obs from the environments that are "done", because those are already observations
# after a reset
break
for i, info_dict in enumerate(info):
for idx, key in enumerate(self.info_keys):
info_values[idx][t, i] = info_dict[key]
if np.isnan(o_new).any():
self.logger.warn('NaN caught during rollout generation. Trying again...')
self.reset_all_rollouts()
return self.generate_rollouts()
dones.append(done)
obs.append(o.copy())
achieved_goals.append(ag.copy())
successes.append(success.copy())
acts.append(u.copy())
goals.append(self.g.copy())
o[...] = o_new
ag[...] = ag_new
obs.append(o.copy())
achieved_goals.append(ag.copy())
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(self.info_keys, info_values):
episode['info_{}'.format(key)] = value
# stats
successful = np.array(successes)[-1, :]
assert successful.shape == (self.rollout_batch_size,)
success_rate = np.mean(successful)
self.success_history.append(success_rate)
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
self.n_episodes += self.rollout_batch_size
return convert_episode_to_batch_major(episode)
def clear_history(self):
"""Clears all histories that are used for statistics
"""
self.success_history.clear()
self.Q_history.clear()
def current_success_rate(self):
return np.mean(self.success_history)
def current_mean_Q(self):
return np.mean(self.Q_history)
def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f)
def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
|
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from baselines import logger
from baselines.her.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)
from baselines.her.normalizer import Normalizer
from baselines.her.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
from baselines.common import tf_util
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
global DEMO_BUFFER #buffer for demonstrations
class DDPG(object):
@store_args
def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,
sample_transitions, gamma, reuse=False, **kwargs):
"""Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).
Added functionality to use demonstrations for training to overcome the exploration problem.
Args:
input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
actions (u)
buffer_size (int): number of transitions that are stored in the replay buffer
hidden (int): number of units in the hidden layers
layers (int): number of hidden layers
network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')
polyak (float): coefficient for Polyak-averaging of the target network
batch_size (int): batch size for training
Q_lr (float): learning rate for the Q (critic) network
pi_lr (float): learning rate for the pi (actor) network
norm_eps (float): a small value used in the normalizer to avoid numerical instabilities
norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]
max_u (float): maximum action magnitude, i.e. actions are in [-max_u, max_u]
action_l2 (float): coefficient for L2 penalty on the actions
clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]
scope (str): the scope used for the TensorFlow graph
T (int): the time horizon for rollouts
rollout_batch_size (int): number of parallel rollouts per DDPG agent
subtract_goals (function): function that subtracts goals from each other
relative_goals (boolean): whether or not relative goals should be fed into the network
clip_pos_returns (boolean): whether or not positive returns should be clipped
clip_return (float): clip returns to be in [-clip_return, clip_return]
sample_transitions (function): function that samples from the replay buffer
gamma (float): gamma used for Q learning updates
reuse (boolean): whether or not the networks should be reused
bc_loss: whether or not the behavior cloning loss should be used as an auxiliary loss
q_filter: whether or not a filter on the q value update should be used when training with demonstrations
num_demo: Number of episodes to be used in the demonstration buffer
demo_batch_size: number of samples to be used from the demonstrations buffer, per mpi thread
prm_loss_weight: Weight corresponding to the primary loss
aux_loss_weight: Weight corresponding to the auxiliary loss, also called the cloning loss
"""
if self.clip_return is None:
self.clip_return = np.inf
self.create_actor_critic = import_function(self.network_class)
input_shapes = dims_to_shapes(self.input_dims)
self.dimo = self.input_dims['o']
self.dimg = self.input_dims['g']
self.dimu = self.input_dims['u']
# Prepare staging area for feeding data to the model.
stage_shapes = OrderedDict()
for key in sorted(self.input_dims.keys()):
if key.startswith('info_'):
continue
stage_shapes[key] = (None, *input_shapes[key])
for key in ['o', 'g']:
stage_shapes[key + '_2'] = stage_shapes[key]
stage_shapes['r'] = (None,)
self.stage_shapes = stage_shapes
# Create network.
with tf.variable_scope(self.scope):
self.staging_tf = StagingArea(
dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
shapes=list(self.stage_shapes.values()))
self.buffer_ph_tf = [
tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
self._create_network(reuse=reuse)
# Configure the replay buffer.
buffer_shapes = {key: (self.T-1 if key != 'o' else self.T, *input_shapes[key])
for key, val in input_shapes.items()}
buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
buffer_shapes['ag'] = (self.T, self.dimg)
buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
global DEMO_BUFFER
        DEMO_BUFFER = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)  # initialize the demo buffer the same way as the primary data buffer
def _random_action(self, n):
return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))
def _preprocess_og(self, o, ag, g):
if self.relative_goals:
g_shape = g.shape
g = g.reshape(-1, self.dimg)
ag = ag.reshape(-1, self.dimg)
g = self.subtract_goals(g, ag)
g = g.reshape(*g_shape)
o = np.clip(o, -self.clip_obs, self.clip_obs)
g = np.clip(g, -self.clip_obs, self.clip_obs)
return o, g
def step(self, obs):
actions = self.get_actions(obs['observation'], obs['achieved_goal'], obs['desired_goal'])
return actions, None, None, None
def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,
compute_Q=False):
o, g = self._preprocess_og(o, ag, g)
policy = self.target if use_target_net else self.main
# values to compute
vals = [policy.pi_tf]
if compute_Q:
vals += [policy.Q_pi_tf]
# feed
feed = {
policy.o_tf: o.reshape(-1, self.dimo),
policy.g_tf: g.reshape(-1, self.dimg),
policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
}
ret = self.sess.run(vals, feed_dict=feed)
# action postprocessing
u = ret[0]
noise = noise_eps * self.max_u * np.random.randn(*u.shape) # gaussian noise
u += noise
u = np.clip(u, -self.max_u, self.max_u)
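        # eps-greedy exploration: with probability random_eps (independently per
        # environment), the noisy policy action is replaced by a uniform random
        # action; the binomial draw below picks which rows get overwritten.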
u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u) # eps-greedy
if u.shape[0] == 1:
u = u[0]
u = u.copy()
ret[0] = u
if len(ret) == 1:
return ret[0]
else:
return ret
def init_demo_buffer(self, demoDataFile, update_stats=True): #function that initializes the demo buffer
        demoData = np.load(demoDataFile, allow_pickle=True)  # load the demonstration data; the saved obs/info arrays are object arrays, which newer NumPy versions only load with allow_pickle=True
info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
info_values = [np.empty((self.T - 1, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]
demo_data_obs = demoData['obs']
demo_data_acs = demoData['acs']
demo_data_info = demoData['info']
for epsd in range(self.num_demo): # we initialize the whole demo buffer at the start of the training
            obs, acts, goals, achieved_goals = [], [], [], []
i = 0
for transition in range(self.T - 1):
obs.append([demo_data_obs[epsd][transition].get('observation')])
acts.append([demo_data_acs[epsd][transition]])
goals.append([demo_data_obs[epsd][transition].get('desired_goal')])
achieved_goals.append([demo_data_obs[epsd][transition].get('achieved_goal')])
for idx, key in enumerate(info_keys):
info_values[idx][transition, i] = demo_data_info[epsd][transition][key]
obs.append([demo_data_obs[epsd][self.T - 1].get('observation')])
achieved_goals.append([demo_data_obs[epsd][self.T - 1].get('achieved_goal')])
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(info_keys, info_values):
episode['info_{}'.format(key)] = value
episode = convert_episode_to_batch_major(episode)
global DEMO_BUFFER
DEMO_BUFFER.store_episode(episode) # create the observation dict and append them into the demonstration buffer
logger.debug("Demo buffer size currently ", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size
if update_stats:
# add transitions to normalizer to normalize the demo data as well
episode['o_2'] = episode['o'][:, 1:, :]
episode['ag_2'] = episode['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode)
transitions = self.sample_transitions(episode, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
episode.clear()
logger.info("Demo buffer size: ", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size
def store_episode(self, episode_batch, update_stats=True):
"""
episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T
"""
self.buffer.store_episode(episode_batch)
if update_stats:
# add transitions to normalizer
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
def get_current_buffer_size(self):
return self.buffer.get_current_size()
def _sync_optimizers(self):
self.Q_adam.sync()
self.pi_adam.sync()
def _grads(self):
# Avoid feed_dict here for performance!
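        # Note: the second fetch is main.Q_pi_tf (the critic's value of the
        # actor's action), which this implementation returns in place of
        # pi_loss_tf as the reported actor loss.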
critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([
self.Q_loss_tf,
self.main.Q_pi_tf,
self.Q_grad_tf,
self.pi_grad_tf
])
return critic_loss, actor_loss, Q_grad, pi_grad
def _update(self, Q_grad, pi_grad):
self.Q_adam.update(Q_grad, self.Q_lr)
self.pi_adam.update(pi_grad, self.pi_lr)
def sample_batch(self):
if self.bc_loss: #use demonstration buffer to sample as well if bc_loss flag is set TRUE
transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)
global DEMO_BUFFER
transitions_demo = DEMO_BUFFER.sample(self.demo_batch_size) #sample from the demo buffer
for k, values in transitions_demo.items():
rolloutV = transitions[k].tolist()
for v in values:
rolloutV.append(v.tolist())
transitions[k] = np.array(rolloutV)
else:
transitions = self.buffer.sample(self.batch_size) #otherwise only sample from primary buffer
o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
ag, ag_2 = transitions['ag'], transitions['ag_2']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
return transitions_batch
def stage_batch(self, batch=None):
if batch is None:
batch = self.sample_batch()
assert len(self.buffer_ph_tf) == len(batch)
self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))
def train(self, stage=True):
if stage:
self.stage_batch()
critic_loss, actor_loss, Q_grad, pi_grad = self._grads()
self._update(Q_grad, pi_grad)
return critic_loss, actor_loss
def _init_target_net(self):
self.sess.run(self.init_target_net_op)
def update_target_net(self):
self.sess.run(self.update_target_net_op)
def clear_buffer(self):
self.buffer.clear_buffer()
def _vars(self, scope):
res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
assert len(res) > 0
return res
def _global_vars(self, scope):
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
return res
def _create_network(self, reuse=False):
logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
self.sess = tf_util.get_session()
# running averages
with tf.variable_scope('o_stats') as vs:
if reuse:
vs.reuse_variables()
self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
with tf.variable_scope('g_stats') as vs:
if reuse:
vs.reuse_variables()
self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
# mini-batch sampling.
batch = self.staging_tf.get()
batch_tf = OrderedDict([(key, batch[i])
for i, key in enumerate(self.stage_shapes.keys())])
batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
#choose only the demo buffer samples
        mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis=0)
# networks
with tf.variable_scope('main') as vs:
if reuse:
vs.reuse_variables()
self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
vs.reuse_variables()
with tf.variable_scope('target') as vs:
if reuse:
vs.reuse_variables()
target_batch_tf = batch_tf.copy()
target_batch_tf['o'] = batch_tf['o_2']
target_batch_tf['g'] = batch_tf['g_2']
self.target = self.create_actor_critic(
target_batch_tf, net_type='target', **self.__dict__)
vs.reuse_variables()
assert len(self._vars("main")) == len(self._vars("target"))
# loss functions
target_Q_pi_tf = self.target.Q_pi_tf
clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
        if self.bc_loss == 1 and self.q_filter == 1: # train with demonstrations and use both bc_loss and q_filter
maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1]) #where is the demonstrator action better than actor action according to the critic? choose those samples only
            # define the cloning loss on the actor's actions, restricted to the demo samples that pass the Q-filter above
            self.cloning_loss_tf = tf.reduce_sum(tf.square(
                tf.boolean_mask(tf.boolean_mask(self.main.pi_tf, mask), maskMain, axis=0) -
                tf.boolean_mask(tf.boolean_mask(batch_tf['u'], mask), maskMain, axis=0)))
            self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf) # primary loss scaled by its respective weight prm_loss_weight
            self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u)) # L2 penalty on actions scaled by the same weight prm_loss_weight
            self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf # add the cloning loss to the actor loss as an auxiliary loss scaled by its weight aux_loss_weight
elif self.bc_loss == 1 and self.q_filter == 0: # train with demonstrations without q_filter
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
else: #If not training with demonstrations
self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
assert len(self._vars('main/Q')) == len(Q_grads_tf)
assert len(self._vars('main/pi')) == len(pi_grads_tf)
self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
# optimizers
self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
# polyak averaging
self.main_vars = self._vars('main/Q') + self._vars('main/pi')
self.target_vars = self._vars('target/Q') + self._vars('target/pi')
self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
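        # The target network tracks the main network by Polyak averaging:
        # target <- polyak * target + (1 - polyak) * main. With the default
        # polyak = 0.95, every update moves the target 5% of the way toward
        # the main network's current weights.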
self.init_target_net_op = list(
map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
self.update_target_net_op = list(
map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
# initialize all variables
tf.variables_initializer(self._global_vars('')).run()
self._sync_optimizers()
self._init_target_net()
def logs(self, prefix=''):
logs = []
logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
def __getstate__(self):
"""Our policies can be loaded from pkl, but after unpickling you cannot continue training.
"""
excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',
'main', 'target', 'lock', 'env', 'sample_transitions',
'stage_shapes', 'create_actor_critic']
        state = {k: v for k, v in self.__dict__.items() if all([subname not in k for subname in excluded_subnames])}
state['buffer_size'] = self.buffer_size
state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])
return state
def __setstate__(self, state):
if 'sample_transitions' not in state:
# We don't need this for playing the policy.
state['sample_transitions'] = None
self.__init__(**state)
# set up stats (they are overwritten in __init__)
for k, v in state.items():
if k[-6:] == '_stats':
self.__dict__[k] = v
# load TF variables
vars = [x for x in self._global_vars('') if 'buffer' not in x.name]
        assert len(vars) == len(state["tf"])
node = [tf.assign(var, val) for var, val in zip(vars, state["tf"])]
self.sess.run(node)
def save(self, save_path):
tf_util.save_variables(save_path)
|
import threading
import numpy as np
class ReplayBuffer:
def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):
"""Creates a replay buffer.
Args:
buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
buffer
size_in_transitions (int): the size of the buffer, measured in transitions
T (int): the time horizon for episodes
sample_transitions (function): a function that samples from the replay buffer
"""
self.buffer_shapes = buffer_shapes
self.size = size_in_transitions // T
self.T = T
self.sample_transitions = sample_transitions
# self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
self.buffers = {key: np.empty([self.size, *shape])
for key, shape in buffer_shapes.items()}
# memory management
self.current_size = 0
self.n_transitions_stored = 0
self.lock = threading.Lock()
@property
def full(self):
with self.lock:
return self.current_size == self.size
def sample(self, batch_size):
"""Returns a dict {key: array(batch_size x shapes[key])}
"""
buffers = {}
with self.lock:
assert self.current_size > 0
for key in self.buffers.keys():
buffers[key] = self.buffers[key][:self.current_size]
buffers['o_2'] = buffers['o'][:, 1:, :]
buffers['ag_2'] = buffers['ag'][:, 1:, :]
transitions = self.sample_transitions(buffers, batch_size)
for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())):
assert key in transitions, "key %s missing from transitions" % key
return transitions
def store_episode(self, episode_batch):
"""episode_batch: array(batch_size x (T or T+1) x dim_key)
"""
batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]
assert np.all(np.array(batch_sizes) == batch_sizes[0])
batch_size = batch_sizes[0]
with self.lock:
idxs = self._get_storage_idx(batch_size)
# load inputs into buffers
for key in self.buffers.keys():
self.buffers[key][idxs] = episode_batch[key]
self.n_transitions_stored += batch_size * self.T
def get_current_episode_size(self):
with self.lock:
return self.current_size
def get_current_size(self):
with self.lock:
return self.current_size * self.T
def get_transitions_stored(self):
with self.lock:
return self.n_transitions_stored
def clear_buffer(self):
with self.lock:
self.current_size = 0
def _get_storage_idx(self, inc=None):
inc = inc or 1 # size increment
assert inc <= self.size, "Batch committed to replay is too large!"
# go consecutively until you hit the end, and then go randomly.
if self.current_size+inc <= self.size:
idx = np.arange(self.current_size, self.current_size+inc)
elif self.current_size < self.size:
overflow = inc - (self.size - self.current_size)
idx_a = np.arange(self.current_size, self.size)
idx_b = np.random.randint(0, self.current_size, overflow)
idx = np.concatenate([idx_a, idx_b])
else:
idx = np.random.randint(0, self.size, inc)
# update replay size
self.current_size = min(self.size, self.current_size+inc)
if inc == 1:
idx = idx[0]
return idx
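if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the original module): store a toy
    # episode and sample from it. The uniform sampler below is an assumption made
    # for illustration only; the real sampler is built by
    # baselines.her.her_sampler.make_sample_her_transitions.
    def uniform_sample(episode_batch, batch_size):
        n_episodes, horizon = episode_batch['u'].shape[:2]
        ep = np.random.randint(0, n_episodes, batch_size)
        t = np.random.randint(0, horizon, batch_size)
        transitions = {key: episode_batch[key][ep, t] for key in episode_batch.keys()}
        transitions['r'] = np.zeros(batch_size)  # dummy rewards for the smoke test
        return transitions

    shapes = {'o': (3, 2), 'ag': (3, 2), 'g': (2, 2), 'u': (2, 1)}  # T = 2: 'o'/'ag' hold T+1 steps
    buffer = ReplayBuffer(shapes, size_in_transitions=10, T=2,
                          sample_transitions=uniform_sample)
    episode = {key: np.zeros((1,) + shape) for key, shape in shapes.items()}
    buffer.store_episode(episode)
    print(sorted(buffer.sample(batch_size=4).keys()))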
|
import os
import click
import numpy as np
import json
from mpi4py import MPI
from baselines import logger
from baselines.common import set_global_seeds, tf_util
from baselines.common.mpi_moments import mpi_moments
import baselines.her.experiment.config as config
from baselines.her.rollout import RolloutWorker
def mpi_average(value):
if not isinstance(value, list):
value = [value]
if not any(value):
value = [0.]
return mpi_moments(np.array(value))[0]
def train(*, policy, rollout_worker, evaluator,
n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval,
save_path, demo_file, **kwargs):
rank = MPI.COMM_WORLD.Get_rank()
if save_path:
latest_policy_path = os.path.join(save_path, 'policy_latest.pkl')
best_policy_path = os.path.join(save_path, 'policy_best.pkl')
periodic_policy_path = os.path.join(save_path, 'policy_{}.pkl')
logger.info("Training...")
best_success_rate = -1
    if policy.bc_loss == 1:
        policy.init_demo_buffer(demo_file)  # initialize demo buffer if training with demonstrations
# num_timesteps = n_epochs * n_cycles * rollout_length * number of rollout workers
for epoch in range(n_epochs):
# train
rollout_worker.clear_history()
for _ in range(n_cycles):
episode = rollout_worker.generate_rollouts()
policy.store_episode(episode)
for _ in range(n_batches):
policy.train()
policy.update_target_net()
# test
evaluator.clear_history()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
# record logs
logger.record_tabular('epoch', epoch)
for key, val in evaluator.logs('test'):
logger.record_tabular(key, mpi_average(val))
for key, val in rollout_worker.logs('train'):
logger.record_tabular(key, mpi_average(val))
for key, val in policy.logs():
logger.record_tabular(key, mpi_average(val))
if rank == 0:
logger.dump_tabular()
# save the policy if it's better than the previous ones
success_rate = mpi_average(evaluator.current_success_rate())
if rank == 0 and success_rate >= best_success_rate and save_path:
best_success_rate = success_rate
logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
evaluator.save_policy(best_policy_path)
evaluator.save_policy(latest_policy_path)
if rank == 0 and policy_save_interval > 0 and epoch % policy_save_interval == 0 and save_path:
policy_path = periodic_policy_path.format(epoch)
logger.info('Saving periodic policy to {} ...'.format(policy_path))
evaluator.save_policy(policy_path)
# make sure that different threads have different seeds
local_uniform = np.random.uniform(size=(1,))
root_uniform = local_uniform.copy()
MPI.COMM_WORLD.Bcast(root_uniform, root=0)
if rank != 0:
assert local_uniform[0] != root_uniform[0]
return policy
def learn(*, network, env, total_timesteps,
seed=None,
eval_env=None,
replay_strategy='future',
policy_save_interval=5,
clip_return=True,
demo_file=None,
override_params=None,
load_path=None,
save_path=None,
**kwargs
):
override_params = override_params or {}
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
num_cpu = MPI.COMM_WORLD.Get_size()
# Seed everything.
rank_seed = seed + 1000000 * rank if seed is not None else None
set_global_seeds(rank_seed)
# Prepare params.
params = config.DEFAULT_PARAMS
env_name = env.spec.id
params['env_name'] = env_name
params['replay_strategy'] = replay_strategy
if env_name in config.DEFAULT_ENV_PARAMS:
params.update(config.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
params.update(**override_params) # makes it possible to override any parameter
with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
json.dump(params, f)
params = config.prepare_params(params)
params['rollout_batch_size'] = env.num_envs
if demo_file is not None:
params['bc_loss'] = 1
params.update(kwargs)
config.log_params(params, logger=logger)
if num_cpu == 1:
logger.warn()
logger.warn('*** Warning ***')
logger.warn(
'You are running HER with just a single MPI worker. This will work, but the ' +
'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) ' +
'were obtained with --num_cpu 19. This makes a significant difference and if you ' +
'are looking to reproduce those results, be aware of this. Please also refer to ' +
'https://github.com/openai/baselines/issues/314 for further details.')
logger.warn('****************')
logger.warn()
dims = config.configure_dims(params)
policy = config.configure_ddpg(dims=dims, params=params, clip_return=clip_return)
if load_path is not None:
tf_util.load_variables(load_path)
rollout_params = {
'exploit': False,
'use_target_net': False,
'use_demo_states': True,
'compute_Q': False,
'T': params['T'],
}
eval_params = {
'exploit': True,
'use_target_net': params['test_with_polyak'],
'use_demo_states': False,
'compute_Q': True,
'T': params['T'],
}
for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
rollout_params[name] = params[name]
eval_params[name] = params[name]
eval_env = eval_env or env
rollout_worker = RolloutWorker(env, policy, dims, logger, monitor=True, **rollout_params)
evaluator = RolloutWorker(eval_env, policy, dims, logger, **eval_params)
n_cycles = params['n_cycles']
n_epochs = total_timesteps // n_cycles // rollout_worker.T // rollout_worker.rollout_batch_size
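    # e.g. the defaults total_timesteps=5e5, n_cycles=50, rollout_batch_size=2
    # with T=50 (FetchReach) give 5e5 // 50 // 50 // 2 = 100 epochs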
return train(
save_path=save_path, policy=policy, rollout_worker=rollout_worker,
evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'],
n_cycles=params['n_cycles'], n_batches=params['n_batches'],
policy_save_interval=policy_save_interval, demo_file=demo_file)
@click.command()
@click.option('--env', type=str, default='FetchReach-v1', help='the name of the OpenAI Gym environment that you want to train on')
@click.option('--total_timesteps', type=int, default=int(5e5), help='the number of timesteps to run')
@click.option('--seed', type=int, default=0, help='the random seed used to seed both the environment and the training code')
@click.option('--policy_save_interval', type=int, default=5, help='the interval with which policy pickles are saved. If set to 0, only the best and latest policy will be pickled.')
@click.option('--replay_strategy', type=click.Choice(['future', 'none']), default='future', help='the HER replay strategy to be used. "future" uses HER, "none" disables HER.')
@click.option('--clip_return', type=int, default=1, help='whether or not returns should be clipped')
@click.option('--demo_file', type=str, default='PATH/TO/DEMO/DATA/FILE.npz', help='demo data file path')
def main(**kwargs):
learn(**kwargs)
if __name__ == '__main__':
main()
|
import threading
import numpy as np
from mpi4py import MPI
import tensorflow as tf
from baselines.her.util import reshape_for_broadcasting
class Normalizer:
def __init__(self, size, eps=1e-2, default_clip_range=np.inf, sess=None):
"""A normalizer that ensures that observations are approximately distributed according to
a standard Normal distribution (i.e. have mean zero and variance one).
Args:
size (int): the size of the observation to be normalized
eps (float): a small constant that avoids underflows
default_clip_range (float): normalized observations are clipped to be in
[-default_clip_range, default_clip_range]
sess (object): the TensorFlow session to be used
"""
self.size = size
self.eps = eps
self.default_clip_range = default_clip_range
self.sess = sess if sess is not None else tf.get_default_session()
self.local_sum = np.zeros(self.size, np.float32)
self.local_sumsq = np.zeros(self.size, np.float32)
self.local_count = np.zeros(1, np.float32)
self.sum_tf = tf.get_variable(
initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='sum',
trainable=False, dtype=tf.float32)
self.sumsq_tf = tf.get_variable(
initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq',
trainable=False, dtype=tf.float32)
self.count_tf = tf.get_variable(
initializer=tf.ones_initializer(), shape=self.local_count.shape, name='count',
trainable=False, dtype=tf.float32)
self.mean = tf.get_variable(
initializer=tf.zeros_initializer(), shape=(self.size,), name='mean',
trainable=False, dtype=tf.float32)
self.std = tf.get_variable(
initializer=tf.ones_initializer(), shape=(self.size,), name='std',
trainable=False, dtype=tf.float32)
self.count_pl = tf.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
self.sum_pl = tf.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
self.sumsq_pl = tf.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)
self.update_op = tf.group(
self.count_tf.assign_add(self.count_pl),
self.sum_tf.assign_add(self.sum_pl),
self.sumsq_tf.assign_add(self.sumsq_pl)
)
self.recompute_op = tf.group(
tf.assign(self.mean, self.sum_tf / self.count_tf),
tf.assign(self.std, tf.sqrt(tf.maximum(
tf.square(self.eps),
self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / self.count_tf)
))),
)
self.lock = threading.Lock()
def update(self, v):
v = v.reshape(-1, self.size)
with self.lock:
self.local_sum += v.sum(axis=0)
self.local_sumsq += (np.square(v)).sum(axis=0)
self.local_count[0] += v.shape[0]
def normalize(self, v, clip_range=None):
if clip_range is None:
clip_range = self.default_clip_range
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return tf.clip_by_value((v - mean) / std, -clip_range, clip_range)
def denormalize(self, v):
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return mean + v * std
def _mpi_average(self, x):
buf = np.zeros_like(x)
MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
buf /= MPI.COMM_WORLD.Get_size()
return buf
def synchronize(self, local_sum, local_sumsq, local_count, root=None):
local_sum[...] = self._mpi_average(local_sum)
local_sumsq[...] = self._mpi_average(local_sumsq)
local_count[...] = self._mpi_average(local_count)
return local_sum, local_sumsq, local_count
def recompute_stats(self):
with self.lock:
# Copy over results.
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
# Reset.
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
# We perform the synchronization outside of the lock to keep the critical section as short
# as possible.
synced_sum, synced_sumsq, synced_count = self.synchronize(
local_sum=local_sum, local_sumsq=local_sumsq, local_count=local_count)
self.sess.run(self.update_op, feed_dict={
self.count_pl: synced_count,
self.sum_pl: synced_sum,
self.sumsq_pl: synced_sumsq,
})
self.sess.run(self.recompute_op)
class IdentityNormalizer:
def __init__(self, size, std=1.):
self.size = size
self.mean = tf.zeros(self.size, tf.float32)
self.std = std * tf.ones(self.size, tf.float32)
def update(self, x):
pass
def normalize(self, x, clip_range=None):
return x / self.std
def denormalize(self, x):
return self.std * x
def synchronize(self):
pass
def recompute_stats(self):
pass
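if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the original module): feed
    # Gaussian data with mean 5 and std 2 through a Normalizer and check that
    # normalized samples come out roughly standard normal. recompute_stats()
    # goes through MPI, which works fine as a single process.
    with tf.Session() as sess:
        normalizer = Normalizer(size=3, sess=sess)
        sess.run(tf.global_variables_initializer())
        data = np.random.normal(loc=5., scale=2., size=(1000, 3)).astype(np.float32)
        normalizer.update(data)
        normalizer.recompute_stats()
        normalized = sess.run(normalizer.normalize(tf.constant(data)))
        print('mean ~ 0:', normalized.mean(axis=0))
        print('std  ~ 1:', normalized.std(axis=0))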
|
# DEPRECATED, use baselines.common.plot_util instead
import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns; sns.set()
import glob2
import argparse
def smooth_reward_curve(x, y):
halfwidth = int(np.ceil(len(x) / 60)) # Halfwidth of our smoothing convolution
k = halfwidth
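    # Box-filter moving average of width 2k + 1; dividing by the convolution of
    # a ones vector keeps the edges correctly normalized where the window is
    # truncated.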
xsmoo = x
ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
mode='same')
return xsmoo, ysmoo
def load_results(file):
if not os.path.exists(file):
return None
with open(file, 'r') as f:
lines = [line for line in f]
if len(lines) < 2:
return None
keys = [name.strip() for name in lines[0].split(',')]
data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
if data.ndim == 1:
data = data.reshape(1, -1)
assert data.ndim == 2
assert data.shape[-1] == len(keys)
result = {}
for idx, key in enumerate(keys):
result[key] = data[:, idx]
return result
def pad(xs, value=np.nan):
maxlen = np.max([len(x) for x in xs])
padded_xs = []
for x in xs:
if x.shape[0] >= maxlen:
padded_xs.append(x)
padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
x_padded = np.concatenate([x, padding], axis=0)
assert x_padded.shape[1:] == x.shape[1:]
assert x_padded.shape[0] == maxlen
padded_xs.append(x_padded)
return np.array(padded_xs)
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('--smooth', type=int, default=1)
args = parser.parse_args()
# Load all data.
data = {}
paths = [os.path.abspath(os.path.join(path, '..')) for path in glob2.glob(os.path.join(args.dir, '**', 'progress.csv'))]
for curr_path in paths:
if not os.path.isdir(curr_path):
continue
results = load_results(os.path.join(curr_path, 'progress.csv'))
if not results:
print('skipping {}'.format(curr_path))
continue
print('loading {} ({})'.format(curr_path, len(results['epoch'])))
with open(os.path.join(curr_path, 'params.json'), 'r') as f:
params = json.load(f)
success_rate = np.array(results['test/success_rate'])
epoch = np.array(results['epoch']) + 1
env_id = params['env_name']
replay_strategy = params['replay_strategy']
if replay_strategy == 'future':
config = 'her'
else:
config = 'ddpg'
if 'Dense' in env_id:
config += '-dense'
else:
config += '-sparse'
env_id = env_id.replace('Dense', '')
# Process and smooth data.
assert success_rate.shape == epoch.shape
x = epoch
y = success_rate
if args.smooth:
x, y = smooth_reward_curve(epoch, success_rate)
assert x.shape == y.shape
if env_id not in data:
data[env_id] = {}
if config not in data[env_id]:
data[env_id][config] = []
data[env_id][config].append((x, y))
# Plot data.
for env_id in sorted(data.keys()):
print('exporting {}'.format(env_id))
plt.clf()
for config in sorted(data[env_id].keys()):
xs, ys = zip(*data[env_id][config])
xs, ys = pad(xs), pad(ys)
assert xs.shape == ys.shape
plt.plot(xs[0], np.nanmedian(ys, axis=0), label=config)
plt.fill_between(xs[0], np.nanpercentile(ys, 25, axis=0), np.nanpercentile(ys, 75, axis=0), alpha=0.25)
plt.title(env_id)
plt.xlabel('Epoch')
plt.ylabel('Median Success Rate')
plt.legend()
plt.savefig(os.path.join(args.dir, 'fig_{}.png'.format(env_id)))
|
import os
import numpy as np
import gym
from baselines import logger
from baselines.her.ddpg import DDPG
from baselines.her.her_sampler import make_sample_her_transitions
from baselines.bench.monitor import Monitor
DEFAULT_ENV_PARAMS = {
'FetchReach-v1': {
'n_cycles': 10,
},
}
DEFAULT_PARAMS = {
# env
'max_u': 1., # max absolute value of actions on different coordinates
# ddpg
'layers': 3, # number of layers in the critic/actor networks
'hidden': 256, # number of neurons in each hidden layers
'network_class': 'baselines.her.actor_critic:ActorCritic',
'Q_lr': 0.001, # critic learning rate
'pi_lr': 0.001, # actor learning rate
'buffer_size': int(1E6), # for experience replay
'polyak': 0.95, # polyak averaging coefficient
'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)
'clip_obs': 200.,
'scope': 'ddpg', # can be tweaked for testing
'relative_goals': False,
# training
'n_cycles': 50, # per epoch
'rollout_batch_size': 2, # per mpi thread
'n_batches': 40, # training batches per cycle
'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'test_with_polyak': False, # run test episodes with the target network
# exploration
'random_eps': 0.3, # percentage of time a random action is taken
'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# HER
'replay_strategy': 'future', # supported modes: future, none
'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future
# normalization
'norm_eps': 0.01, # epsilon used for observation normalization
    'norm_clip': 5,  # normalized observations are clipped to this value
    'bc_loss': 0,  # whether or not to use the behavior cloning loss as an auxiliary loss
'q_filter': 0, # whether or not a Q value filter should be used on the Actor outputs
'num_demo': 100, # number of expert demo episodes
    'demo_batch_size': 128,  # number of samples to be used from the demonstrations buffer, per MPI thread (e.g. 128/1024 or 32/256)
    'prm_loss_weight': 0.001,  # weight corresponding to the primary loss
    'aux_loss_weight': 0.0078,  # weight corresponding to the auxiliary loss, also called the cloning loss
}
CACHED_ENVS = {}
def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
    its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
def prepare_params(kwargs):
# DDPG params
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env(subrank=None):
env = gym.make(env_name)
if subrank is not None and logger.get_dir() is not None:
try:
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
MPI = None
mpi_rank = 0
            logger.warn('Running with a single MPI process. This should work, but the results may differ from the ones published in Plappert et al.')
max_episode_steps = env._max_episode_steps
env = Monitor(env,
os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(subrank)),
allow_early_resets=True)
# hack to re-expose _max_episode_steps (ideally should replace reliance on it downstream)
env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)
return env
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
kwargs['max_u'] = np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u']
kwargs['gamma'] = 1. - 1. / kwargs['T']
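    # With gamma = 1 - 1/T, the effective horizon of the discounted return
    # matches the episode length, e.g. T = 50 gives gamma = 0.98.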
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers',
'network_class',
'polyak',
'batch_size', 'Q_lr', 'pi_lr',
'norm_eps', 'norm_clip', 'max_u',
'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
def reward_fun(ag_2, g, info): # vectorized
return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
# Prepare configuration for HER.
her_params = {
'reward_fun': reward_fun,
}
for name in ['replay_strategy', 'replay_k']:
her_params[name] = params[name]
params['_' + name] = her_params[name]
del params[name]
sample_her_transitions = make_sample_her_transitions(**her_params)
return sample_her_transitions
def simple_goal_subtract(a, b):
assert a.shape == b.shape
return a - b
def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
sample_her_transitions = configure_her(params)
# Extract relevant parameters.
gamma = params['gamma']
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
input_dims = dims.copy()
# DDPG agent
env = cached_make_env(params['make_env'])
env.reset()
ddpg_params.update({'input_dims': input_dims, # agent takes an input observations
'T': params['T'],
'clip_pos_returns': True, # clip positive returns
'clip_return': (1. / (1. - gamma)) if clip_return else np.inf, # max abs of return
'rollout_batch_size': rollout_batch_size,
'subtract_goals': simple_goal_subtract,
'sample_transitions': sample_her_transitions,
'gamma': gamma,
'bc_loss': params['bc_loss'],
'q_filter': params['q_filter'],
'num_demo': params['num_demo'],
'demo_batch_size': params['demo_batch_size'],
'prm_loss_weight': params['prm_loss_weight'],
'aux_loss_weight': params['aux_loss_weight'],
})
ddpg_params['info'] = {
'env_name': params['env_name'],
}
policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
return policy
def configure_dims(params):
env = cached_make_env(params['make_env'])
env.reset()
obs, _, _, info = env.step(env.action_space.sample())
dims = {
'o': obs['observation'].shape[0],
'u': env.action_space.shape[0],
'g': obs['desired_goal'].shape[0],
}
for key, value in info.items():
value = np.array(value)
if value.ndim == 0:
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
return dims
|
# DEPRECATED, use --play flag to baselines.run instead
import click
import numpy as np
import pickle
from baselines import logger
from baselines.common import set_global_seeds
import baselines.her.experiment.config as config
from baselines.her.rollout import RolloutWorker
@click.command()
@click.argument('policy_file', type=str)
@click.option('--seed', type=int, default=0)
@click.option('--n_test_rollouts', type=int, default=10)
@click.option('--render', type=int, default=1)
def main(policy_file, seed, n_test_rollouts, render):
set_global_seeds(seed)
# Load policy.
with open(policy_file, 'rb') as f:
policy = pickle.load(f)
env_name = policy.info['env_name']
# Prepare params.
params = config.DEFAULT_PARAMS
if env_name in config.DEFAULT_ENV_PARAMS:
params.update(config.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
params['env_name'] = env_name
params = config.prepare_params(params)
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
eval_params = {
'exploit': True,
'use_target_net': params['test_with_polyak'],
'compute_Q': True,
'rollout_batch_size': 1,
'render': bool(render),
}
for name in ['T', 'gamma', 'noise_eps', 'random_eps']:
eval_params[name] = params[name]
evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
evaluator.seed(seed)
# Run evaluation.
evaluator.clear_history()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
# record logs
for key, val in evaluator.logs('test'):
logger.record_tabular(key, np.mean(val))
logger.dump_tabular()
if __name__ == '__main__':
main()
|
"""Data generation for the case of a single block pick and place in Fetch Env"""
import gym
import numpy as np
actions = []
observations = []
infos = []
def main():
env = gym.make('FetchPickAndPlace-v1')
numItr = 100
initStateSpace = "random"
env.reset()
print("Reset!")
while len(actions) < numItr:
obs = env.reset()
print("ITERATION NUMBER ", len(actions))
goToGoal(env, obs)
fileName = "data_fetch"
fileName += "_" + initStateSpace
fileName += "_" + str(numItr)
fileName += ".npz"
np.savez_compressed(fileName, acs=actions, obs=observations, info=infos) # save the file
def goToGoal(env, lastObs):
goal = lastObs['desired_goal']
objectPos = lastObs['observation'][3:6]
object_rel_pos = lastObs['observation'][6:9]
episodeAcs = []
episodeObs = []
episodeInfo = []
object_oriented_goal = object_rel_pos.copy()
object_oriented_goal[2] += 0.03 # first make the gripper go slightly above the object
timeStep = 0 #count the total number of timesteps
episodeObs.append(lastObs)
while np.linalg.norm(object_oriented_goal) >= 0.005 and timeStep <= env._max_episode_steps:
env.render()
action = [0, 0, 0, 0]
object_oriented_goal = object_rel_pos.copy()
object_oriented_goal[2] += 0.03
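        # Simple proportional controller: command a velocity proportional to the
        # positional error (gain of 6); the env clips actions to its [-1, 1] range.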
for i in range(len(object_oriented_goal)):
action[i] = object_oriented_goal[i]*6
action[len(action)-1] = 0.05 #open
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
    while np.linalg.norm(object_rel_pos) >= 0.005 and timeStep <= env._max_episode_steps:
env.render()
action = [0, 0, 0, 0]
for i in range(len(object_rel_pos)):
action[i] = object_rel_pos[i]*6
action[len(action)-1] = -0.005
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
    while np.linalg.norm(goal - objectPos) >= 0.01 and timeStep <= env._max_episode_steps:
env.render()
action = [0, 0, 0, 0]
for i in range(len(goal - objectPos)):
action[i] = (goal - objectPos)[i]*6
action[len(action)-1] = -0.005
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
while True: #limit the number of timesteps in the episode to a fixed duration
env.render()
action = [0, 0, 0, 0]
action[len(action)-1] = -0.005 # keep the gripper closed
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
if timeStep >= env._max_episode_steps: break
actions.append(episodeAcs)
observations.append(episodeObs)
infos.append(episodeInfo)
if __name__ == "__main__":
main()
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
def build_q_func(network, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
if isinstance(network, str):
from baselines.common.models import get_network_builder
network = get_network_builder(network)(**network_kwargs)
def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
with tf.variable_scope(scope, reuse=reuse):
latent = network(input_placeholder)
if isinstance(latent, tuple):
if latent[1] is not None:
raise NotImplementedError("DQN is not compatible with recurrent policies yet")
latent = latent[0]
latent = layers.flatten(latent)
with tf.variable_scope("action_value"):
action_out = latent
for hidden in hiddens:
action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
if layer_norm:
action_out = layers.layer_norm(action_out, center=True, scale=True)
action_out = tf.nn.relu(action_out)
action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)
if dueling:
with tf.variable_scope("state_value"):
state_out = latent
for hidden in hiddens:
state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
if layer_norm:
state_out = layers.layer_norm(state_out, center=True, scale=True)
state_out = tf.nn.relu(state_out)
state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
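                # Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a));
                # centering the advantages keeps V and A identifiable.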
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
q_out = state_score + action_scores_centered
else:
q_out = action_scores
return q_out
return q_func_builder
|
from baselines.deepq import models # noqa
from baselines.deepq.build_graph import build_act, build_train # noqa
from baselines.deepq.deepq import learn, load_act # noqa
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
from baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
|
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
    update epsilon to a new value, if negative no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to choose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon to a new value, if negative no update happens
(default: no update)
reset_ph: bool
reset the perturbed policy by sampling a new perturbation
update_param_noise_threshold_ph: float
the desired threshold for the difference between non-perturbed and perturbed policy
update_param_noise_scale_ph: bool
whether or not to update the scale of the noise for the next time it is re-perturbed
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes the Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
    importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
where Q' lags behind Q to stabilize the learning. For example, for Atari,
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""Returns the name of current scope as a string, e.g. deepq/q_func"""
return tf.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
if var not in tf.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
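        # Epsilon-greedy inside the graph: each batch element takes a uniform
        # random action with probability eps and the greedy argmax action
        # otherwise; stochastic_ph can disable exploration altogether.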
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse, the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
See the top of the file for details.
update_target: () -> ()
copy the parameters from the optimized Q function to the target Q function.
See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func")
# target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values}
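# A minimal numpy sketch (illustrative only) of the double-Q target constructed
# above: the online network picks the argmax actions at t+1 and the target
# network evaluates them, which reduces the overestimation bias of vanilla DQN.
def _double_q_target_sketch(q_tp1_online, q_tp1_target, rew, done, gamma):
    import numpy as np
    best_actions = np.argmax(q_tp1_online, axis=1)
    q_tp1_best = q_tp1_target[np.arange(len(best_actions)), best_actions]
    return rew + gamma * (1.0 - done) * q_tp1_best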
|
from baselines.common.input import observation_input
from baselines.common.tf_util import adjust_shape
# ================================================================
# Placeholders
# ================================================================
class TfInput(object):
def __init__(self, name="(unnamed)"):
"""Generalized Tensorflow placeholder. The main differences are:
- possibly uses multiple placeholders internally and returns multiple values
- can apply light postprocessing to the value feed to placeholder.
"""
self.name = name
def get(self):
"""Return the tf variable(s) representing the possibly postprocessed value
of placeholder(s).
"""
raise NotImplementedError
def make_feed_dict(self, data):
"""Given data input it to the placeholder(s)."""
raise NotImplementedError
class PlaceholderTfInput(TfInput):
def __init__(self, placeholder):
"""Wrapper for regular tensorflow placeholder."""
super().__init__(placeholder.name)
self._placeholder = placeholder
def get(self):
return self._placeholder
def make_feed_dict(self, data):
return {self._placeholder: adjust_shape(self._placeholder, data)}
class ObservationInput(PlaceholderTfInput):
def __init__(self, observation_space, name=None):
"""Creates an input placeholder tailored to a specific observation space
Parameters
----------
observation_space:
observation space of the environment. Should be one of the gym.spaces types
name: str
tensorflow name of the underlying placeholder
"""
inpt, self.processed_inpt = observation_input(observation_space, name=name)
super().__init__(inpt)
def get(self):
return self.processed_inpt
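# A minimal usage sketch (gym is an assumed import here; this module does not
# import it): build an ObservationInput for a Box space and construct a feed
# dict for a batch of one observation.
def _observation_input_example():
    import gym
    import numpy as np
    space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
    inpt = ObservationInput(space, name="obs")
    feed_dict = inpt.make_feed_dict(np.zeros((1, 4), dtype=np.float32))
    return inpt.get(), feed_dict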
|
import os
import tempfile
import tensorflow as tf
import zipfile
import cloudpickle
import numpy as np
import baselines.common.tf_util as U
from baselines.common.tf_util import load_variables, save_variables
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.common import set_global_seeds
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.tf_util import get_session
from baselines.deepq.models import build_q_func
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
self.initial_state = None
@staticmethod
def load_act(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act = deepq.build_act(**act_params)
sess = tf.Session()
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
load_variables(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def step(self, observation, **kwargs):
# DQN doesn't use RNNs so we ignore states and masks
kwargs.pop('S', None)
kwargs.pop('M', None)
return self._act([observation], **kwargs), None, None, None
def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
def save(self, path):
save_variables(path)
def load_act(path):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load_act(path)
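# A minimal usage sketch (hypothetical path): restore an act function that was
# previously written with ActWrapper.save_act and query it for one observation.
def _load_act_example(obs):
    act = load_act("model.pkl")
    return act(obs[None])[0]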
def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
PRNG seed. Runs with the same seed should give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of training. If you do not wish to restore the best version at
the end of training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, it defaults to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If the callback returns True, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act
|
import numpy as np
import random
from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows, the oldest memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
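# A minimal usage sketch (shapes are hypothetical): store a few transitions and
# sample a training batch once the buffer has data.
def _replay_buffer_example():
    buf = ReplayBuffer(size=100)
    obs = np.zeros(4, dtype=np.float32)
    for _ in range(10):
        buf.add(obs, action=0, reward=1.0, obs_tp1=obs, done=False)
    obses_t, actions, rewards, obses_tp1, dones = buf.sample(batch_size=4)
    assert obses_t.shape == (4, 4) and rewards.shape == (4,)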
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows, the oldest memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, len(self._storage) - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
Compared to ReplayBuffer.sample, it also returns
importance weights and idxes of the sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
indexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
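# A minimal usage sketch (values are hypothetical): sample with importance
# weights, then feed |TD error| + eps back as the new priorities, mirroring what
# learn() does in the training loop above.
def _prioritized_replay_example():
    buf = PrioritizedReplayBuffer(size=100, alpha=0.6)
    obs = np.zeros(4, dtype=np.float32)
    for _ in range(10):
        buf.add(obs, 0, 1.0, obs, False)
    (obses_t, actions, rewards, obses_tp1, dones,
     weights, idxes) = buf.sample(batch_size=4, beta=0.4)
    td_errors = np.random.randn(4)  # stand-in for the output of a train step
    buf.update_priorities(idxes, np.abs(td_errors) + 1e-6)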
|
def atari():
return dict(
network='conv_only',
lr=1e-4,
buffer_size=10000,
exploration_fraction=0.1,
exploration_final_eps=0.01,
train_freq=4,
learning_starts=10000,
target_network_update_freq=1000,
gamma=0.99,
prioritized_replay=True,
prioritized_replay_alpha=0.6,
checkpoint_freq=10000,
checkpoint_path=None,
dueling=True
)
def retro():
return atari()
|
import gym
import itertools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import baselines.common.tf_util as U
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.schedules import LinearSchedule
def model(inpt, num_actions, scope, reuse=False):
"""This model takes as input an observation and returns values of all actions."""
with tf.variable_scope(scope, reuse=reuse):
out = inpt
out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
if __name__ == '__main__':
with U.make_session(num_cpu=8):
# Create the environment
env = gym.make("CartPole-v0")
# Create all the functions necessary to train the model
act, train, update_target, debug = deepq.build_train(
make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
q_func=model,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
)
# Create the replay buffer
replay_buffer = ReplayBuffer(50000)
# Create the schedule for exploration starting from 1 (every action is random) down to
# 0.02 (98% of actions are selected according to values predicted by the model).
exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
obs = env.reset()
for t in itertools.count():
# Take action and update exploration to the newest value
action = act(obs[None], update_eps=exploration.value(t))[0]
new_obs, rew, done, _ = env.step(action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0)
is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
if is_solved:
# Show off the result
env.render()
else:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if t > 1000:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
# Update target network periodically.
if t % 1000 == 0:
update_target()
if done and len(episode_rewards) % 10 == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", len(episode_rewards))
logger.record_tabular("mean episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
|
import gym
from baselines import deepq
def main():
env = gym.make("PongNoFrameskip-v4")
env = deepq.wrap_atari_dqn(env)
model = deepq.learn(
env,
"conv_only",
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
total_timesteps=0
)
while True:
obs, done = env.reset(), False
episode_rew = 0
while not done:
env.render()
obs, rew, done, _ = env.step(model(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
if __name__ == '__main__':
main()
|
import gym
from baselines import deepq
def callback(lcl, _glb):
# stop training if reward exceeds 199
is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
return is_solved
def main():
env = gym.make("CartPole-v0")
act = deepq.learn(
env,
network='mlp',
lr=1e-3,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
print("Saving model to cartpole_model.pkl")
act.save("cartpole_model.pkl")
if __name__ == '__main__':
main()
|
import gym
from baselines import deepq
def main():
env = gym.make("CartPole-v0")
act = deepq.learn(env, network='mlp', total_timesteps=0, load_path="cartpole_model.pkl")
while True:
obs, done = env.reset(), False
episode_rew = 0
while not done:
env.render()
obs, rew, done, _ = env.step(act(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
if __name__ == '__main__':
main()
|
import gym
from baselines import deepq
from baselines.common import models
def main():
env = gym.make("MountainCar-v0")
# Enabling layer_norm here is important for parameter space noise!
act = deepq.learn(
env,
network=models.mlp(num_hidden=64, num_layers=1),
lr=1e-3,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.1,
print_freq=10,
param_noise=True
)
print("Saving model to mountaincar_model.pkl")
act.save("mountaincar_model.pkl")
if __name__ == '__main__':
main()
|
from baselines import deepq
from baselines import bench
from baselines import logger
from baselines.common.atari_wrappers import make_atari
def main():
logger.configure()
env = make_atari('PongNoFrameskip-v4')
env = bench.Monitor(env, logger.get_dir())
env = deepq.wrap_atari_dqn(env)
model = deepq.learn(
env,
"conv_only",
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
lr=1e-4,
total_timesteps=int(1e7),
buffer_size=10000,
exploration_fraction=0.1,
exploration_final_eps=0.01,
train_freq=4,
learning_starts=10000,
target_network_update_freq=1000,
gamma=0.99,
)
model.save('pong_model.pkl')
env.close()
if __name__ == '__main__':
main()
|
import gym
from baselines import deepq
from baselines.common import models
def main():
env = gym.make("MountainCar-v0")
act = deepq.learn(
env,
network=models.mlp(num_layers=1, num_hidden=64),
total_timesteps=0,
load_path='mountaincar_model.pkl'
)
while True:
obs, done = env.reset(), False
episode_rew = 0
while not done:
env.render()
obs, rew, done, _ = env.step(act(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
if __name__ == '__main__':
main()
|
import tensorflow as tf
def gmatmul(a, b, transpose_a=False, transpose_b=False, reduce_dim=None):
assert reduce_dim is not None
# weird batch matmul
if len(a.get_shape()) == 2 and len(b.get_shape()) > 2:
# reshape reduce_dim to the left most dim in b
b_shape = b.get_shape()
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(reduce_dim)
b_dims.insert(0, reduce_dim)
b = tf.transpose(b, b_dims)
b_t_shape = b.get_shape()
b = tf.reshape(b, [int(b_shape[reduce_dim]), -1])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, b_t_shape)
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(0)
b_dims.insert(reduce_dim, 0)
result = tf.transpose(result, b_dims)
return result
elif len(a.get_shape()) > 2 and len(b.get_shape()) == 2:
# reshape reduce_dim to the right most dim in a
a_shape = a.get_shape()
outter_dim = len(a_shape) - 1
reduce_dim = len(a_shape) - reduce_dim - 1
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(reduce_dim)
a_dims.insert(outter_dim, reduce_dim)
a = tf.transpose(a, a_dims)
a_t_shape = a.get_shape()
a = tf.reshape(a, [-1, int(a_shape[reduce_dim])])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, a_t_shape)
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(outter_dim)
a_dims.insert(reduce_dim, outter_dim)
result = tf.transpose(result, a_dims)
return result
elif len(a.get_shape()) == 2 and len(b.get_shape()) == 2:
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
assert False, 'something went wrong'
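# A minimal check sketch (illustrative, TF1 session API): with a square matrix,
# gmatmul contracts that matrix along `reduce_dim` of the other tensor, so
# gmatmul(Q, x, transpose_a=True, reduce_dim=1) should match einsum('ji,bjc->bic').
def _gmatmul_check():
    import numpy as np
    Q = np.random.randn(4, 4).astype(np.float32)
    x = np.random.randn(3, 4, 5).astype(np.float32)
    out = gmatmul(tf.constant(Q), tf.constant(x), transpose_a=True, reduce_dim=1)
    with tf.Session() as sess:
        assert np.allclose(sess.run(out), np.einsum('ji,bjc->bic', Q, x), atol=1e-5)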
def clipoutNeg(vec, threshold=1e-6):
mask = tf.cast(vec > threshold, tf.float32)
return mask * vec
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
eigen_min = tf.reduce_min(input_mat)
eigen_max = tf.reduce_max(input_mat)
eigen_ratio = eigen_max / eigen_min
input_mat_clipped = clipoutNeg(input_mat, threshold)
if debug:
input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
return input_mat_clipped
def factorReshape(Q, e, grad, facIndx=0, ftype='act'):
grad_shape = grad.get_shape()
if ftype == 'act':
assert e.get_shape()[0] == grad_shape[facIndx]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[facIndx] = -1
e = tf.reshape(e, expanded_shape)
if ftype == 'grad':
assert e.get_shape()[0] == grad_shape[len(grad_shape) - facIndx - 1]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[len(grad_shape) - facIndx - 1] = -1
e = tf.reshape(e, expanded_shape)
return Q, e
|
import os.path as osp
import time
import functools
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common.policies import build_policy
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.a2c.runner import Runner
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.acktr import kfac
from baselines.ppo2.ppo2 import safemean
from collections import deque
class Model(object):
def __init__(self, policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=32, nsteps=20,
ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
kfac_clip=0.001, lrschedule='linear', is_async=True):
self.sess = sess = get_session()
nbatch = nenvs * nsteps
with tf.variable_scope('acktr_model', reuse=tf.AUTO_REUSE):
self.model = step_model = policy(nenvs, 1, sess=sess)
self.model2 = train_model = policy(nenvs*nsteps, nsteps, sess=sess)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
PG_LR = tf.placeholder(tf.float32, [])
VF_LR = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
self.logits = train_model.pi
##training loss
pg_loss = tf.reduce_mean(ADV*neglogpac)
entropy = tf.reduce_mean(train_model.pd.entropy())
pg_loss = pg_loss - ent_coef * entropy
vf_loss = tf.losses.mean_squared_error(tf.squeeze(train_model.vf), R)
train_loss = pg_loss + vf_coef * vf_loss
##Fisher loss construction
self.pg_fisher = pg_fisher_loss = -tf.reduce_mean(neglogpac)
sample_net = train_model.vf + tf.random_normal(tf.shape(train_model.vf))
self.vf_fisher = vf_fisher_loss = - vf_fisher_coef*tf.reduce_mean(tf.pow(train_model.vf - tf.stop_gradient(sample_net), 2))
self.joint_fisher = joint_fisher_loss = pg_fisher_loss + vf_fisher_loss
self.params = params = find_trainable_variables("acktr_model")
self.grads_check = grads = tf.gradients(train_loss, params)
with tf.device('/gpu:0'):
self.optim = optim = kfac.KfacOptimizer(learning_rate=PG_LR, clip_kl=kfac_clip,\
momentum=0.9, kfac_update=1, epsilon=0.01,\
stats_decay=0.99, is_async=is_async, cold_iter=10, max_grad_norm=max_grad_norm)
# update_stats_op = optim.compute_and_apply_stats(joint_fisher_loss, var_list=params)
optim.compute_and_apply_stats(joint_fisher_loss, var_list=params)
train_op, q_runner = optim.apply_gradients(list(zip(grads,params)))
self.q_runner = q_runner
self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
advs = rewards - values
for step in range(len(obs)):
cur_lr = self.lr.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, PG_LR:cur_lr, VF_LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, train_op],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
tf.global_variables_initializer().run(session=sess)
def learn(network, env, seed, total_timesteps=int(40e6), gamma=0.99, log_interval=100, nprocs=32, nsteps=20,
ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
kfac_clip=0.001, save_interval=None, lrschedule='linear', load_path=None, is_async=True, **network_kwargs):
set_global_seeds(seed)
if network == 'cnn':
network_kwargs['one_dim_bias'] = True
policy = build_policy(env, network, **network_kwargs)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
make_model = lambda: Model(policy, ob_space, ac_space, nenvs, total_timesteps,
nprocs=nprocs, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
vf_fisher_coef=vf_fisher_coef, lr=lr, max_grad_norm=max_grad_norm,
kfac_clip=kfac_clip, lrschedule=lrschedule, is_async=is_async)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
if load_path is not None:
model.load(load_path)
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
nbatch = nenvs*nsteps
tstart = time.time()
coord = tf.train.Coordinator()
if is_async:
enqueue_threads = model.q_runner.create_threads(model.sess, coord=coord, start=True)
else:
enqueue_threads = []
for update in range(1, total_timesteps//nbatch+1):
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
model.old_obs = obs
nseconds = time.time()-tstart
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("policy_loss", float(policy_loss))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
savepath = osp.join(logger.get_dir(), 'checkpoint%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
coord.request_stop()
coord.join(enqueue_threads)
return model
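# A minimal usage sketch (make_atari_env and VecFrameStack are assumed to be
# importable from baselines.common, as in this repo's run scripts; the env id
# and hyperparameters are illustrative):
def _acktr_example():
    from baselines.common.cmd_util import make_atari_env
    from baselines.common.vec_env.vec_frame_stack import VecFrameStack
    env = VecFrameStack(make_atari_env('BreakoutNoFrameskip-v4', num_env=4, seed=0), nstack=4)
    return learn(network='cnn', env=env, seed=0, total_timesteps=int(1e6))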
|
import tensorflow as tf
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
with tf.variable_scope(name, reuse=reuse):
assert (len(tf.get_variable_scope().name.split('/')) == 2)
w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
weight_decay_fc = 3e-4
if weight_loss_dict is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
if weight_loss_dict is not None:
weight_loss_dict[w] = weight_decay_fc
weight_loss_dict[b] = 0.0
tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)
return tf.nn.bias_add(tf.matmul(x, w), b)
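# A minimal sketch (hypothetical scope and sizes): a small MLP head built from
# dense(), collecting weight-decay coefficients into a dict the way the acktr
# code passes weight_decay_dict to KfacOptimizer. Note dense() asserts it is
# called exactly one variable_scope level deep.
def _dense_example(x):
    weight_loss_dict = {}
    with tf.variable_scope("mlp_head"):
        h = tf.nn.tanh(dense(x, 64, "h1", weight_loss_dict=weight_loss_dict))
        out = dense(h, 2, "out", weight_loss_dict=weight_loss_dict)
    return out, weight_loss_dict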
def kl_div(action_dist1, action_dist2, action_size):
mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:]
mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:]
numerator = tf.square(mean1 - mean2) + tf.square(std1) - tf.square(std2)
denominator = 2 * tf.square(std2) + 1e-8
return tf.reduce_sum(
numerator / denominator + tf.log(std2) - tf.log(std1), reduction_indices=-1)
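# A minimal check sketch (illustrative, TF1 session API): kl_div above should
# match the closed-form KL between diagonal Gaussians,
# KL = sum_i [ log(s2/s1) + (s1^2 + (m1-m2)^2) / (2 s2^2) - 1/2 ].
def _kl_div_check():
    import numpy as np
    m1, s1 = np.array([[0.0, 0.0]]), np.array([[1.0, 1.0]])
    m2, s2 = np.array([[1.0, 0.0]]), np.array([[2.0, 1.0]])
    d1 = tf.constant(np.concatenate([m1, s1], axis=1), tf.float32)
    d2 = tf.constant(np.concatenate([m2, s2], axis=1), tf.float32)
    with tf.Session() as sess:
        kl = sess.run(kl_div(d1, d2, action_size=2))
    expected = np.sum(np.log(s2 / s1) + (s1 ** 2 + (m1 - m2) ** 2) / (2 * s2 ** 2) - 0.5)
    assert np.isclose(kl[0], expected, atol=1e-4)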
|
def mujoco():
return dict(
nsteps=2500,
value_network='copy'
)
|
import tensorflow as tf
import numpy as np
import re
# flake8: noqa F403, F405
from baselines.acktr.kfac_utils import *
from functools import reduce
KFAC_OPS = ['MatMul', 'Conv2D', 'BiasAdd']
KFAC_DEBUG = False
class KfacOptimizer():
# note that KfacOptimizer will be truly synchronous (and thus deterministic) only if a single-threaded session is used
def __init__(self, learning_rate=0.01, momentum=0.9, clip_kl=0.01, kfac_update=2,
stats_accum_iter=60, full_stats_init=False, cold_iter=100, cold_lr=None,
is_async=False, async_stats=False, epsilon=1e-2, stats_decay=0.95,
blockdiag_bias=False, channel_fac=False, factored_damping=False, approxT2=False,
use_float64=False, weight_decay_dict={}, max_grad_norm=0.5):
self.max_grad_norm = max_grad_norm
self._lr = learning_rate
self._momentum = momentum
self._clip_kl = clip_kl
self._channel_fac = channel_fac
self._kfac_update = kfac_update
self._async = is_async
self._async_stats = async_stats
self._epsilon = epsilon
self._stats_decay = stats_decay
self._blockdiag_bias = blockdiag_bias
self._approxT2 = approxT2
self._use_float64 = use_float64
self._factored_damping = factored_damping
self._cold_iter = cold_iter
if cold_lr is None:
# good heuristic
self._cold_lr = self._lr  # * 3.
else:
self._cold_lr = cold_lr
self._stats_accum_iter = stats_accum_iter
self._weight_decay_dict = weight_decay_dict
self._diag_init_coeff = 0.
self._full_stats_init = full_stats_init
if not self._full_stats_init:
self._stats_accum_iter = self._cold_iter
self.sgd_step = tf.Variable(0, name='KFAC/sgd_step', trainable=False)
self.global_step = tf.Variable(
0, name='KFAC/global_step', trainable=False)
self.cold_step = tf.Variable(0, name='KFAC/cold_step', trainable=False)
self.factor_step = tf.Variable(
0, name='KFAC/factor_step', trainable=False)
self.stats_step = tf.Variable(
0, name='KFAC/stats_step', trainable=False)
self.vFv = tf.Variable(0., name='KFAC/vFv', trainable=False)
self.factors = {}
self.param_vars = []
self.stats = {}
self.stats_eigen = {}
def getFactors(self, g, varlist):
graph = tf.get_default_graph()
factorTensors = {}
fpropTensors = []
bpropTensors = []
opTypes = []
fops = []
def searchFactors(gradient, graph):
# hard-coded search strategy
bpropOp = gradient.op
bpropOp_name = bpropOp.name
bTensors = []
fTensors = []
# combining additive gradients; assume they are the same op type and
# independent
if 'AddN' in bpropOp_name:
factors = []
for g in gradient.op.inputs:
factors.append(searchFactors(g, graph))
op_names = [item['opName'] for item in factors]
# TO-DO: need to check all the attributes of the ops as well
print (gradient.name)
print (op_names)
print (len(np.unique(op_names)))
assert len(np.unique(op_names)) == 1, gradient.name + \
' is shared among different computation OPs'
bTensors = reduce(lambda x, y: x + y,
[item['bpropFactors'] for item in factors])
if len(factors[0]['fpropFactors']) > 0:
fTensors = reduce(
lambda x, y: x + y, [item['fpropFactors'] for item in factors])
fpropOp_name = op_names[0]
fpropOp = factors[0]['op']
else:
fpropOp_name = re.search(
'gradientsSampled(_[0-9]+|)/(.+?)_grad', bpropOp_name).group(2)
fpropOp = graph.get_operation_by_name(fpropOp_name)
if fpropOp.op_def.name in KFAC_OPS:
# Known OPs
###
bTensor = [
i for i in bpropOp.inputs if 'gradientsSampled' in i.name][-1]
bTensorShape = fpropOp.outputs[0].get_shape()
if bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
###
if fpropOp.op_def.name == 'BiasAdd':
fTensors = []
else:
fTensors.append(
[i for i in fpropOp.inputs if param.op.name not in i.name][0])
fpropOp_name = fpropOp.op_def.name
else:
# unknown OPs, block approximation used
bInputsList = [i for i in bpropOp.inputs[
0].op.inputs if 'gradientsSampled' in i.name if 'Shape' not in i.name]
if len(bInputsList) > 0:
bTensor = bInputsList[0]
bTensorShape = fpropOp.outputs[0].get_shape()
if len(bTensor.get_shape()) > 0 and bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
fpropOp_name = 'UNK-' + fpropOp.op_def.name
opTypes.append(fpropOp_name)
return {'opName': fpropOp_name, 'op': fpropOp, 'fpropFactors': fTensors, 'bpropFactors': bTensors}
for t, param in zip(g, varlist):
if KFAC_DEBUG:
print(('get factor for '+param.name))
factors = searchFactors(t, graph)
factorTensors[param] = factors
########
# check associated weights and bias for homogeneous coordinate representation
# and check for redundant factors
# TO-DO: there may be a bug in detecting associated bias and weights for
# forking layers, e.g. in inception models.
for param in varlist:
factorTensors[param]['assnWeights'] = None
factorTensors[param]['assnBias'] = None
for param in varlist:
if factorTensors[param]['opName'] == 'BiasAdd':
factorTensors[param]['assnWeights'] = None
for item in varlist:
if len(factorTensors[item]['bpropFactors']) > 0:
if (set(factorTensors[item]['bpropFactors']) == set(factorTensors[param]['bpropFactors'])) and (len(factorTensors[item]['fpropFactors']) > 0):
factorTensors[param]['assnWeights'] = item
factorTensors[item]['assnBias'] = param
factorTensors[param]['bpropFactors'] = factorTensors[
item]['bpropFactors']
########
########
# concatenate the additive gradients along the batch dimension, i.e.
# assuming independence structure
for key in ['fpropFactors', 'bpropFactors']:
for i, param in enumerate(varlist):
if len(factorTensors[param][key]) > 0:
if (key + '_concat') not in factorTensors[param]:
name_scope = factorTensors[param][key][0].name.split(':')[
0]
with tf.name_scope(name_scope):
factorTensors[param][
key + '_concat'] = tf.concat(factorTensors[param][key], 0)
else:
factorTensors[param][key + '_concat'] = None
for j, param2 in enumerate(varlist[(i + 1):]):
if (len(factorTensors[param][key]) > 0) and (set(factorTensors[param2][key]) == set(factorTensors[param][key])):
factorTensors[param2][key] = factorTensors[param][key]
factorTensors[param2][
key + '_concat'] = factorTensors[param][key + '_concat']
########
if KFAC_DEBUG:
for items in zip(varlist, fpropTensors, bpropTensors, opTypes):
print((items[0].name, factorTensors[items[0]]))
self.factors = factorTensors
return factorTensors
def getStats(self, factors, varlist):
if len(self.stats) == 0:
# initialize stats variables on CPU because eigen decomp is
# computed on CPU
with tf.device('/cpu'):
tmpStatsCache = {}
# search for tensor factors and
# use block diag approx for the bias units
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels does not support
# homogeneous coordinates
var_assnBias = factors[var]['assnBias']
if var_assnBias:
factors[var]['assnBias'] = None
factors[var_assnBias]['assnWeights'] = None
##
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
self.stats[var] = {'opName': opType,
'fprop_concat_stats': [],
'bprop_concat_stats': [],
'assnWeights': factors[var]['assnWeights'],
'assnBias': factors[var]['assnBias'],
}
if fpropFactor is not None:
if fpropFactor not in tmpStatsCache:
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence between input channels and spatial
# 2K-1 x 2K-1 covariance matrix and C x C covariance matrix
# factorization along the channels does not
# support homogeneous coordinates; assnBias
# is always None
fpropFactor2_size = Kh * Kw
slot_fpropFactor_stats2 = tf.Variable(tf.diag(tf.ones(
[fpropFactor2_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats2)
fpropFactor_size = C
else:
# 2K-1 x 2K-1 x C x C covariance matrix
# assume BHWC
fpropFactor_size = Kh * Kw * C
else:
# D x D covariance matrix
fpropFactor_size = fpropFactor.get_shape()[-1]
# use homogeneous coordinate
if not self._blockdiag_bias and self.stats[var]['assnBias']:
fpropFactor_size += 1
slot_fpropFactor_stats = tf.Variable(tf.diag(tf.ones(
[fpropFactor_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats)
if opType != 'Conv2D':
tmpStatsCache[fpropFactor] = self.stats[
var]['fprop_concat_stats']
else:
self.stats[var][
'fprop_concat_stats'] = tmpStatsCache[fpropFactor]
if bpropFactor is not None:
# no need to collect backward stats for bias vectors if
# using homogeneous coordinates
if not((not self._blockdiag_bias) and self.stats[var]['assnWeights']):
if bpropFactor not in tmpStatsCache:
slot_bpropFactor_stats = tf.Variable(tf.diag(tf.ones([bpropFactor.get_shape(
)[-1]])) * self._diag_init_coeff, name='KFAC_STATS/' + bpropFactor.op.name, trainable=False)
self.stats[var]['bprop_concat_stats'].append(
slot_bpropFactor_stats)
tmpStatsCache[bpropFactor] = self.stats[
var]['bprop_concat_stats']
else:
self.stats[var][
'bprop_concat_stats'] = tmpStatsCache[bpropFactor]
return self.stats
def compute_and_apply_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
stats = self.compute_stats(loss_sampled, var_list=varlist)
return self.apply_stats(stats)
def compute_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
gs = tf.gradients(loss_sampled, varlist, name='gradientsSampled')
self.gs = gs
factors = self.getFactors(gs, varlist)
stats = self.getStats(factors, varlist)
updateOps = []
statsUpdates = {}
statsUpdates_cache = {}
for var in varlist:
opType = factors[var]['opName']
fops = factors[var]['op']
fpropFactor = factors[var]['fpropFactors_concat']
fpropStats_vars = stats[var]['fprop_concat_stats']
bpropFactor = factors[var]['bpropFactors_concat']
bpropStats_vars = stats[var]['bprop_concat_stats']
SVD_factors = {}
for stats_var in fpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_fpropFactor = fpropFactor
B = (tf.shape(fpropFactor)[0]) # batch size
if opType == 'Conv2D':
strides = fops.get_attr("strides")
padding = fops.get_attr("padding")
convkernel_size = var.get_shape()[0:3]
KH = int(convkernel_size[0])
KW = int(convkernel_size[1])
C = int(convkernel_size[2])
flatten_size = int(KH * KW * C)
Oh = int(bpropFactor.get_shape()[1])
Ow = int(bpropFactor.get_shape()[2])
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence among input channels
# factor = B x 1 x 1 x (KH xKW x C)
# patches = B x Oh x Ow x (KH xKW x C)
if len(SVD_factors) == 0:
if KFAC_DEBUG:
print(('approx %s act factor with rank-1 SVD factors' % (var.name)))
# find closest rank-1 approx to the feature map
S, U, V = tf.batch_svd(tf.reshape(
fpropFactor, [-1, KH * KW, C]))
# get rank-1 approx slides
sqrtS1 = tf.expand_dims(tf.sqrt(S[:, 0, 0]), 1)
patches_k = U[:, :, 0] * sqrtS1 # B x KH*KW
full_factor_shape = fpropFactor.get_shape()
patches_k.set_shape(
[full_factor_shape[0], KH * KW])
patches_c = V[:, :, 0] * sqrtS1 # B x C
patches_c.set_shape([full_factor_shape[0], C])
SVD_factors[C] = patches_c
SVD_factors[KH * KW] = patches_k
fpropFactor = SVD_factors[stats_var_dim]
else:
# memory-inefficient implementation
patches = tf.extract_image_patches(fpropFactor, ksizes=[1, convkernel_size[
0], convkernel_size[1], 1], strides=strides, rates=[1, 1, 1, 1], padding=padding)
if self._approxT2:
if KFAC_DEBUG:
print(('approxT2 act fisher for %s' % (var.name)))
# T^2 terms * 1/T^2, size: B x C
fpropFactor = tf.reduce_mean(patches, [1, 2])
else:
# size: (B x Oh x Ow) x C
fpropFactor = tf.reshape(
patches, [-1, flatten_size]) / Oh / Ow
fpropFactor_size = int(fpropFactor.get_shape()[-1])
if stats_var_dim == (fpropFactor_size + 1) and not self._blockdiag_bias:
if opType == 'Conv2D' and not self._approxT2:
# correct padding for numerical stability (we
# divided out OhxOw from activations for T1 approx)
fpropFactor = tf.concat([fpropFactor, tf.ones(
[tf.shape(fpropFactor)[0], 1]) / Oh / Ow], 1)
else:
# use homogeneous coordinates
fpropFactor = tf.concat(
[fpropFactor, tf.ones([tf.shape(fpropFactor)[0], 1])], 1)
# average over the number of data points in a batch
# divided by B
cov = tf.matmul(fpropFactor, fpropFactor,
transpose_a=True) / tf.cast(B, tf.float32)
updateOps.append(cov)
statsUpdates[stats_var] = cov
if opType != 'Conv2D':
# HACK: for convolution we recompute fprop stats for
# every layer including forking layers
statsUpdates_cache[stats_var] = cov
for stats_var in bpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_bpropFactor = bpropFactor
bpropFactor_shape = bpropFactor.get_shape()
B = tf.shape(bpropFactor)[0] # batch size
C = int(bpropFactor_shape[-1]) # num channels
if opType == 'Conv2D' or len(bpropFactor_shape) == 4:
if fpropFactor is not None:
if self._approxT2:
if KFAC_DEBUG:
print(('approxT2 grad fisher for %s' % (var.name)))
bpropFactor = tf.reduce_sum(
bpropFactor, [1, 2]) # T^2 terms * 1/T^2
else:
bpropFactor = tf.reshape(
bpropFactor, [-1, C]) * Oh * Ow # T * 1/T terms
else:
# just doing block diag approx. spatial independent
# structure does not apply here. summing over
# spatial locations
if KFAC_DEBUG:
print(('block diag approx fisher for %s' % (var.name)))
bpropFactor = tf.reduce_sum(bpropFactor, [1, 2])
# assume sampled loss is averaged. TO-DO: figure out a better
# way to handle this
bpropFactor *= tf.to_float(B)
##
cov_b = tf.matmul(
bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0])
updateOps.append(cov_b)
statsUpdates[stats_var] = cov_b
statsUpdates_cache[stats_var] = cov_b
if KFAC_DEBUG:
aKey = list(statsUpdates.keys())[0]
statsUpdates[aKey] = tf.Print(statsUpdates[aKey],
[tf.convert_to_tensor('step:'),
self.global_step,
tf.convert_to_tensor(
'computing stats'),
])
self.statsUpdates = statsUpdates
return statsUpdates
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
updateOps = []
# obtain the stats var list
for stats_var in statsUpdates:
stats_new = statsUpdates[stats_var]
if accumulate:
# simple superbatch averaging
update_op = tf.assign_add(
stats_var, accumulateCoeff * stats_new, use_locking=True)
else:
# exponential running averaging
update_op = tf.assign(
stats_var, stats_var * self._stats_decay, use_locking=True)
update_op = tf.assign_add(
update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
updateOps.append(update_op)
with tf.control_dependencies(updateOps):
stats_step_op = tf.assign_add(self.stats_step, 1)
if KFAC_DEBUG:
stats_step_op = (tf.Print(stats_step_op,
[tf.convert_to_tensor('step:'),
self.global_step,
tf.convert_to_tensor('fac step:'),
self.factor_step,
tf.convert_to_tensor('sgd step:'),
self.sgd_step,
tf.convert_to_tensor('Accum:'),
tf.convert_to_tensor(accumulate),
tf.convert_to_tensor('Accum coeff:'),
tf.convert_to_tensor(accumulateCoeff),
tf.convert_to_tensor('stat step:'),
self.stats_step, updateOps[0], updateOps[1]]))
return [stats_step_op, ]
def getStatsEigen(self, stats=None):
if len(self.stats_eigen) == 0:
stats_eigen = {}
if stats is None:
stats = self.stats
tmpEigenCache = {}
with tf.device('/cpu:0'):
for var in stats:
for key in ['fprop_concat_stats', 'bprop_concat_stats']:
for stats_var in stats[var][key]:
if stats_var not in tmpEigenCache:
stats_dim = stats_var.get_shape()[1].value
e = tf.Variable(tf.ones(
[stats_dim]), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/e', trainable=False)
Q = tf.Variable(tf.diag(tf.ones(
[stats_dim])), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/Q', trainable=False)
stats_eigen[stats_var] = {'e': e, 'Q': Q}
tmpEigenCache[
stats_var] = stats_eigen[stats_var]
else:
stats_eigen[stats_var] = tmpEigenCache[
stats_var]
self.stats_eigen = stats_eigen
return self.stats_eigen
def computeStatsEigen(self):
""" compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue """
# TO-DO: figure out why this op has delays (possibly moving
# eigenvectors around?)
with tf.device('/cpu:0'):
def removeNone(tensor_list):
local_list = []
for item in tensor_list:
if item is not None:
local_list.append(item)
return local_list
def copyStats(var_list):
print("copying stats to buffer tensors before eigen decomp")
redundant_stats = {}
copied_list = []
for item in var_list:
if item is not None:
if item not in redundant_stats:
if self._use_float64:
redundant_stats[item] = tf.cast(
tf.identity(item), tf.float64)
else:
redundant_stats[item] = tf.identity(item)
copied_list.append(redundant_stats[item])
else:
copied_list.append(None)
return copied_list
#stats = [copyStats(self.fStats), copyStats(self.bStats)]
#stats = [self.fStats, self.bStats]
stats_eigen = self.stats_eigen
computedEigen = {}
eigen_reverse_lookup = {}
updateOps = []
# sync copied stats
# with tf.control_dependencies(removeNone(stats[0]) +
# removeNone(stats[1])):
with tf.control_dependencies([]):
for stats_var in stats_eigen:
if stats_var not in computedEigen:
eigens = tf.self_adjoint_eig(stats_var)
e = eigens[0]
Q = eigens[1]
if self._use_float64:
e = tf.cast(e, tf.float32)
Q = tf.cast(Q, tf.float32)
updateOps.append(e)
updateOps.append(Q)
computedEigen[stats_var] = {'e': e, 'Q': Q}
eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']
eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']
self.eigen_reverse_lookup = eigen_reverse_lookup
self.eigen_update_list = updateOps
if KFAC_DEBUG:
self.eigen_update_list = [item for item in updateOps]
with tf.control_dependencies(updateOps):
updateOps.append(tf.Print(tf.constant(
0.), [tf.convert_to_tensor('computed factor eigen')]))
return updateOps
def applyStatsEigen(self, eigen_list):
updateOps = []
print(('updating %d eigenvalue/vectors' % len(eigen_list)))
for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
stats_eigen_var = self.eigen_reverse_lookup[mark]
updateOps.append(
tf.assign(stats_eigen_var, tensor, use_locking=True))
with tf.control_dependencies(updateOps):
factor_step_op = tf.assign_add(self.factor_step, 1)
updateOps.append(factor_step_op)
if KFAC_DEBUG:
updateOps.append(tf.Print(tf.constant(
0.), [tf.convert_to_tensor('updated kfac factors')]))
return updateOps
def getKfacPrecondUpdates(self, gradlist, varlist):
updatelist = []
vg = 0.
assert len(self.stats) > 0
assert len(self.stats_eigen) > 0
assert len(self.factors) > 0
counter = 0
grad_dict = {var: grad for grad, var in zip(gradlist, varlist)}
for grad, var in zip(gradlist, varlist):
GRAD_RESHAPE = False
GRAD_TRANSPOSE = False
fpropFactoredFishers = self.stats[var]['fprop_concat_stats']
bpropFactoredFishers = self.stats[var]['bprop_concat_stats']
if (len(fpropFactoredFishers) + len(bpropFactoredFishers)) > 0:
counter += 1
GRAD_SHAPE = grad.get_shape()
if len(grad.get_shape()) > 2:
# reshape conv kernel parameters
KW = int(grad.get_shape()[0])
KH = int(grad.get_shape()[1])
C = int(grad.get_shape()[2])
D = int(grad.get_shape()[3])
if len(fpropFactoredFishers) > 1 and self._channel_fac:
# reshape conv kernel parameters into tensor
grad = tf.reshape(grad, [KW * KH, C, D])
else:
# reshape conv kernel parameters into 2D grad
grad = tf.reshape(grad, [-1, D])
GRAD_RESHAPE = True
elif len(grad.get_shape()) == 1:
# reshape bias or 1D parameters
D = int(grad.get_shape()[0])
grad = tf.expand_dims(grad, 0)
GRAD_RESHAPE = True
else:
# 2D parameters
C = int(grad.get_shape()[0])
D = int(grad.get_shape()[1])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# using homogeneous coordinates only works for a 2D grad.
# TO-DO: figure out how to factorize bias grad
# stack bias grad
var_assnBias = self.stats[var]['assnBias']
grad = tf.concat(
[grad, tf.expand_dims(grad_dict[var_assnBias], 0)], 0)
# project gradient to eigen space and reshape the eigenvalues
# for broadcasting
eigVals = []
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='act', debug=KFAC_DEBUG)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='act')
eigVals.append(e)
grad = gmatmul(Q, grad, transpose_a=True, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='grad', debug=KFAC_DEBUG)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='grad')
eigVals.append(e)
grad = gmatmul(grad, Q, transpose_b=False, reduce_dim=idx)
##
#####
# whiten using eigenvalues
weightDecayCoeff = 0.
if var in self._weight_decay_dict:
weightDecayCoeff = self._weight_decay_dict[var]
if KFAC_DEBUG:
print('weight decay coeff for %s is %f' % (var.name, weightDecayCoeff))
if self._factored_damping:
if KFAC_DEBUG:
print('use factored damping for %s' % var.name)
coeffs = 1.
num_factors = len(eigVals)
# compute the ratio of the trace norms of the left and right
# KFAC factor matrices, and its generalization to more factors
if len(eigVals) == 1:
damping = self._epsilon + weightDecayCoeff
else:
damping = tf.pow(
self._epsilon + weightDecayCoeff, 1. / num_factors)
eigVals_tnorm_avg = [tf.reduce_mean(
tf.abs(e)) for e in eigVals]
for e, e_tnorm in zip(eigVals, eigVals_tnorm_avg):
eig_tnorm_negList = [
item for item in eigVals_tnorm_avg if item != e_tnorm]
if len(eigVals) == 1:
adjustment = 1.
elif len(eigVals) == 2:
adjustment = tf.sqrt(
e_tnorm / eig_tnorm_negList[0])
else:
eig_tnorm_negList_prod = reduce(
lambda x, y: x * y, eig_tnorm_negList)
adjustment = tf.pow(
tf.pow(e_tnorm, num_factors - 1.) / eig_tnorm_negList_prod, 1. / num_factors)
coeffs *= (e + adjustment * damping)
else:
coeffs = 1.
damping = (self._epsilon + weightDecayCoeff)
for e in eigVals:
coeffs *= e
coeffs += damping
#grad = tf.Print(grad, [tf.convert_to_tensor('1'), tf.convert_to_tensor(var.name), grad.get_shape()])
grad /= coeffs
#grad = tf.Print(grad, [tf.convert_to_tensor('2'), tf.convert_to_tensor(var.name), grad.get_shape()])
#####
# project gradient back to euclidean space
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(Q, grad, transpose_a=False, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(grad, Q, transpose_b=True, reduce_dim=idx)
##
#grad = tf.Print(grad, [tf.convert_to_tensor('3'), tf.convert_to_tensor(var.name), grad.get_shape()])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# using homogeneous coordinates only works for a 2D grad.
# TO-DO: figure out how to factorize bias grad
# un-stack bias grad
var_assnBias = self.stats[var]['assnBias']
C_plus_one = int(grad.get_shape()[0])
grad_assnBias = tf.reshape(tf.slice(grad,
begin=[
C_plus_one - 1, 0],
size=[1, -1]), var_assnBias.get_shape())
grad_assnWeights = tf.slice(grad,
begin=[0, 0],
size=[C_plus_one - 1, -1])
grad_dict[var_assnBias] = grad_assnBias
grad = grad_assnWeights
#grad = tf.Print(grad, [tf.convert_to_tensor('4'), tf.convert_to_tensor(var.name), grad.get_shape()])
if GRAD_RESHAPE:
grad = tf.reshape(grad, GRAD_SHAPE)
grad_dict[var] = grad
print('projecting %d gradient matrices' % counter)
for g, var in zip(gradlist, varlist):
grad = grad_dict[var]
### clipping ###
if KFAC_DEBUG:
print('apply clipping to %s' % var.name)
tf.Print(grad, [tf.sqrt(tf.reduce_sum(tf.pow(grad, 2)))], "Euclidean norm of new grad")
local_vg = tf.reduce_sum(grad * g * (self._lr * self._lr))
vg += local_vg
# rescale everything
if KFAC_DEBUG:
print('apply vFv clipping')
scaling = tf.minimum(1., tf.sqrt(self._clip_kl / vg))
if KFAC_DEBUG:
scaling = tf.Print(scaling, [tf.convert_to_tensor(
'clip: '), scaling, tf.convert_to_tensor(' vFv: '), vg])
with tf.control_dependencies([tf.assign(self.vFv, vg)]):
updatelist = [grad_dict[var] for var in varlist]
for i, item in enumerate(updatelist):
updatelist[i] = scaling * item
return updatelist
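# Summary (2D weight, one fprop factor A and one bprop factor B): the code
# above approximates the natural-gradient step
#   F^-1 g ~= Q_A [ (Q_A^T G Q_B) / (e_A e_B^T + damping) ] Q_B^T
# where (Q, e) are the eigendecompositions of the Kronecker factors, G is the
# 2D-reshaped gradient, and the division is elementwise (factored damping
# instead distributes the damping term across the factors). The final loop
# rescales all updates so the estimated vFv stays below clip_kl.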
def compute_gradients(self, loss, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
g = tf.gradients(loss, varlist)
return [(a, b) for a, b in zip(g, varlist)]
def apply_gradients_kfac(self, grads):
g, varlist = list(zip(*grads))
if len(self.stats_eigen) == 0:
self.getStatsEigen()
qr = None
# launch eigen-decomp on a queue thread
if self._async:
print('Use async eigen decomp')
# get a list of factor loading tensors
factorOps_dummy = self.computeStatsEigen()
# define a queue for the list of factor loading tensors
queue = tf.FIFOQueue(1, [item.dtype for item in factorOps_dummy], shapes=[
item.get_shape() for item in factorOps_dummy])
enqueue_op = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor(
0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: queue.enqueue(self.computeStatsEigen()), tf.no_op)
def dequeue_op():
return queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue_op])
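# With async enabled, a QueueRunner thread keeps evaluating enqueue_op: every
# kfac_update steps (once stats have accumulated) it computes the eigen
# decompositions and pushes them onto the size-1 queue, while the training op
# below dequeues and applies them whenever results are ready, so the expensive
# decompositions never block the SGD step.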
updateOps = []
global_step_op = tf.assign_add(self.global_step, 1)
updateOps.append(global_step_op)
with tf.control_dependencies([global_step_op]):
# compute updates
assert self._update_stats_op is not None
updateOps.append(self._update_stats_op)
dependency_list = []
if not self._async:
dependency_list.append(self._update_stats_op)
with tf.control_dependencies(dependency_list):
def no_op_wrapper():
return tf.group(*[tf.assign_add(self.cold_step, 1)])
if not self._async:
# synchronous eigen-decomp updates
updateFactorOps = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update),
tf.convert_to_tensor(0)),
tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: tf.group(*self.applyStatsEigen(self.computeStatsEigen())), no_op_wrapper)
else:
# asynchronous eigen-decomp updates using queue
updateFactorOps = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter),
lambda: tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(0)),
tf.no_op,
lambda: tf.group(
*self.applyStatsEigen(dequeue_op())),
),
no_op_wrapper)
updateOps.append(updateFactorOps)
with tf.control_dependencies([updateFactorOps]):
def gradOp():
return list(g)
def getKfacGradOp():
return self.getKfacPrecondUpdates(g, varlist)
u = tf.cond(tf.greater(self.factor_step,
tf.convert_to_tensor(0)), getKfacGradOp, gradOp)
optim = tf.train.MomentumOptimizer(
self._lr * (1. - self._momentum), self._momentum)
#optim = tf.train.AdamOptimizer(self._lr, epsilon=0.01)
def optimOp():
def updateOptimOp():
if self._full_stats_init:
return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op)
else:
return optim.apply_gradients(list(zip(u, varlist)))
if self._full_stats_init:
return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp, tf.no_op)
else:
return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op)
updateOps.append(optimOp())
return tf.group(*updateOps), qr
def apply_gradients(self, grads):
coldOptim = tf.train.MomentumOptimizer(
self._cold_lr, self._momentum)
def coldSGDstart():
sgd_grads, sgd_var = zip(*grads)
if self.max_grad_norm is not None:
sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)
sgd_grads = list(zip(sgd_grads, sgd_var))
sgd_step_op = tf.assign_add(self.sgd_step, 1)
coldOptim_op = coldOptim.apply_gradients(sgd_grads)
if KFAC_DEBUG:
with tf.control_dependencies([sgd_step_op, coldOptim_op]):
sgd_step_op = tf.Print(
sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
return tf.group(*[sgd_step_op, coldOptim_op])
kfacOptim_op, qr = self.apply_gradients_kfac(grads)
def warmKFACstart():
return kfacOptim_op
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
def minimize(self, loss, loss_sampled, var_list=None):
grads = self.compute_gradients(loss, var_list=var_list)
update_stats_op = self.compute_and_apply_stats(
loss_sampled, var_list=var_list)
return self.apply_gradients(grads)
|
import numpy as np
from baselines.a2c.utils import discount_with_dones
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this class to generate batches of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch of experiences
"""
def __init__(self, env, model, nsteps=5, gamma=0.99):
super().__init__(env=env, model=model, nsteps=nsteps)
self.gamma = gamma
self.batch_action_shape = [x if x is not None else -1 for x in model.train_model.action.shape.as_list()]
self.ob_dtype = model.train_model.X.dtype.as_numpy_dtype
def run(self):
# We initialize the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
mb_states = self.states
epinfos = []
for n in range(self.nsteps):
# Given observations, take action and value (V(s))
# We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)
# Append the experiences
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
# Take actions in env and look at the results
obs, rewards, dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
mb_dones.append(self.dones)
# Batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
if self.gamma > 0.0:
# Discount/bootstrap off value fn
last_values = self.model.value(self.obs, S=self.states, M=self.dones).tolist()
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_rewards[n] = rewards
mb_actions = mb_actions.reshape(self.batch_action_shape)
mb_rewards = mb_rewards.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, epinfos
|
import os
import numpy as np
import tensorflow as tf
from collections import deque
def sample(logits):
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
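# sample() uses the Gumbel-max trick: argmax_i(logits_i + g_i), with
# g_i = -log(-log(u_i)) and u_i ~ Uniform(0, 1), draws exact samples from the
# categorical distribution softmax(logits) without an explicit softmax.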
def cat_entropy(logits):
a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
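# cat_entropy computes the entropy of softmax(logits) in a numerically stable
# way: subtracting the row-wise max before exponentiating (the log-sum-exp
# trick) avoids overflow, and the result equals sum_i p_i * (log z0 - a0_i).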
def cat_entropy_softmax(p0):
return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
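# In both lstm() and lnlstm() below, ms are per-step "done" masks: multiplying
# c and h by (1 - m) resets the recurrent state at episode boundaries so that
# hidden state never leaks across episodes within a training batch.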
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = tf.reshape(x, [-1, nh])
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
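# Worked example (illustrative): discount_with_dones([1, 1, 1], [0, 0, 0], 0.99)
# returns [2.9701, 1.99, 1.0], since each entry is r_t + gamma * r_{t+1} + ...;
# a done flag of 1 at step t zeroes the running return, so rewards never
# propagate backwards across an episode boundary.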
def find_trainable_variables(key):
return tf.trainable_variables(key)
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
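# Usage sketch (illustrative values): anneal a learning rate linearly to zero
# over 1000 calls.
# sched = Scheduler(v=7e-4, nvalues=1000, schedule='linear')
# sched.value()  # 7e-4 (progress 0/1000), then increments the internal counter
# sched.value()  # 7e-4 * (1 - 1/1000)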
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
# For ACER
def get_by_index(x, idx):
assert(len(x.get_shape()) == 2)
assert(len(idx.get_shape()) == 1)
idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
y = tf.gather(tf.reshape(x, [-1]), # flatten input
idx_flattened) # use flattened indices
return y
def check_shape(ts,shapes):
i = 0
for (t,shape) in zip(ts,shapes):
assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
i += 1
def avg_norm(t):
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def gradient_add(g1, g2, param):
print([g1, g2, param.name])
assert (not (g1 is None and g2 is None)), param.name
if g1 is None:
return g2
elif g2 is None:
return g1
else:
return g1 + g2
def q_explained_variance(qpred, q):
_, vary = tf.nn.moments(q, axes=[0, 1])
_, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
check_shape([vary, varpred], [[]] * 2)
return 1.0 - (varpred / vary)
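# q_explained_variance returns 1 - Var(q - qpred) / Var(q): a value of 1 means
# qpred predicts q perfectly, 0 is no better than predicting the mean of q,
# and negative values are worse than predicting the mean.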
|
import time
import functools
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util
from baselines.common.policies import build_policy
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.runner import Runner
from baselines.ppo2.ppo2 import safemean
from collections import deque
from tensorflow import losses
class Model(object):
"""
We use this class to:
__init__:
- Create the step_model
- Create the train_model
train():
- Run the training part (feedforward and backpropagation of gradients)
save/load():
- Save / load the model
"""
def __init__(self, policy, env, nsteps,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
sess = tf_util.get_session()
nenvs = env.num_envs
nbatch = nenvs*nsteps
with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE):
# step_model is used for sampling
step_model = policy(nenvs, 1, sess)
# train_model is used to train our network
train_model = policy(nbatch, nsteps, sess)
A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
ADV = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
LR = tf.placeholder(tf.float32, [])
# Calculate the loss
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Policy loss
neglogpac = train_model.pd.neglogp(A)
# L = A(s,a) * -logpi(a|s)
pg_loss = tf.reduce_mean(ADV * neglogpac)
# Entropy is used to improve exploration by limiting premature convergence to a suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# Value loss
vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)
loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
# Update parameters using loss
# 1. Get the model parameters
params = find_trainable_variables("a2c_model")
# 2. Calculate the gradients
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
# zip pairs each gradient with its associated parameter
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
# 3. Make op for one policy and value update step of A2C
trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# rewards = R + yV(s')
advs = rewards - values
for step in range(len(obs)):
cur_lr = lr.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, _train],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = functools.partial(tf_util.save_variables, sess=sess)
self.load = functools.partial(tf_util.load_variables, sess=sess)
tf.global_variables_initializer().run(session=sess)
def learn(
network,
env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
**network_kwargs):
'''
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
seed: seed to make the random number sequence in the algorithm reproducible. Defaults to None, which means the seed is taken from the system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
# Get the number of environments
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
# Instantiate the model object (that creates step_model and train_model)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
# Calculate the batch_size
nbatch = nenvs*nsteps
# Start total timer
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time()-tstart
# Calculate the fps (frame per second)
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
# Check whether the value function is a good predictor of the returns (ev close to 1)
# or just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
return model
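# Usage sketch (illustrative, not part of this module): train A2C on CartPole
# with four parallel environment copies, assuming gym is installed.
# import gym
# from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
# venv = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
# model = learn(network='mlp', env=venv, total_timesteps=int(1e5))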
|
from baselines.common import explained_variance, zipsame, dataset
from baselines import logger
import baselines.common.tf_util as U
import tensorflow as tf, numpy as np
import time
from baselines.common import colorize
from collections import deque
from baselines.common import set_global_seeds
from baselines.common.mpi_adam import MpiAdam
from baselines.common.cg import cg
from baselines.common.input import observation_placeholder
from baselines.common.policies import build_policy
from contextlib import contextmanager
try:
from mpi4py import MPI
except ImportError:
MPI = None
def traj_segment_generator(pi, env, horizon, stochastic):
# Initialize state variables
t = 0
ac = env.action_space.sample()
new = True
rew = 0.0
ob = env.reset()
cur_ep_ret = 0
cur_ep_len = 0
ep_rets = []
ep_lens = []
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
rews = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
news = np.zeros(horizon, 'int32')
acs = np.array([ac for _ in range(horizon)])
prevacs = acs.copy()
while True:
prevac = ac
ac, vpred, _, _ = pi.step(ob, stochastic=stochastic)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
yield {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news,
"ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new),
"ep_rets" : ep_rets, "ep_lens" : ep_lens}
_, vpred, _, _ = pi.step(ob, stochastic=stochastic)
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_lens = []
i = t % horizon
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
ob, rew, new, _ = env.step(ac)
rews[i] = rew
cur_ep_ret += rew
cur_ep_len += 1
if new:
ep_rets.append(cur_ep_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_len = 0
ob = env.reset()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
def learn(*,
network,
env,
total_timesteps,
timesteps_per_batch=1024, # what to train on
max_kl=0.001,
cg_iters=10,
gamma=0.99,
lam=1.0, # advantage estimation
seed=None,
ent_coef=0.0,
cg_damping=1e-2,
vf_stepsize=3e-4,
vf_iters =3,
max_episodes=0, max_iters=0, # time constraint
callback=None,
load_path=None,
**network_kwargs
):
'''
learn a policy function with TRPO algorithm
Parameters:
----------
network neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)
or function that takes input placeholder and returns tuple (output, None) for feedforward nets
or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets
env environment (one of the gym environments, or one wrapped via a baselines.common.vec_env.VecEnv-type class)
timesteps_per_batch timesteps per gradient estimation batch
max_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) )
ent_coef coefficient of policy entropy term in the optimization objective
cg_iters number of iterations of conjugate gradient algorithm
cg_damping conjugate gradient damping
vf_stepsize learning rate for adam optimizer used to optimize value function loss
vf_iters number of value function optimization iterations per policy optimization step
total_timesteps max number of timesteps
max_episodes max number of episodes
max_iters maximum number of policy optimization iterations
callback function to be called with (locals(), globals()) each policy optimization step
load_path str, path to load the model from (default: None, i.e. no model is loaded)
**network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
Returns:
-------
learnt model
'''
if MPI is not None:
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
else:
nworkers = 1
rank = 0
cpus_per_worker = 1
U.get_session(config=tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=cpus_per_worker,
intra_op_parallelism_threads=cpus_per_worker
))
policy = build_policy(env, network, value_network='copy', **network_kwargs)
set_global_seeds(seed)
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
ob = observation_placeholder(ob_space)
with tf.variable_scope("pi"):
pi = policy(observ_placeholder=ob)
with tf.variable_scope("oldpi"):
oldpi = policy(observ_placeholder=ob)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = ent_coef * meanent
vferr = tf.reduce_mean(tf.square(pi.vf - ret))
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = get_trainable_variables("pi")
# var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")]
# vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")]
var_list = get_pi_trainable_variables("pi")
vf_var_list = get_vf_trainable_variables("pi")
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) #pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
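# fvp computes a Fisher-vector product by double backpropagation: gvp is the
# inner product of the KL gradient with the tangent vector, so its gradient is
# the Hessian of the mean KL times the tangent. At theta = theta_old this
# Hessian coincides with the Fisher information matrix, which cg() inverts
# iteratively in the update loop below.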
assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(get_variables("oldpi"), get_variables("pi"))])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
def allmean(x):
assert isinstance(x, np.ndarray)
if MPI is not None:
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= nworkers
else:
out = np.copy(x)
return out
U.initialize()
if load_path is not None:
pi.load(load_path)
th_init = get_flat()
if MPI is not None:
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
vfadam.sync()
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
if sum([max_iters>0, total_timesteps>0, max_episodes>0])==0:
# nothing to be done
return pi
assert sum([max_iters>0, total_timesteps>0, max_episodes>0]) < 2, \
'out of max_iters, total_timesteps, and max_episodes only one should be specified'
while True:
if callback: callback(locals(), globals())
if total_timesteps and timesteps_so_far >= total_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
logger.log("********** Iteration %i ************"%iters_so_far)
with timed("sampling"):
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret)
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
def fisher_vector_product(p):
return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p
assign_old_eq_new() # set old parameter values to new parameter values
with timed("computegrad"):
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
with timed("cg"):
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==0)
assert np.isfinite(stepdir).all()
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lm
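# Dividing by lm scales the step so the quadratic KL estimate equals max_kl:
#   0.5 * (stepdir/lm)^T F (stepdir/lm) = shs / lm^2 = max_kl.
# The backtracking line search below then shrinks the step if the exact KL or
# the surrogate objective violates the constraints.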
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f"%(expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
with timed("vf"):
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=64):
g = allmean(compute_vflossandgrad(mbob, mbret))
vfadam.update(g, vf_stepsize)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
if MPI is not None:
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
else:
listoflrpairs = [lrlocal]
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if rank==0:
logger.dump_tabular()
return pi
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
def get_variables(scope):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
def get_trainable_variables(scope):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
def get_vf_trainable_variables(scope):
return [v for v in get_trainable_variables(scope) if 'vf' in v.name[len(scope):].split('/')]
def get_pi_trainable_variables(scope):
return [v for v in get_trainable_variables(scope) if 'pi' in v.name[len(scope):].split('/')]
|
from baselines.common.models import mlp, cnn_small
def atari():
return dict(
network = cnn_small(),
timesteps_per_batch=512,
max_kl=0.001,
cg_iters=10,
cg_damping=1e-3,
gamma=0.98,
lam=1.0,
vf_iters=3,
vf_stepsize=1e-4,
ent_coef=0.00,
)
def mujoco():
return dict(
network = mlp(num_hidden=32, num_layers=2),
timesteps_per_batch=1024,
max_kl=0.01,
cg_iters=10,
cg_damping=0.1,
gamma=0.99,
lam=0.98,
vf_iters=5,
vf_stepsize=1e-3,
normalize_observations=True,
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for pybox2d.
For installation instructions, see INSTALL.
Basic install steps:
python setup.py build
If that worked, then:
python setup.py install
"""
import os
import sys
from glob import glob
__author__='Ken Lauer'
__license__='zlib'
__date__="$Date$"
__version__="$Revision$"
import setuptools
from setuptools import (setup, Extension)
from setuptools.command.install import install
setuptools_version = setuptools.__version__
print('Using setuptools (version %s).' % setuptools_version)
if setuptools_version:
if (setuptools_version in ["0.6c%d"%i for i in range(1,9)] # old versions
or setuptools_version=="0.7a1"): # 0.7a1 py 3k alpha version based on old version
print('Patching setuptools.build_ext.get_ext_filename')
from setuptools.command import build_ext
def get_ext_filename(self, fullname):
from setuptools.command.build_ext import (_build_ext, Library, use_stubs, libtype)
filename = _build_ext.get_ext_filename(self,fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
if isinstance(ext,Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn,libtype)
elif use_stubs and ext._links_to_dynamic:
d,fn = os.path.split(filename)
return os.path.join(d,'dl-'+fn)
return filename
build_ext.build_ext.get_ext_filename = get_ext_filename
# release version number
box2d_version = '2.3'
release_number = 8
# create the version string
version_str = "%s.%s" % (box2d_version, release_number)
# setup some paths and names
library_base='library' # the directory where the egg base will be for setuptools develop command
library_name='Box2D' # the final name that the library should end up being
library_path=os.path.join(library_base, library_name) # library/Box2D (e.g.)
source_dir='Box2D' # where all of the C++ and SWIG source resides
swig_source='Box2D.i' # the main SWIG source file
use_kwargs=True # whether or not to default creating kwargs for all functions
def write_init():
# read in the license header
license_header = open(os.path.join(source_dir, 'pybox2d_license_header.txt')).read()
# create the source code for the file
if sys.version_info >= (2, 6):
import_string = "from .%s import *" % library_name
else:
import_string = "from %s import *" % library_name
init_source = [
import_string,
"__author__ = '%s'" % __date__ ,
"__version__ = '%s'" % version_str,
"__version_info__ = (%s,%d)" % (box2d_version.replace('.', ','), release_number),
"__revision__ = '%s'" % __version__,
"__license__ = '%s'" % __license__ ,
"__date__ = '%s'" % __date__ , ]
# and create the __init__ file with the appropriate version string
f=open(os.path.join(library_path, '__init__.py'), 'w')
f.write(license_header)
f.write( '\n'.join(init_source) )
f.close()
source_paths = [
os.path.join(source_dir, 'Dynamics'),
os.path.join(source_dir, 'Dynamics', 'Contacts'),
os.path.join(source_dir, 'Dynamics', 'Joints'),
os.path.join(source_dir, 'Common'),
os.path.join(source_dir, 'Collision'),
os.path.join(source_dir, 'Collision', 'Shapes'),
]
# glob all of the paths and then flatten the list into one
box2d_source_files = [os.path.join(source_dir, swig_source)] + \
sum( [glob(os.path.join(path, "*.cpp")) for path in source_paths], [])
# arguments to pass to SWIG. for old versions of SWIG, -O (optimize) might not be present.
# Defaults:
# -O optimize, -includeall follow all include statements, -globals changes cvar->b2Globals
# -Isource_dir adds source dir to include path, -outdir library_path sets the output directory
# -small makes the Box2D_wrap.cpp file almost unreadable, but faster to compile. If you want
# to try to understand it for whatever reason, I'd recommend removing that option.
swig_arguments = \
'-c++ -I%s -small -O -includeall -ignoremissing -w201 -globals b2Globals -outdir %s' \
% (source_dir, library_path)
if use_kwargs:
# turn off the warnings about functions that can't use kwargs (-w511)
# and let the wrapper know we're using kwargs (-D_SWIG_KWARGS)
swig_arguments += " -keyword -w511 -D_SWIG_KWARGS"
# depending on the platform, add extra compilation arguments. hopefully if the platform
# isn't windows, g++ will be used; -Wno-unused then would suppress some annoying warnings
# about the Box2D source.
if sys.platform in ('win32', 'win64'):
extra_args=['-I.']
else:
extra_args=['-I.', '-Wno-unused']
pybox2d_extension = \
Extension('Box2D._Box2D', box2d_source_files, extra_compile_args=extra_args, language='c++')
LONG_DESCRIPTION = \
""" 2D physics library Box2D %s for usage in Python.
After installing please be sure to try out the testbed demos.
They require either pygame or pyglet and are available on the
homepage.
pybox2d homepage: http://pybox2d.googlecode.com
Box2D homepage: http://www.box2d.org
""" % (box2d_version,)
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: zlib/libpng License",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries :: pygame",
]
class PostInstallHook(install):
'''
Describes actions to be executed after the install.
In this case, install has the wrong order of operations -
it first copies *.py files, then builds *.py files using swig.
This class copies new *.py files one more time after install.
'''
def run(self):
import os
import shutil
recopy_list = ['Box2D.py', '__init__.py']
install.run(self)
dest = os.path.split(self.get_outputs()[0])[0]
for fname in recopy_list:
local_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), library_base, library_name, fname)
installed_path = os.path.join(dest, fname)
print('Re-copying {} --> {}'.format(local_path, installed_path))
shutil.copyfile(local_path, installed_path)
write_init()
setup_dict = dict(
name = "box2d-py",
version = version_str,
author = "Ken Lauer",
author_email = "[email protected]",
description = "Python Box2D",
license = "zlib",
url = "https://github.com/openai/box2d-py",
long_description = LONG_DESCRIPTION,
classifiers = CLASSIFIERS,
packages = ['Box2D', 'Box2D.b2'],
package_dir = {'Box2D': library_path,
'Box2D.b2': os.path.join(library_path, 'b2'),
'Box2D.tests' : 'tests'},
test_suite = 'tests',
options = { 'build_ext': { 'swig_opts' : swig_arguments },
'egg_info' : { 'egg_base' : library_base },
},
ext_modules = [ pybox2d_extension ],
cmdclass = {'install': PostInstallHook},
# use_2to3 = (sys.version_info >= (3,)),
)
# run the actual setup from distutils
setup(**setup_dict)
|
import os
for platform in ['win32', 'win-amd64']:
versions=['2.5', '2.6', '2.7', '3.0', '3.1', '3.2']
def get_path(platform, version):
if platform=='win32':
version_path=''
else:
version_path='-x64'
return r'c:\python%s%s\python.exe'%(version.replace('.', ''), version_path)
interpreters=[get_path(platform, version) for version in versions]
lib_paths=[r'build\lib.%s-%s' % (platform, version) for version in versions]
print('@echo off')
print('echo ---Start--- > test_results.txt')
print('echo ---Start--- > build_results.txt')
do_stuff='''
echo * %(version)s %(platform)s
echo -------- %(version)s %(platform)s -------- >> build_results.txt
echo -------- %(version)s %(platform)s -------- >> test_results.txt
%(interpreter)s setup.py clean -a >> build_results.txt 2>&1
%(interpreter)s setup.py build --force >> build_results.txt 2>&1
type Box2D\pybox2d_license_header.txt > Box2D_.py
type %(lib_path)s\Box2D\Box2D.py >> Box2D_.py
move /y Box2D_.py %(lib_path)s\Box2D\Box2D.py
%(interpreter)s setup.py develop >> build_results.txt 2>&1
%(interpreter)s setup.py test >> test_results.txt 2>&1
%(interpreter)s setup.py bdist_wininst -t"pybox2d" -dinstaller >> build_results.txt 2>&1
%(interpreter)s setup.py bdist_egg >> build_results.txt 2>&1
'''
distutils_cfg='''[build]
compiler=mingw32'''
for version, interpreter, lib_path in zip(versions, interpreters, lib_paths):
python_path=os.path.split(interpreter)[0]
if version < '2.6':
cfg_file=r'%s\lib\distutils\distutils.cfg' % python_path
try:
open(cfg_file, 'w').write(distutils_cfg)
except IOError: # ignore failures when distutils.cfg cannot be written
pass
print(do_stuff % locals())
|
from all_classes import *
print("""
/*
* pybox2d -- http://pybox2d.googlecode.com
*
* Copyright (c) 2010 Ken Lauer / sirkne at gmail dot com
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
%pythoncode %{
def _dir_filter(self):
# Using introspection, mimic dir() by adding up all of the __dicts__
# for the current class and all base classes (type(self).__mro__ returns
# all of the classes that make it up)
# Basically filters by:
# __x__ OK
# __x bad
# _classname bad
def check(s):
if s[:2]=='__':
if s[-2:]=='__':
return True
else:
return False
else:
for typename in typenames:
if typename in s:
return False
return True
keys=sum([list(c.__dict__.keys()) for c in type(self).__mro__], [])
typenames = ["_%s" % c.__name__ for c in type(self).__mro__]
ret=[s for s in list(set(keys)) if check(s)]
ret.sort()
return ret
%}
""")
extend_string="""
%%extend %s {
%%pythoncode %%{
__dir__ = _dir_filter
%%}
}
"""
for c in all_classes:
print(extend_string % c)
|
classname = "b2DebugDraw"
gets = "GetFlags SetFlags ClearFlags AppendFlags".split(" ")
sets = "".split(" ")
kwargs = True
# remove duplicates
gets = list(set(gets))
sets = list(set(sets))
renames = ["%%rename(__%s) %s::%s;" % (s, classname, s) for s in gets+sets
if s not in ('GetAnchorA', 'GetAnchorB', '')]
gets_mod=[]
for s in gets:
if s[:3]=="Get":
gets_mod.append(s[3:])
elif s[:2]=="Is":
gets_mod.append(s[2:])
else:
gets_mod.append(s)
sets_mod=[]
for s in sets:
if s[:3]=="Set":
sets_mod.append(s[3:])
else:
sets_mod.append(s)
done = []
getter_setter = []
getter = []
for i, s in enumerate(gets_mod):
if s in sets_mod:
orig_set=sets[ sets_mod.index(s) ]
orig_get = gets[i]
getter_setter.append( (s, orig_get, orig_set) )
sets[sets_mod.index(s)] = None
else:
getter.append( (s, gets[i]) )
setter = [s for s in sets if s is not None]
if kwargs:
print('''
/**** %s ****/
%%extend %s {
public:
%%pythoncode %%{
def __init__(self, **kwargs):
"""__init__(self, **kwargs) -> %s """
_Box2D.%s_swiginit(self,_Box2D.new_%s())
for key, value in kwargs.items():
setattr(self, key, value)
# Read-write properties
''' % tuple([classname[2:]] + [classname]*4))
else:
print('''
/**** %s ****/
%%extend %s {
public:
%%pythoncode %%{
# Read-write properties
''' % (classname[2:], classname))
for name, g, s in getter_setter:
newname= name[0].lower() + name[1:]
print " %s = property(__%s, __%s)" % (newname, g, s)
print " # Read-only"
for name, g in getter:
newname= name[0].lower() + name[1:]
if newname in ('anchorA', 'anchorB'):
print " %s = property(lambda self: self._b2Joint__%s(), None)" % (newname, name)
else:
print " %s = property(__%s, None)" % (newname, g)
print " # Write-only"
for s in setter:
if not s: continue
if s[:3]=='Set':
name = s[3:]
else:
name = s
newname= name[0].lower() + name[1:]
print " %s = property(None, __%s)" % (newname, s)
print """
%}
}
"""
print " ",
print "\n ".join(renames)
|
from __future__ import print_function
# add "b2ContactPoint", as it's custom
# First, define some classes not seen by swig
ignore = ["b2ContactConstraint", "b2PolygonContact", "b2PolygonAndCircleContact", "b2CircleContact", "b2JointType", "b2BodyType", "b2ContactSolver", "b2PointState", "b2WorldQueryWrapper", "b2WorldRayCastWrapper", "b2SeparationFunction", "b2Block", "b2ContactConstraintPoint", "b2PositionSolverManifold", "b2LimitState"]
# Then check which ones we manually ignore
for line in open('../Box2D/Box2D.i', 'r').readlines():
if '%ignore' in line:
c = line.split('ignore ')[1].strip()
if ';' not in c:
continue
c = c[:c.index(';')].strip()
ignore.append(c)
all_classes= [
"b2AABB",
"b2Block",
"b2BlockAllocator",
"b2Body",
"b2BodyDef",
"b2BodyType",
"b2BroadPhase",
"b2Chunk",
"b2CircleContact",
"b2CircleShape",
"b2ClipVertex",
"b2Color",
"b2Contact",
"b2ContactPoint",
"b2ContactConstraint",
"b2ContactConstraintPoint",
"b2ContactEdge",
"b2ContactFilter",
"b2ContactID",
"b2ContactImpulse",
"b2ContactListener",
"b2ContactManager",
"b2ContactRegister",
"b2ContactSolver",
"b2DebugDraw",
"b2DestructionListener",
"b2DistanceInput",
"b2DistanceJoint",
"b2DistanceJointDef",
"b2DistanceOutput",
"b2DistanceProxy",
"b2DynamicTree",
"b2DynamicTreeNode",
"b2Filter",
"b2Fixture",
"b2FixtureDef",
"b2FrictionJoint",
"b2FrictionJointDef",
"b2GearJoint",
"b2GearJointDef",
"b2Island",
"b2Jacobian",
"b2Joint",
"b2JointDef",
"b2JointEdge",
"b2JointType",
"b2LimitState",
"b2LineJoint",
"b2LineJointDef",
"b2Manifold",
"b2ManifoldPoint",
"b2MassData",
"b2Mat22",
"b2Mat33",
"b2MouseJoint",
"b2MouseJointDef",
"b2Pair",
"b2PointState",
"b2PolygonAndCircleContact",
"b2PolygonContact",
"b2PolygonShape",
"b2Position",
"b2PositionSolverManifold",
"b2PrismaticJoint",
"b2PrismaticJointDef",
"b2PulleyJoint",
"b2PulleyJointDef",
"b2QueryCallback",
"b2RayCastCallback",
"b2RayCastInput",
"b2RayCastOutput",
"b2RevoluteJoint",
"b2RevoluteJointDef",
"b2Segment",
"b2SeparationFunction",
"b2Shape",
"b2Simplex",
"b2SimplexCache",
"b2SimplexVertex",
"b2StackAllocator",
"b2StackEntry",
"b2Sweep",
"b2TimeStep",
"b2TOIInput",
"b2Transform",
"b2Vec2",
"b2Vec3",
"b2Velocity",
"b2Version",
"b2WeldJoint",
"b2WeldJointDef",
"b2World",
"b2WorldManifold",
"b2WorldQueryWrapper",
"b2WorldRayCastWrapper" ]
# Statistics
print("Total classes: %d" % len(all_classes))
print('Ignored classes:', len(ignore))
for c in ignore:
if c not in all_classes:
print('%s not found' % c)
else:
all_classes.remove(c)
print('Remaining classes:', len(all_classes))
# And the classes are available in 'all_classes'
|
from __future__ import print_function
import glob
import re
import os
from all_classes import *
files = glob.glob('../Box2D/Box2D_*.i')
classes = {}
ignore_files = [
'Box2D_kwargs.i',
'Box2D_dir.i',
]
def find_extended(path):
f = open(path, "r")
fn = os.path.split(path)[1]
for line in f.readlines():
m=re.search('%extend\s*(.*)\s*{', line)
if m:
cls=m.groups()[0].strip()
if cls in classes:
if fn not in classes[cls]:
classes[cls].append(fn)
else:
classes[cls] = [fn]
f.close()
for file in files:
if os.path.split(file)[1] not in ignore_files:
find_extended(file)
print("%19s %s" % ('Class', 'Extended in'))
remaining=list(all_classes)
for key, value in list(classes.items()):
print("%20s %s" % (key, ', '.join(value)))
remaining.remove(key)
ignore_unmodified=[s for s in remaining if s[-3:] == 'Def']
#ignore_unmodified += []
print()
print("Unmodified classes")
for cls in remaining:
if cls not in ignore_unmodified:
print(cls)
|
from all_classes import *
print("""
/*
* pybox2d -- http://pybox2d.googlecode.com
*
* Copyright (c) 2010 Ken Lauer / sirkne at gmail dot com
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
%pythoncode %{
def _init_kwargs(self, **kwargs):
for key, value in list(kwargs.items()):
try:
setattr(self, key, value)
except Exception as ex:
raise ex.__class__('Failed on kwargs, class="%s" key="%s": %s'
% (self.__class__.__name__, key, ex))
%}
""")
extend_string="""
%%feature("shadow") %s::%s() {
def __init__(self, **kwargs):
_Box2D.%s_swiginit(self,_Box2D.new_%s())
_init_kwargs(self, **kwargs)
}
"""
director_string="""
%%feature("shadow") %s::%s() {
def __init__(self, **kwargs):
if self.__class__ == %s:
_self = None
else:
_self = self
_Box2D.%s_swiginit(self,_Box2D.new_%s(_self, ))
_init_kwargs(self, **kwargs)
}
"""
all_classes.remove('b2World')
director_classes = ['b2ContactFilter', 'b2ContactListener', 'b2QueryCallback', 'b2DebugDraw', 'b2DestructionListener' ]
abstract_classes = ['b2Joint', 'b2Shape', 'b2RayCastCallback', 'b2Contact']
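# Director classes pass the Python instance (_self) into the C++ constructor
# so that overridden virtual methods dispatch back to Python; abstract classes
# get no shadow constructor since they cannot be instantiated directly, and
# b2World was removed above because it has its own constructor handling.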
for c in abstract_classes:
    # print(director_string % tuple([c]*5))
all_classes.remove(c)
for c in director_classes:
print(director_string % tuple([c]*5))
all_classes.remove(c)
for c in all_classes:
print(extend_string % tuple([c]*4))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import itertools
import sys
class testJoints (unittest.TestCase):
world = None
dbody1 = None
dbody2 = None
sbody1 = None
sbody2 = None
def _fail(self, s):
        # This little helper saves us from an AssertionError during garbage
        # collection by forcing the world (and bodies) to be collected early.
self.world = None
self.dbody1 = None
self.dbody2 = None
self.sbody1 = None
self.sbody2 = None
self.b2 = None
self.fail(s)
def setUp(self):
self.b2 = __import__('Box2D')
try:
self.world = self.b2.b2World(self.b2.b2Vec2(0.0, -10.0), True)
except Exception as ex:
self.fail("Failed to create world (%s)" % ex)
try:
self.dbody1 = self.create_body((-3, 12))
self.dbody1.userData = "dbody1"
self.dbody2 = self.create_body((0, 12))
self.dbody2.userData = "dbody2"
except Exception as ex:
self._fail("Failed to create dynamic bodies (%s)" % ex)
try:
self.sbody1 = self.create_body((0, 0), False)
self.sbody1.userData = "sbody1"
self.sbody2 = self.create_body((1, 4), False)
self.sbody2.userData = "sbody2"
except Exception as ex:
self._fail("Failed to create static bodies (%s)" % ex)
def create_body(self, position, dynamic=True):
bodyDef = self.b2.b2BodyDef()
fixtureDef = self.b2.b2FixtureDef()
if dynamic:
bodyDef.type = self.b2.b2_dynamicBody
fixtureDef.density = 1
else:
bodyDef.type = self.b2.b2_staticBody
fixtureDef.density = 0
bodyDef.position = position
body = self.world.CreateBody(bodyDef)
dynamicBox = self.b2.b2PolygonShape()
dynamicBox.SetAsBox(1, 1)
fixtureDef.shape = dynamicBox
fixtureDef.friction = 0.3
body.CreateFixture(fixtureDef)
return body
def create_circle_body(self, position, dynamic=True):
bodyDef = self.b2.b2BodyDef()
fixtureDef = self.b2.b2FixtureDef()
if dynamic:
bodyDef.type = self.b2.b2_dynamicBody
fixtureDef.density = 1
else:
bodyDef.type = self.b2.b2_staticBody
fixtureDef.density = 0
bodyDef.position = position
body = self.world.CreateBody(bodyDef)
body.CreateFixture(shape=self.b2.b2CircleShape(radius=1.0), density=1.0, friction=0.3)
return body
def step_world(self, steps=10):
timeStep = 1.0 / 60
vel_iters, pos_iters = 6, 2
for i in range(steps):
self.world.Step(timeStep, vel_iters, pos_iters)
def check(self, dfn, joint, prop, joint_prop=""):
a = getattr(dfn, prop)
if joint_prop:
b = getattr(joint, joint_prop)
else:
b = getattr(joint, prop)
        self.assertEqual(a, b, "Property not equal from definition to joint: %s (dfn %s != joint %s)" % (prop, a, b))
# ---- revolute joint ----
def revolute_definition(self, body1, body2, anchor):
dfn=self.b2.b2RevoluteJointDef()
dfn.Initialize(body1, body2, anchor)
dfn.motorSpeed = 1.0 * self.b2.b2_pi
dfn.maxMotorTorque = 10000.0
dfn.enableMotor = False
dfn.lowerAngle = -0.25 * self.b2.b2_pi
dfn.upperAngle = 0.5 * self.b2.b2_pi
dfn.enableLimit = True
dfn.collideConnected = True
return dfn
def revolute_asserts(self, dfn, joint):
self.check(dfn, joint, "motorSpeed")
self.check(dfn, joint, "lowerAngle", "lowerLimit")
self.check(dfn, joint, "upperAngle", "upperLimit")
self.check(dfn, joint, "enableMotor", "motorEnabled")
self.check(dfn, joint, "enableLimit", "limitEnabled")
self.check(dfn, joint, "bodyA")
self.check(dfn, joint, "bodyB")
def revolute_checks(self, dfn, joint):
# check to make sure they are at least accessible
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
joint.GetMotorTorque(1.0)
i = joint.angle
i = joint.speed
i = joint.anchorA
i = joint.anchorB
joint.upperLimit = 2
joint.maxMotorTorque = 10.0
joint.foobar = 2
# ---- prismatic joint ----
def prismatic_definition(self, body1, body2, anchor, axis):
dfn=self.b2.b2PrismaticJointDef()
dfn.Initialize(body1, body2, anchor, axis)
dfn.motorSpeed = 10
dfn.maxMotorForce = 1000.0
dfn.enableMotor = True
dfn.lowerTranslation = 0
dfn.upperTranslation = 20
dfn.enableLimit = True
return dfn
def prismatic_asserts(self, dfn, joint):
self.check(dfn, joint, "motorSpeed")
self.check(dfn, joint, "lowerTranslation", "lowerLimit")
self.check(dfn, joint, "upperTranslation", "upperLimit")
self.check(dfn, joint, "enableMotor", "motorEnabled")
self.check(dfn, joint, "enableLimit", "limitEnabled")
self.check(dfn, joint, "bodyA")
self.check(dfn, joint, "bodyB")
self.check(dfn, joint, "maxMotorForce")
def prismatic_checks(self, dfn, joint):
# check to make sure they are at least accessible
i = joint.GetMotorForce(1.0)
i = joint.anchorA
i = joint.anchorB
i = joint.speed
i = joint.translation
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
# ---- distance joint ----
def distance_definition(self, body1, body2, anchorA, anchorB):
dfn=self.b2.b2DistanceJointDef()
dfn.Initialize(body1, body2, anchorA, anchorB)
dfn.length = (self.b2.b2Vec2(*anchorA) - self.b2.b2Vec2(*anchorB)).length
dfn.frequencyHz = 4.0
dfn.dampingRatio = 0.5
return dfn
def distance_asserts(self, dfn, joint):
self.check(dfn, joint, "bodyA")
self.check(dfn, joint, "bodyB")
self.check(dfn, joint, "length")
self.check(dfn, joint, "frequencyHz", "frequency")
self.check(dfn, joint, "dampingRatio")
def distance_checks(self, dfn, joint):
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
# ---- Rope joint ----
def rope_definition(self, body1, body2, anchorA, anchorB, maxLength):
dfn=self.b2.b2RopeJointDef(bodyA=body1, bodyB=body2, anchorA=anchorA, \
anchorB=anchorB, maxLength=maxLength)
return dfn
def rope_asserts(self, dfn, joint):
self.check(dfn, joint, "bodyA")
self.check(dfn, joint, "bodyB")
self.check(dfn, joint, "maxLength")
def rope_checks(self, dfn, joint):
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
joint.limitState
# ---- pulley joint ----
def pulley_definition(self, body1, body2):
if body2.mass == 0 or body1.mass == 0:
body1 = self.dbody2
body2 = self.dbody1
dfn=self.b2.b2PulleyJointDef()
a, b = 2, 4
y, L = 16, 12
        anchor1 = (body1.position.x, y + b)
        anchor2 = (body2.position.x, y + b)
        groundAnchor1 = (body1.position.x, y + b + L)
        groundAnchor2 = (body2.position.x, y + b + L)
dfn.Initialize(body1, body2, groundAnchor1, groundAnchor2, anchor1, anchor2, 2.0)
return dfn
def pulley_asserts(self, dfn, joint):
self.check(dfn, joint, "bodyA")
self.check(dfn, joint, "bodyB")
self.check(dfn, joint, "groundAnchorA")
self.check(dfn, joint, "groundAnchorB")
self.check(dfn, joint, "ratio")
def pulley_checks(self, dfn, joint):
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
# ---- mouse joint ----
def mouse_definition(self, body1, body2):
dfn=self.b2.b2MouseJointDef()
if body2.mass == 0:
body2 = self.dbody1
if body2 == body1:
body1 = self.sbody1
dfn.bodyA = body1
dfn.bodyB = body2
dfn.target = (2, 1)
dfn.maxForce = 10
return dfn
def mouse_asserts(self, dfn, joint):
self.check(dfn, joint, "target")
self.check(dfn, joint, "maxForce")
self.check(dfn, joint, "frequencyHz", "frequency")
self.check(dfn, joint, "dampingRatio")
def mouse_checks(self, dfn, joint):
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
# ---- wheel joint ----
def wheel_definition(self, body1, body2, anchor, axis):
dfn=self.b2.b2WheelJointDef()
dfn.Initialize(body1, body2, anchor, axis)
dfn.motorSpeed = 0
        dfn.maxMotorTorque = 100.0
dfn.enableMotor = True
dfn.lowerTranslation = -4.0
dfn.upperTranslation = 4.0
dfn.enableLimit = True
return dfn
def wheel_asserts(self, dfn, joint):
self.check(dfn, joint, "motorSpeed")
self.check(dfn, joint, "maxMotorTorque")
self.check(dfn, joint, "enableMotor", "motorEnabled")
self.check(dfn, joint, "bodyA")
self.check(dfn, joint, "bodyB")
self.check(dfn, joint, "frequencyHz", "springFrequencyHz")
self.check(dfn, joint, "dampingRatio", "springDampingRatio")
def wheel_checks(self, dfn, joint):
# check to make sure they are at least accessible
i = joint.anchorA
i = joint.anchorB
i = joint.speed
i = joint.translation
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
# ---- weld joint ----
def weld_definition(self, body1, body2):
dfn=self.b2.b2WeldJointDef()
dfn.bodyA = body1
dfn.bodyB = body2
return dfn
def weld_asserts(self, dfn, joint):
pass
def weld_checks(self, dfn, joint):
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
# ---- friction joint ----
def friction_definition(self, body1, body2):
dfn=self.b2.b2FrictionJointDef()
dfn.bodyA = body1
dfn.bodyB = body2
dfn.localAnchorA = dfn.localAnchorB = (0,0)
dfn.collideConnected = True
dfn.maxForce = 10.0
dfn.maxTorque = 20.0
return dfn
def friction_asserts(self, dfn, joint):
self.check(dfn, joint, "maxForce")
self.check(dfn, joint, "maxTorque")
def friction_checks(self, dfn, joint):
joint.GetReactionForce(1.0)
joint.GetReactionTorque(1.0)
def do_joint(self, name, init_args):
'''test a joint by name'''
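        # Resolve the per-joint helpers (e.g. revolute_definition,
        # revolute_asserts, revolute_checks) and the matching world factory
        # (e.g. CreateRevoluteJoint) purely from the joint's name.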
get_dfn = getattr(self, "%s_definition"%name)
asserts = getattr(self, "%s_asserts"%name)
checks = getattr(self, "%s_checks"%name)
create_name=getattr(self.world, "Create%sJoint" % (name.capitalize()))
for bodyA, bodyB in itertools.permutations( ( self.sbody1, self.sbody2, self.dbody1, self.dbody2), 2 ):
try:
dfn = get_dfn(body1=bodyA, body2=bodyB, **init_args)
except Exception as ex:
self._fail("%s: Failed on bodies %s and %s, joint definition (%s)" % (name, bodyA.userData, bodyB.userData, ex))
kw_args = {}
try:
kw_args = dfn.to_kwargs()
joint = create_name(**kw_args)
self.world.DestroyJoint(joint)
except Exception as ex:
self._fail("%s: Failed on bodies %s and %s, joint creation by kwargs (%s) kw=%s" % (name, bodyA.userData, bodyB.userData, ex, kw_args))
try:
joint = self.world.CreateJoint(dfn)
except Exception as ex:
self._fail("%s: Failed on bodies %s and %s, joint creation (%s)" % (name, bodyA.userData, bodyB.userData, ex))
try:
asserts(dfn, joint)
except Exception as ex:
self.world.DestroyJoint(joint)
raise
try:
checks(dfn, joint)
except Exception as ex:
self.world.DestroyJoint(joint)
self._fail("%s: Failed on bodies %s and %s, joint checks (%s)" % (name, bodyA.userData, bodyB.userData, ex))
try:
self.step_world(5)
except Exception as ex:
# self.world.DestroyJoint(joint) # -> locked
try:
                    # OK, this will cause an assertion to go off during unwinding
                    # (in the b2StackAllocator destructor), so trigger it once,
                    # catch that, and then finally fail.
self.fail()
except AssertionError as ex:
self._fail("%s: Failed on bodies %s and %s, joint simulation (%s)" % (name, bodyA.userData, bodyB.userData, ex))
try:
self.world.DestroyJoint(joint)
            except Exception as ex:
                self._fail("%s: Failed on bodies %s and %s, joint deletion (%s)" % (name, bodyA.userData, bodyB.userData, ex))
# --- the actual tests ---
def test_revolute(self):
self.do_joint("revolute", { 'anchor' : (0, 12) })
def test_prismatic(self):
self.do_joint("prismatic", { 'anchor' : (0, 0), 'axis' : (1,0) })
def test_distance(self):
self.do_joint("distance", { 'anchorA' : (-10, 0), 'anchorB' : (-0.5,-0.5) })
def test_rope(self):
self.do_joint("rope", { 'anchorA' : (-10, 0), 'anchorB' : (-0.5,-0.5), 'maxLength' : 5.0 })
def test_pulley(self):
self.do_joint("pulley", {} )
def test_mouse(self):
self.do_joint("mouse", {} )
def test_wheel(self):
self.do_joint("wheel", { 'anchor' : (0, 8.5), 'axis' : (2,1) })
def test_weld(self):
self.do_joint("weld", {} )
def test_friction(self):
self.do_joint("friction", {} )
def test_emptyjoint(self):
try:
self.world.CreateJoint( self.b2.b2RevoluteJointDef() )
except ValueError:
pass # good
else:
raise Exception('Empty joint should have raised an exception')
def test_gear(self):
# creates 2 revolute joints and then joins them, so it's done separately
ground=self.world.CreateBody( self.b2.b2BodyDef() )
shape=self.b2.b2EdgeShape(vertices=((50.0, 0.0), (-50.0, 0.0)))
ground.CreateFixturesFromShapes(shapes=shape)
body1=self.create_circle_body((-3, 12))
body2=self.create_circle_body(( 0, 12))
jd1=self.b2.b2RevoluteJointDef()
jd1.Initialize(ground, body1, body1.position)
joint1 = self.world.CreateJoint(jd1)
jd2=self.b2.b2RevoluteJointDef()
jd2.Initialize(ground, body2, body2.position)
joint2 = self.world.CreateJoint(jd2)
gjd=self.b2.b2GearJointDef()
gjd.bodyA = body1
gjd.bodyB = body2
gjd.joint1 = joint1
gjd.joint2 = joint2
gjd.ratio = 2.0
gj = self.world.CreateJoint(gjd)
self.step_world(10)
self.check(gjd, gj, "ratio")
gj.GetReactionForce(1.0)
gj.GetReactionTorque(1.0)
self.world.DestroyJoint(gj)
self.world.DestroyJoint(joint2)
self.world.DestroyJoint(joint1)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from Box2D import *
import Box2D
class cl (b2ContactListener):
pass
class test_body (unittest.TestCase):
def setUp(self):
pass
def test_world(self):
world = b2World(gravity=(0,-10), doSleep=True)
world = b2World((0,-10), True)
world = b2World((0,-10), doSleep=True)
def test_extended(self):
world = b2World()
fixture1=b2FixtureDef(shape=b2CircleShape(radius=1), density=1, friction=0.3)
fixture2=b2FixtureDef(shape=b2CircleShape(radius=2), density=1, friction=0.3)
shape1=b2PolygonShape(box=(5,1))
shape2=b2PolygonShape(box=(5,1))
shapefixture=b2FixtureDef(density=2.0, friction=0.3)
world.CreateStaticBody(fixtures=[fixture1, fixture2],
shapes=[shape1, shape2], shapeFixture=shapefixture)
        # make sure that 4 fixtures were created on the new body
self.assertEqual(len(world.bodies[-1].fixtures), 4)
world.CreateKinematicBody(fixtures=[fixture1, fixture2],
shapes=[shape1, shape2], shapeFixture=shapefixture)
self.assertEqual(len(world.bodies[-1].fixtures), 4)
world.CreateDynamicBody(fixtures=[fixture1, fixture2],
shapes=[shape1, shape2], shapeFixture=shapefixture)
self.assertEqual(len(world.bodies[-1].fixtures), 4)
def test_body(self):
self.cont_list=cl()
world = b2World(gravity=(0,-10), doSleep=True, contactListener=self.cont_list)
groundBody = world.CreateBody(b2BodyDef(position=(0,-10)))
groundBody.CreateFixturesFromShapes(shapes=b2PolygonShape(box=(50,10)))
body = world.CreateBody(b2BodyDef(type=b2_dynamicBody, position=(0,4)))
body.CreateFixture(b2FixtureDef(shape=b2CircleShape(radius=1), density=1, friction=0.3))
timeStep = 1.0 / 60
vel_iters, pos_iters = 6, 2
for i in range(60):
world.Step(timeStep, vel_iters, pos_iters)
world.ClearForces()
def test_new_createfixture(self):
world = b2World(gravity=(0,-10), doSleep=True)
body=world.CreateDynamicBody(position=(0,0))
body.CreateCircleFixture(radius=0.2, friction=0.2, density=1.0)
body.fixtures[0]
body.fixtures[0].friction
body.fixtures[0].density
body.fixtures[0].shape.radius
body.CreatePolygonFixture(box=(1,1), friction=0.2, density=1.0)
body.fixtures[1]
body.fixtures[1].friction
body.fixtures[1].density
body.fixtures[1].shape.vertices
v1=(-10, 0)
v2=(-7, -1)
v3=(-4, 0)
v4=(0, 0)
body.CreateEdgeFixture(vertices=[v1,v2,v3,v4], friction=0.3, density=1.0)
body.fixtures[2]
body.fixtures[2].friction
body.fixtures[2].density
body.fixtures[2].shape.vertices
#TODO Loop shapes
def test_fixture_without_shape(self):
world = b2World(gravity=(0,-10), doSleep=True)
body = world.CreateDynamicBody(position=(0,0))
self.assertRaises(ValueError, body.CreateFixture)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from Box2D import *
class cl (b2ContactListener):
pass
class test_kwargs (unittest.TestCase):
def setUp(self):
pass
def test_kwargs(self):
self.cont_list=cl()
world = b2World(gravity=(0,-10), doSleep=True, contactListener=self.cont_list)
groundBody = world.CreateBody(b2BodyDef(position=(0,-10)))
groundBody.CreateFixturesFromShapes(b2PolygonShape(box=(50,10)))
body = world.CreateBody(b2BodyDef(type=b2_dynamicBody, position=(0,4),
fixtures=[]))
body = world.CreateBody(
type=b2_dynamicBody,
position=(0,4),
fixtures=b2FixtureDef(shape=b2PolygonShape(box=(2,1)), density=1.0)
)
body = world.CreateBody(
type=b2_dynamicBody,
position=(0,4),
shapes=(b2PolygonShape(box=(2,1)), b2PolygonShape(box=(2,1))),
shapeFixture=b2FixtureDef(density=1.0),
)
body = world.CreateBody(
type=b2_dynamicBody,
position=(0,4),
fixtures=b2FixtureDef(shape=b2CircleShape(radius=1), density=1, friction=0.3),
shapes=(b2PolygonShape(box=(2,1)), b2PolygonShape(box=(2,1))),
shapeFixture=b2FixtureDef(density=1.0),
)
body.CreateFixture(shape=b2CircleShape(radius=1), density=1, friction=0.3)
timeStep = 1.0 / 60
vel_iters, pos_iters = 6, 2
for i in range(60):
world.Step(timeStep, vel_iters, pos_iters)
world.ClearForces()
def test_body(self):
world = b2World(gravity=(0,-10), doSleep=True)
body = world.CreateBody(b2BodyDef())
body2 = world.CreateBody(position=(1,1))
def test_joints(self):
world = b2World(gravity=(0,-10), doSleep=True)
body = world.CreateBody(b2BodyDef())
body2 = world.CreateBody(position=(1,1))
world.CreateJoint(type=b2RevoluteJoint, bodyA=body, bodyB=body2)
world.CreateJoint(type=b2RevoluteJointDef, bodyA=body, bodyB=body2)
kwargs=dict(type=b2RevoluteJointDef, bodyA=body, bodyB=body2)
world.CreateJoint(**kwargs)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from Box2D import b2Vec2, b2Vec3, b2Mat22, b2Mat33
class testMatrix (unittest.TestCase):
def checkAlmostEqual(self, v1, v2, msg):
for a, b in zip(v1, v2):
self.assertAlmostEqual(a, b, places=3,
msg="%s, a=%f b=%f from %s, %s" % (msg, a, b, v1, v2))
def test_mat22_identity(self):
i2 = b2Mat22()
self.checkAlmostEqual(i2.col1, (1.0, 0.0), msg='mat22 col1')
self.checkAlmostEqual(i2.col2, (0.0, 1.0), msg='mat22 col2')
def test_matrix(self):
x, y, z = 1.0, 2.0, 3.0
v2 = b2Vec2(x, y)
self.checkAlmostEqual(v2.skew, (-v2.y, v2.x), msg='skew')
m2 = b2Mat22((x, y), (y, x))
# Note that you can't do:
# m2 = b2Mat22(col1=(x, y), col2=(y, x))
# as SWIG will not allow the kwargs option to be used when there are multiple constructors
m = m2 + m2
self.checkAlmostEqual(m.col1, (x+x, y+y), msg='b2Mat22 +')
self.checkAlmostEqual(m.col2, (y+y, x+x), msg='b2Mat22 +')
m = m2 - m2
self.checkAlmostEqual(m.col1, (0,0), msg='b2Mat22 -')
self.checkAlmostEqual(m.col2, (0,0), msg='b2Mat22 -')
# x y * x
# y x y
v = m2 * v2
self.checkAlmostEqual(v, (x*x + y*y, y*x + y*x), msg='b2Mat22 * b2Vec2')
i=m2.inverse
i=m2.angle
m = m2 * m2
self.checkAlmostEqual(m.col1, (x*x + y*y, y*x + y*x), msg='b2Mat22 * b2Mat22')
self.checkAlmostEqual(m.col2, (x*y + y*x, y*y + x*x), msg='b2Mat22 * b2Mat22')
m2 += m2
self.checkAlmostEqual(m2.col1, (x+x, y+y), msg='b2Mat22 +=')
self.checkAlmostEqual(m2.col2, (y+y, x+x), msg='b2Mat22 +=')
m2 -= m2
self.checkAlmostEqual(m2.col1, (0,0), msg='b2Mat22 -=')
self.checkAlmostEqual(m2.col2, (0,0), msg='b2Mat22 -=')
def test_mat33_identity(self):
i3 = b2Mat33()
self.checkAlmostEqual(i3.col1, (1.0, 0.0, 0.0), msg='mat33 col1')
self.checkAlmostEqual(i3.col2, (0.0, 1.0, 0.0), msg='mat33 col2')
self.checkAlmostEqual(i3.col3, (0.0, 0.0, 1.0), msg='mat33 col3')
def test_mat33(self):
x, y, z = 1.0, 2.0, 3.0
v3 = b2Vec3(x, y, z)
m3 = b2Mat33((x, y, z), (z, y, x), (y, x, z))
m = m3 + m3
self.checkAlmostEqual(m.col1, (x+x, y+y, z+z), msg='b2Mat33 +')
self.checkAlmostEqual(m.col2, (z+z, y+y, x+x), msg='b2Mat33 +')
self.checkAlmostEqual(m.col3, (y+y, x+x, z+z), msg='b2Mat33 +')
m = m3 - m3
self.checkAlmostEqual(m.col1, (0,0,0), msg='b2Mat33 -')
self.checkAlmostEqual(m.col2, (0,0,0), msg='b2Mat33 -')
self.checkAlmostEqual(m.col3, (0,0,0), msg='b2Mat33 -')
m3 += m3
self.checkAlmostEqual(m3.col1, (x+x, y+y, z+z), msg='b2Mat33 +=')
self.checkAlmostEqual(m3.col2, (z+z, y+y, x+x), msg='b2Mat33 +=')
self.checkAlmostEqual(m3.col3, (y+y, x+x, z+z), msg='b2Mat33 +=')
m3 -= m3
self.checkAlmostEqual(m3.col1, (0,0,0), msg='b2Mat33 -=')
self.checkAlmostEqual(m3.col2, (0,0,0), msg='b2Mat33 -=')
self.checkAlmostEqual(m3.col3, (0,0,0), msg='b2Mat33 -=')
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import Box2D
import sys
class testWorld (unittest.TestCase):
def setUp(self):
pass
def test_world(self):
try:
world = Box2D.b2World(Box2D.b2Vec2(0.0, -10.0), True)
world = Box2D.b2World((0.0, -10.0), True)
world = Box2D.b2World([0.0, -10.0], False)
world = Box2D.b2World([0.0, -10.0])
world = Box2D.b2World()
world = Box2D.b2World(gravity=[0.0, -10.0])
except Exception:
self.fail("Failed to create world (%s)" % sys.exc_info()[1])
def test_helloworld(self):
gravity = Box2D.b2Vec2(0, -10)
doSleep = True
world = Box2D.b2World(gravity, doSleep)
groundBodyDef = Box2D.b2BodyDef()
groundBodyDef.position = [0, -10]
groundBody = world.CreateBody(groundBodyDef)
groundBox = Box2D.b2PolygonShape()
groundBox.SetAsBox(50, 10)
groundBody.CreateFixturesFromShapes(groundBox)
bodyDef = Box2D.b2BodyDef()
bodyDef.type = Box2D.b2_dynamicBody
bodyDef.position = (0, 4)
body = world.CreateBody(bodyDef)
dynamicBox = Box2D.b2PolygonShape()
dynamicBox.SetAsBox(1, 1)
fixtureDef = Box2D.b2FixtureDef()
fixtureDef.shape = dynamicBox
fixtureDef.density = 1
fixtureDef.friction = 0.3
body.CreateFixture(fixtureDef)
timeStep = 1.0 / 60
vel_iters, pos_iters = 6, 2
for i in range(60):
world.Step(timeStep, vel_iters, pos_iters)
world.ClearForces()
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import unittest
from Box2D import (b2Vec2, b2Vec3)
class testVector (unittest.TestCase):
def test_vec2_zero(self):
v2 = b2Vec2()
self.assertAlmostEqual(v2.x, 0.0)
self.assertAlmostEqual(v2.y, 0.0)
def test_vec2(self):
x, y = 1.0, 2.0
v = b2Vec2(x, y)
v += (0.1, 0.1)
self.assertAlmostEqual(v.x, x + 0.1, places=2)
self.assertAlmostEqual(v.y, y + 0.1, places=2)
v -= (0.1, 0.1)
self.assertAlmostEqual(v.x, x, places=2)
self.assertAlmostEqual(v.y, y, places=2)
v = b2Vec2(x, y)
v /= 2.0
self.assertAlmostEqual(v.x, x / 2.0)
self.assertAlmostEqual(v.y, y / 2.0)
v *= 2.0
self.assertAlmostEqual(v.x, x)
self.assertAlmostEqual(v.y, y)
v2 = b2Vec2(x, y)
v = v2 + v2
self.assertAlmostEqual(v.x, x * 2)
self.assertAlmostEqual(v.y, y * 2)
v = v2 - v2
self.assertAlmostEqual(v.x, 0)
self.assertAlmostEqual(v.y, 0)
v = v2 / 2.0
self.assertAlmostEqual(v.x, x / 2)
self.assertAlmostEqual(v.y, y / 2)
v = v2 * 2.0
self.assertAlmostEqual(v.x, x * 2)
self.assertAlmostEqual(v.y, y * 2)
v = 0.5 * v2
self.assertAlmostEqual(v.x, x * 0.5)
self.assertAlmostEqual(v.y, y * 0.5)
v = 2.0 * v2
self.assertAlmostEqual(v.x, x * 2)
self.assertAlmostEqual(v.y, y * 2)
def test_vec3_zero(self):
v3 = b2Vec3()
self.assertAlmostEqual(v3.x, 0.0)
self.assertAlmostEqual(v3.y, 0.0)
self.assertAlmostEqual(v3.z, 0.0)
def test_vec3(self):
x, y, z = 1.0, 2.0, 3.0
v3 = b2Vec3(x, y, z)
v = v3 + v3
self.assertAlmostEqual(v.x, x * 2)
self.assertAlmostEqual(v.y, y * 2)
self.assertAlmostEqual(v.z, z * 2)
v = v3 - v3
self.assertAlmostEqual(v.x, 0)
self.assertAlmostEqual(v.y, 0)
self.assertAlmostEqual(v.z, 0)
v = v3 / 2.0
self.assertAlmostEqual(v.x, x / 2)
self.assertAlmostEqual(v.y, y / 2)
self.assertAlmostEqual(v.z, z / 2)
v = v3 * 2.0
self.assertAlmostEqual(v.x, x * 2)
self.assertAlmostEqual(v.y, y * 2)
self.assertAlmostEqual(v.z, z * 2)
v = 0.5 * v3
self.assertAlmostEqual(v.x, x * 0.5)
self.assertAlmostEqual(v.y, y * 0.5)
self.assertAlmostEqual(v.z, z * 0.5)
v = 2.0 * v3
self.assertAlmostEqual(v.x, x * 2)
self.assertAlmostEqual(v.y, y * 2)
self.assertAlmostEqual(v.z, z * 2)
v = b2Vec3(x, y, z)
v += (0.1, 0.1, 0.1)
self.assertAlmostEqual(v.x, x + 0.1, places=2)
self.assertAlmostEqual(v.y, y + 0.1, places=2)
self.assertAlmostEqual(v.z, z + 0.1, places=2)
v -= (0.1, 0.1, 0.1)
self.assertAlmostEqual(v.x, x, places=2)
self.assertAlmostEqual(v.y, y, places=2)
self.assertAlmostEqual(v.z, z, places=2)
v /= 1
self.assertAlmostEqual(v.x, x, places=2)
self.assertAlmostEqual(v.y, y, places=2)
self.assertAlmostEqual(v.z, z, places=2)
v *= 1
self.assertAlmostEqual(v.x, x, places=2)
self.assertAlmostEqual(v.y, y, places=2)
self.assertAlmostEqual(v.z, z, places=2)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from Box2D import b2Color
class testColor (unittest.TestCase):
def checkAlmostEqual(self, v1, v2, msg, places=3):
for i, (a, b) in enumerate(zip(v1, v2)):
self.assertAlmostEqual(a, b, places=places,
msg="(index %d) %s, a=%f b=%f from %s, %s" % (i, msg, a, b, v1, v2))
def test_color(self):
x, y, z = 1.0, 2.0, 3.0
c1 = b2Color(x, y, z)
c2 = b2Color(z, y, x)
c = c1 + c2
self.checkAlmostEqual(c, (x+z, y+y, z+x), msg='b2Color +')
c = c1 - c2
self.checkAlmostEqual(c, (x-z, y-y, z-x), msg='b2Color -')
c = 2.0 * c1
self.checkAlmostEqual(c, (x+x, y+y, z+z), msg='float * b2Color')
c = c1 * 2.0
self.checkAlmostEqual(c, (x+x, y+y, z+z), msg='b2Color * float')
c = c1 / 2.0
self.checkAlmostEqual(c, (x/2.0, y/2.0, z/2.0), msg='b2Color / float')
c = c1.copy()
c *= 2.0
self.checkAlmostEqual(c, (x+x, y+y, z+z), msg='b2Color *= float')
c = b2Color(c1)
c /= 2.0
self.checkAlmostEqual(c, (x/2.0, y/2.0, z/2.0), msg='b2Color /= float')
c1 += (1.0, 1.0, 1.0)
self.checkAlmostEqual(c1, (x+1, y+1, z+1), msg='b2Color +=')
c1 -= (1.0, 1.0, 1.0)
self.checkAlmostEqual(c1, (x, y, z), msg='b2Color -=')
        bytes = b2Color(1, 1, 1).bytes
        self.assertEqual(bytes, [255, 255, 255], msg='bytes (1,1,1)=>%s' % bytes)
        bytes = b2Color(0, 0, 0).bytes
        self.assertEqual(bytes, [0, 0, 0], msg='bytes (0,0,0)=>%s' % bytes)
self.assertEqual(c1[0], x)
self.assertEqual(c1[1], y)
self.assertEqual(c1[2], z)
c1.list = (x*2, y*2, z*2)
self.checkAlmostEqual(c1, (x+x, y+y, z+z), msg='b2Color.list')
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
class testBasic (unittest.TestCase):
# def setUp(self):
# pass
def test_import(self):
try:
import Box2D
except ImportError:
self.fail("Unable to import Box2D library (%s)" % sys.exc_info()[1])
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from Box2D import *
from math import cos, sin
import sys
class ContactListener(b2ContactListener):
pass
class testPolyshape (unittest.TestCase):
def setUp(self):
pass
def dotest(self, world, v):
body = world.CreateDynamicBody(position=(0,0),
shapes=b2PolygonShape(vertices=v) )
for v1, v2 in zip(v, body.fixtures[0].shape.vertices):
if v1 != v2:
                raise Exception('Vertices before and after creation unequal. Before and after zipped=%s'
                                % list(zip(v, body.fixtures[0].shape.vertices)))
def test_vertices(self):
body = None
self.cont_list=ContactListener()
world = b2World(gravity=(0,-10), doSleep=True, contactListener=self.cont_list)
try:
# bad vertices list
body = world.CreateDynamicBody(
position=(0,4),
shapes=[b2PolygonShape(vertices=(2,1)),
b2PolygonShape(box=(2,1))
]
)
except ValueError:
pass # good
else:
raise Exception("Should have failed with ValueError / length 1")
self.dotest(world, [(1,0),(1,1),(-1,1)] )
self.dotest(world, [b2Vec2(1,0),(1,1),b2Vec2(-1,1)] )
try:
self.dotest(world, [(0,1,5),(1,1)] )
except ValueError:
pass # good
else:
raise Exception("Should have failed with ValueError / length 3")
pi=b2_pi
n=b2_maxPolygonVertices
        # use ints so that floating-point representation inconsistencies
        # don't make the vertex check fail
v = [(int(20*cos(x*2*pi/n)), int(20*sin(x*2*pi/n))) for x in range(n)]
self.dotest(world, v)
try:
self.dotest(world, [(0,1)]*(b2_maxPolygonVertices+1) )
except ValueError:
pass # good
else:
raise Exception("Should have failed with ValueError / max+1")
# convex hull is used now, this is no longer valid
#try:
# shape=b2PolygonShape(vertices=[(1,0),(0,-1),(-1,0)] )
#except ValueError:
# pass # good, not convex
#else:
# raise Exception("Should have failed with ValueError / checkpolygon")
shape=b2PolygonShape(vertices=[(0,0), (0,1), (-1,0)] )
temp=shape.valid
def checkAlmostEqual(self, v1, v2, msg, places=3):
if hasattr(v1, '__len__'):
for i, (a, b) in enumerate(zip(v1, v2)):
self.assertAlmostEqual(a, b, places=places,
msg="(index %d) %s, a=%f b=%f from %s, %s" % (i, msg, a, b, v1, v2))
else:
self.assertAlmostEqual(v1, v2, places=places,
msg="%s, a=%f b=%f" % (msg, v1, v2))
def test_distance(self):
# Transform A -- a simple translation/offset of (0,-0.2)
self.transformA = b2Transform()
self.transformA.SetIdentity()
self.transformA.position = (0, -0.2)
# Transform B -- a translation and a rotation
self.transformB = b2Transform()
self.positionB = b2Vec2(12.017401,0.13678508)
self.angleB = -0.0109265
self.transformB.Set(self.positionB, self.angleB)
# The two shapes, transformed by the respective transform[A,B]
self.polygonA = b2PolygonShape(box=(10,0.2))
self.polygonB = b2PolygonShape(box=(2,0.1))
# Calculate the distance between the two shapes with the specified transforms
dist_result=b2Distance(shapeA=self.polygonA,
transformA=self.transformA,
idxA=0,
shapeB=self.polygonB,
transformB=self.transformB,
idxB=0,
useRadii=True)
self.checkAlmostEqual(dist_result[0], (10, 0.01), 'point A', places=2)
self.checkAlmostEqual(dist_result[1], (10, 0.05), 'point B', places=1)
self.checkAlmostEqual(dist_result[2], 0.04, 'distance', places=2)
assert(dist_result[3] == 2)
input_ = b2DistanceInput(proxyA=b2DistanceProxy(self.polygonA, 0),
transformA=self.transformA,
proxyB=b2DistanceProxy(self.polygonB, 0),
transformB=self.transformB,
useRadii=True)
assert(dist_result == b2Distance(input_))
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import Box2D as b2
import sys
class testEdgeChain (unittest.TestCase):
def setUp(self):
pass
def test_create_edge_chain(self):
world = b2.b2World()
ground = world.CreateBody(position=(0, 20))
try:
ground.CreateEdgeChain([])
except ValueError:
pass #good
except Exception:
self.fail("Failed to create empty edge chain (%s)" % sys.exc_info()[1])
try:
ground.CreateEdgeChain(
[ (-20,-20),
(-20, 20),
( 20, 20),
( 20,-20),
(-20,-20) ]
)
except Exception:
self.fail("Failed to create valid edge chain (%s)" % sys.exc_info()[1])
def test_b2EdgeShape(self):
world = b2.b2World()
v1=(-10.0, 0.0)
v2=(-7.0, -1.0)
v3=(-4.0, 0.0)
ground=world.CreateStaticBody(shapes=
[b2.b2EdgeShape(vertices=[None, v1, v2, v3])])
if __name__ == '__main__':
unittest.main()
|
raise NotImplementedError('This should be auto-generated by swig')
|
raise NotImplementedError
|
# pybox2d -- http://pybox2d.googlecode.com
#
# Copyright (c) 2010 Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
__author__='Ken Lauer'
__license__='zlib'
__date__="$Date$"
__version__="$Revision$"
__doc__="""
This module holds the full usable contents of pybox2d.
It offers an alternative syntax in the form of:
from Box2D.b2 import *
a = vec2(1,1) + vec2(2,2)
This is fully equivalent to:
from Box2D import *
a = b2Vec2(1,1) + b2Vec2(2,2)
All classes from the main module are available here, with their
b2 or b2_ prefixes stripped. Beware that importing * from a
module is generally frowned upon -- this is mainly here for
convenience in debugging sessions, where typing b2Vec2
repeatedly gets very old very quickly (trust me, I know.)
"""
# Populated by the parent package (see the end of ../Box2D.py)
pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a simple example of building and running a simulation using Box2D. Here
we create a large ground box and a small dynamic box.
** NOTE **
There is no graphical output for this simple example, only text.
"""
from Box2D import (b2PolygonShape, b2World)
world = b2World() # default gravity is (0,-10) and doSleep is True
groundBody = world.CreateStaticBody(position=(0, -10),
shapes=b2PolygonShape(box=(50, 10)),
)
# Create a dynamic body at (0, 4)
body = world.CreateDynamicBody(position=(0, 4))
# And add a box fixture onto it (with a nonzero density, so it will move)
box = body.CreatePolygonFixture(box=(1, 1), density=1, friction=0.3)
# Prepare for simulation. Typically we use a time step of 1/60 of a second
# (60Hz) and 6 velocity/2 position iterations. This provides a high quality
# simulation in most game scenarios.
timeStep = 1.0 / 60
vel_iters, pos_iters = 6, 2
# This is our little game loop.
for i in range(60):
# Instruct the world to perform a single step of simulation. It is
# generally best to keep the time step and iterations fixed.
world.Step(timeStep, vel_iters, pos_iters)
# Clear applied body forces. We didn't apply any forces, but you should
# know about this function.
world.ClearForces()
# Now print the position and angle of the body.
print(body.position, body.angle)
# You can also work closer to the C++ Box2D library, not using the niceties
# supplied by pybox2d. Creating a world and a few bodies becomes much more
# verbose:
'''
from Box2D import (b2BodyDef, b2FixtureDef)
# Construct a world object, which will hold and simulate the rigid bodies.
world = b2World(gravity=(0, -10), doSleep=True)
# Define the ground body.
groundBodyDef = b2BodyDef()
groundBodyDef.position = (0, -10)
# Make a body fitting this definition in the world.
groundBody = world.CreateBody(groundBodyDef)
# Create a big static box to represent the ground
groundBox = b2PolygonShape(box=(50, 10))
# And create a fixture definition to hold the shape
groundBoxFixture = b2FixtureDef(shape=groundBox)
# Add the ground shape to the ground body.
groundBody.CreateFixture(groundBoxFixture)
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, Keys, main)
from math import sqrt
from Box2D import (b2FixtureDef, b2PolygonShape,
b2Transform, b2Mul,
b2_pi)
class ApplyForce (Framework):
name = "ApplyForce"
description = "Use w, a, and d to control the ship."
def __init__(self):
super(ApplyForce, self).__init__()
self.world.gravity = (0.0, 0.0)
# The boundaries
ground = self.world.CreateBody(position=(0, 20))
ground.CreateEdgeChain(
[(-20, -20),
(-20, 20),
(20, 20),
(20, -20),
(-20, -20)]
)
# TODO: make note of transform.R.set() -> transform.angle =
xf1 = b2Transform()
xf1.angle = 0.3524 * b2_pi
xf1.position = xf1.R * (1.0, 0.0)
xf2 = b2Transform()
xf2.angle = -0.3524 * b2_pi
xf2.position = xf2.R * (-1.0, 0.0)
self.body = self.world.CreateDynamicBody(
position=(0, 2),
angle=b2_pi,
angularDamping=5,
linearDamping=0.1,
shapes=[b2PolygonShape(vertices=[xf1 * (-1, 0), xf1 * (1, 0),
xf1 * (0, .5)]),
b2PolygonShape(vertices=[xf2 * (-1, 0), xf2 * (1, 0),
xf2 * (0, .5)])],
shapeFixture=b2FixtureDef(density=2.0),
)
gravity = 10.0
fixtures = b2FixtureDef(shape=b2PolygonShape(box=(0.5, 0.5)),
density=1, friction=0.3)
for i in range(10):
body = self.world.CreateDynamicBody(
position=(0, 5 + 1.54 * i), fixtures=fixtures)
# For a circle: I = 0.5 * m * r * r ==> r = sqrt(2 * I / m)
r = sqrt(2.0 * body.inertia / body.mass)
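            # Emulate top-down friction (world gravity here is zero): let the
            # joint resist up to m*g of force and, treating the box as a disc
            # of the same inertia, up to m*g*r of torque.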
self.world.CreateFrictionJoint(
bodyA=ground,
bodyB=body,
localAnchorA=(0, 0),
localAnchorB=(0, 0),
collideConnected=True,
maxForce=body.mass * gravity,
maxTorque=body.mass * r * gravity
)
def Keyboard(self, key):
if not self.body:
return
if key == Keys.K_w:
f = self.body.GetWorldVector(localVector=(0.0, -200.0))
p = self.body.GetWorldPoint(localPoint=(0.0, 2.0))
self.body.ApplyForce(f, p, True)
elif key == Keys.K_a:
self.body.ApplyTorque(50.0, True)
elif key == Keys.K_d:
self.body.ApplyTorque(-50.0, True)
if __name__ == "__main__":
main(ApplyForce)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A small, self-contained example showing how to use OpencvDrawFuncs
to integrate pybox2d into an OpenCV main loop.
In short:
One static body:
+ One fixture: big polygon to represent the ground
Two dynamic bodies:
+ One fixture: a polygon
+ One fixture: a circle
And some drawing code that extends the shape classes.
John Stowers
"""
import cv2
import Box2D
from Box2D.b2 import (polygonShape, world)
from opencv_draw import OpencvDrawFuncs
# --- constants ---
# Box2D deals with meters, but we want to display pixels; the
# pixels-per-meter conversion factor is passed to the drawer below.
TARGET_FPS = 60
TIME_STEP = 1.0 / TARGET_FPS
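# (With the drawer's ppm=20 below, one meter spans 20 pixels; assuming the
# usual screen convention of y growing downward, a world point maps to pixels
# roughly as (x_m * ppm, screen_height - y_m * ppm).)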
# --- pybox2d world setup ---
# Create the world
world = world(gravity=(0, -10), doSleep=True)
# And a static body to hold the ground shape
ground_body = world.CreateStaticBody(
position=(0, 0),
shapes=polygonShape(box=(50, 1)),
)
# Create a couple dynamic bodies
bodyc = world.CreateDynamicBody(position=(20, 45))
circle = bodyc.CreateCircleFixture(radius=0.5, density=1, friction=0.3)
bodyb = world.CreateDynamicBody(position=(30, 45), angle=15)
box = bodyb.CreatePolygonFixture(box=(2, 1), density=1, friction=0.3)
world.CreateWeldJoint(bodyA=bodyc, bodyB=bodyb, anchor=bodyb.worldCenter)
drawer = OpencvDrawFuncs(w=640, h=480, ppm=20)
drawer.install()
while True:
key = 0xFF & cv2.waitKey(int(TIME_STEP * 1000)) # milliseconds
if key == 27:
break
drawer.clear_screen()
drawer.draw_world(world)
# Make Box2D simulate the physics of our world for one step.
world.Step(TIME_STEP, 10, 10)
    # Show the frame; the waitKey() call above already paces us near the target FPS.
cv2.imshow("world", drawer.screen)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D import (b2CircleShape, b2EdgeShape, b2Filter, b2FixtureDef,
b2PolygonShape, b2Vec2)
class CollisionFiltering (Framework):
name = "Collision Filtering"
description = "See which shapes collide with each other."
# This is a test of collision filtering.
# There is a triangle, a box, and a circle.
# There are 6 shapes. 3 large and 3 small.
# The 3 small ones always collide.
# The 3 large ones never collide.
# The boxes don't collide with triangles (except if both are small).
# The box connected to the large triangle has no filter settings,
# so it collides with everything.
def __init__(self):
super(CollisionFiltering, self).__init__()
# Ground body
world = self.world
ground = world.CreateBody(
shapes=b2EdgeShape(vertices=[(-40, 0), (40, 0)])
)
# Define the groups that fixtures can fall into
# Note that negative groups never collide with other negative ones.
smallGroup = 1
largeGroup = -1
# And the categories
# Note that these are bit-locations, and as such are written in
# hexadecimal.
# defaultCategory = 0x0001
triangleCategory = 0x0002
boxCategory = 0x0004
circleCategory = 0x0008
# And the masks that define which can hit one another
# A mask of 0xFFFF means that it will collide with everything else in
# its group. The box mask below uses an exclusive OR (XOR) which in
# effect toggles the triangleCategory bit, making boxMask = 0xFFFD.
# Such a mask means that boxes never collide with triangles. (if
# you're still confused, see the implementation details below)
triangleMask = 0xFFFF
boxMask = 0xFFFF ^ triangleCategory
circleMask = 0xFFFF
# The actual implementation determining whether or not two objects
# collide is defined in the C++ source code, but it can be overridden
# in Python (with b2ContactFilter).
# The default behavior goes like this:
# if (filterA.groupIndex == filterB.groupIndex and filterA.groupIndex != 0):
# collide if filterA.groupIndex is greater than zero (negative groups never collide)
# else:
# collide if (filterA.maskBits & filterB.categoryBits) != 0 and (filterA.categoryBits & filterB.maskBits) != 0
#
# So, if they have the same group index (and that index isn't the
# default 0), then they collide if the group index is > 0 (since
# negative groups never collide)
# (Note that a body with the default filter settings will always
# collide with everything else.)
# If their group indices differ, then only if their bitwise-ANDed
# category and mask bits match up do they collide.
#
# For more help, some basics of bit masks might help:
# -> http://en.wikipedia.org/wiki/Mask_%28computing%29
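        # A minimal Python sketch of that default rule (illustrative only and
        # unused here; the authoritative version is b2ContactFilter::
        # ShouldCollide in the C++ source):
        def _should_collide(filterA, filterB):
            if filterA.groupIndex == filterB.groupIndex and filterA.groupIndex != 0:
                return filterA.groupIndex > 0
            return ((filterA.maskBits & filterB.categoryBits) != 0 and
                    (filterA.categoryBits & filterB.maskBits) != 0)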
# Small triangle
triangle = b2FixtureDef(
shape=b2PolygonShape(vertices=[(-1, 0), (1, 0), (0, 2)]),
density=1,
filter=b2Filter(
groupIndex=smallGroup,
categoryBits=triangleCategory,
maskBits=triangleMask,
)
)
world.CreateDynamicBody(
position=(-5, 2),
fixtures=triangle,
)
# Large triangle (recycle definitions)
triangle.shape.vertices = [
2.0 * b2Vec2(v) for v in triangle.shape.vertices]
triangle.filter.groupIndex = largeGroup
trianglebody = world.CreateDynamicBody(
position=(-5, 6),
fixtures=triangle,
fixedRotation=True, # <--
)
# note that the large triangle will not rotate
# Small box
box = b2FixtureDef(
shape=b2PolygonShape(box=(1, 0.5)),
density=1,
restitution=0.1,
filter = b2Filter(
groupIndex=smallGroup,
categoryBits=boxCategory,
maskBits=boxMask,
)
)
world.CreateDynamicBody(
position=(0, 2),
fixtures=box,
)
# Large box
box.shape.box = (2, 1)
box.filter.groupIndex = largeGroup
world.CreateDynamicBody(
position=(0, 6),
fixtures=box,
)
# Small circle
circle = b2FixtureDef(
shape=b2CircleShape(radius=1),
density=1,
filter=b2Filter(
groupIndex=smallGroup,
categoryBits=circleCategory,
maskBits=circleMask,
)
)
world.CreateDynamicBody(
position=(5, 2),
fixtures=circle,
)
# Large circle
circle.shape.radius *= 2
circle.filter.groupIndex = largeGroup
world.CreateDynamicBody(
position=(5, 6),
fixtures=circle,
)
# Create a joint for fun on the big triangle
# Note that it does not inherit or have anything to do with the
# filter settings of the attached triangle.
box = b2FixtureDef(shape=b2PolygonShape(box=(0.5, 1)), density=1)
testbody = world.CreateDynamicBody(
position=(-5, 10),
fixtures=box,
)
world.CreatePrismaticJoint(
bodyA=trianglebody,
bodyB=testbody,
enableLimit=True,
localAnchorA=(0, 4),
localAnchorB=(0, 0),
localAxisA=(0, 1),
lowerTranslation=-1,
upperTranslation=1,
)
if __name__ == "__main__":
main(CollisionFiltering)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D import (b2FixtureDef, b2PolygonShape, b2_pi)
class Tumbler (Framework):
name = "Tumbler"
description = ''
count = 800
def __init__(self):
Framework.__init__(self)
ground = self.world.CreateBody()
body = self.world.CreateDynamicBody(
position=(0, 10),
allowSleep=False,
shapeFixture=b2FixtureDef(density=5.0),
shapes=[
b2PolygonShape(box=(0.5, 10, (10, 0), 0)),
b2PolygonShape(box=(0.5, 10, (-10, 0), 0)),
b2PolygonShape(box=(10, 0.5, (0, 10), 0)),
b2PolygonShape(box=(10, 0.5, (0, -10), 0)),
]
)
self.joint = self.world.CreateRevoluteJoint(bodyA=ground, bodyB=body,
localAnchorA=(0, 10), localAnchorB=(0, 0),
referenceAngle=0, motorSpeed=0.05 * b2_pi,
enableMotor=True, maxMotorTorque=1.0e8)
def Step(self, settings):
Framework.Step(self, settings)
        if self.count <= 0:
            # all of the extra bodies have been spawned; stop creating more
            return
        self.count -= 1
self.world.CreateDynamicBody(
position=(0, 10),
allowSleep=False,
fixtures=b2FixtureDef(
density=1.0, shape=b2PolygonShape(box=(0.125, 0.125))),
)
if __name__ == "__main__":
main(Tumbler)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, Keys, main)
from Box2D import (b2DistanceJointDef, b2EdgeShape, b2FixtureDef,
b2PolygonShape)
class Web(Framework):
name = "Web"
description = "This demonstrates a soft distance joint. Press: (b) to delete a body, (j) to delete a joint"
bodies = []
joints = []
def __init__(self):
super(Web, self).__init__()
# The ground
ground = self.world.CreateBody(
shapes=b2EdgeShape(vertices=[(-40, 0), (40, 0)])
)
fixture = b2FixtureDef(shape=b2PolygonShape(box=(0.5, 0.5)),
density=5, friction=0.2)
self.bodies = [self.world.CreateDynamicBody(
position=pos,
fixtures=fixture
) for pos in ((-5, 5), (5, 5), (5, 15), (-5, 15))]
bodies = self.bodies
# Create the joints between each of the bodies and also the ground
# bodyA bodyB localAnchorA localAnchorB
sets = [(ground, bodies[0], (-10, 0), (-0.5, -0.5)),
(ground, bodies[1], (10, 0), (0.5, -0.5)),
(ground, bodies[2], (10, 20), (0.5, 0.5)),
(ground, bodies[3], (-10, 20), (-0.5, 0.5)),
(bodies[0], bodies[1], (0.5, 0), (-0.5, 0)),
(bodies[1], bodies[2], (0, 0.5), (0, -0.5)),
(bodies[2], bodies[3], (-0.5, 0), (0.5, 0)),
(bodies[3], bodies[0], (0, -0.5), (0, 0.5)),
]
        # We define the anchor positions in local body coordinates; the rest
        # length is set automatically by the __init__ of the b2DistanceJointDef.
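        # (Illustratively, and assuming pybox2d mirrors Initialize(), that rest
        # length is the distance between the world-space anchors:
        #   (bodyA.GetWorldPoint(localAnchorA)
        #    - bodyB.GetWorldPoint(localAnchorB)).length )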
self.joints = []
for bodyA, bodyB, localAnchorA, localAnchorB in sets:
dfn = b2DistanceJointDef(
frequencyHz=4.0,
dampingRatio=0.5,
bodyA=bodyA,
bodyB=bodyB,
localAnchorA=localAnchorA,
localAnchorB=localAnchorB,
)
self.joints.append(self.world.CreateJoint(dfn))
def Keyboard(self, key):
if key == Keys.K_b:
for body in self.bodies:
# Gets both FixtureDestroyed and JointDestroyed callbacks.
self.world.DestroyBody(body)
break
elif key == Keys.K_j:
for joint in self.joints:
# Does not get a JointDestroyed callback!
self.world.DestroyJoint(joint)
self.joints.remove(joint)
break
def FixtureDestroyed(self, fixture):
super(Web, self).FixtureDestroyed(fixture)
body = fixture.body
if body in self.bodies:
print(body)
self.bodies.remove(body)
print("Fixture destroyed, removing its body from the list. Bodies left: %d"
% len(self.bodies))
def JointDestroyed(self, joint):
if joint in self.joints:
self.joints.remove(joint)
print("Joint destroyed and removed from the list. Joints left: %d"
% len(self.joints))
if __name__ == "__main__":
main(Web)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, Keys, main)
from Box2D import (b2Clamp, b2Color, b2PolygonShape, b2Random,
b2_maxPolygonVertices)
class ConvexHull (Framework):
name = "ConvexHull"
description = ('Press g to generate a new random convex hull, a to switch '
'to automatic mode')
def __init__(self):
Framework.__init__(self)
self.auto = False
self.generate()
def generate(self):
lower = (-8, -8)
upper = (8, 8)
self.verts = verts = []
for i in range(b2_maxPolygonVertices):
            x = 10.0 * b2Random(-1.0, 1.0)
            y = 10.0 * b2Random(-1.0, 1.0)
# Clamp onto a square to help create collinearities.
# This will stress the convex hull algorithm.
verts.append(b2Clamp((x, y), lower, upper))
def Keyboard(self, key):
if key == Keys.K_a:
self.auto = not self.auto
elif key == Keys.K_g:
self.generate()
def Step(self, settings):
Framework.Step(self, settings)
renderer = self.renderer
try:
poly = b2PolygonShape(vertices=self.verts)
except AssertionError as ex:
self.Print('b2PolygonShape failed: %s' % ex)
else:
self.Print('Valid: %s' % poly.valid)
renderer.DrawPolygon([renderer.to_screen(v)
for v in self.verts], b2Color(0.9, 0.9, 0.9))
for i, v in enumerate(self.verts):
renderer.DrawPoint(renderer.to_screen(v), 2.0,
b2Color(0.9, 0.5, 0.5))
x, y = renderer.to_screen(v)
self.DrawStringAt(x + 0.05, y + 0.05, '%d' % i)
if self.auto:
self.generate()
if __name__ == "__main__":
main(ConvexHull)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D import (b2EdgeShape, b2FixtureDef, b2PolygonShape, b2Vec2)
class Pyramid (Framework):
name = "Pyramid"
def __init__(self):
super(Pyramid, self).__init__()
# The ground
ground = self.world.CreateStaticBody(
shapes=b2EdgeShape(vertices=[(-40, 0), (40, 0)])
)
box_half_size = (0.5, 0.5)
box_density = 5.0
box_rows = 20
x = b2Vec2(-7, 0.75)
deltaX = (0.5625, 1.25)
deltaY = (1.125, 0)
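        # Note the slightly confusing names: deltaY steps horizontally along a
        # row, while deltaX shifts each successive row's start up and inward;
        # with one box fewer per row, the result is a pyramid.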
for i in range(box_rows):
y = x.copy()
for j in range(i, box_rows):
self.world.CreateDynamicBody(
position=y,
fixtures=b2FixtureDef(
shape=b2PolygonShape(box=box_half_size),
density=box_density)
)
y += deltaY
x += deltaX
if __name__ == "__main__":
main(Pyramid)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D import (b2CircleShape, b2EdgeShape, b2FixtureDef)
class Restitution (Framework):
name = "Restitution example"
description = "Note the difference in bounce height of the circles"
def __init__(self):
super(Restitution, self).__init__()
# The ground
ground = self.world.CreateStaticBody(
shapes=b2EdgeShape(vertices=[(-20, 0), (20, 0)])
)
radius = 1.0
density = 1.0
# The bodies
for i, restitution in enumerate([0.0, 0.1, 0.3, 0.5, 0.75, 0.9, 1.0]):
self.world.CreateDynamicBody(
position=(-10 + 3.0 * i, 20),
fixtures=b2FixtureDef(
shape=b2CircleShape(radius=radius),
density=density, restitution=restitution)
)
if __name__ == "__main__":
main(Restitution)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from math import cos, sin
from .framework import (Framework, main)
from Box2D import (b2CircleShape, b2EdgeShape, b2FixtureDef, b2PolygonShape,
b2_pi)
class CharacterCollision(Framework):
name = "Character Collision"
description = ("This tests various character collision shapes.\n"
"Limitation: Square and hexagon can snag on aligned boxes.\n"
"Feature: Loops have smooth collision, inside and out."
)
def __init__(self):
super(CharacterCollision, self).__init__()
ground = self.world.CreateStaticBody(
position=(0, 0),
shapes=b2EdgeShape(vertices=[(-20, 0), (20, 0)])
)
# Collinear edges
self.world.CreateStaticBody(
shapes=[b2EdgeShape(vertices=[(-8, 1), (-6, 1)]),
b2EdgeShape(vertices=[(-6, 1), (-4, 1)]),
b2EdgeShape(vertices=[(-4, 1), (-2, 1)]),
]
)
# Square tiles
self.world.CreateStaticBody(
shapes=[b2PolygonShape(box=[1, 1, (4, 3), 0]),
b2PolygonShape(box=[1, 1, (6, 3), 0]),
b2PolygonShape(box=[1, 1, (8, 3), 0]),
]
)
# Square made from an edge loop. Collision should be smooth.
body = self.world.CreateStaticBody()
body.CreateLoopFixture(vertices=[(-1, 3), (1, 3), (1, 5), (-1, 5)])
# Edge loop.
body = self.world.CreateStaticBody(position=(-10, 4))
body.CreateLoopFixture(vertices=[
(0.0, 0.0), (6.0, 0.0),
(6.0, 2.0), (4.0, 1.0),
(2.0, 2.0), (0.0, 2.0),
(-2.0, 2.0), (-4.0, 3.0),
(-6.0, 2.0), (-6.0, 0.0), ]
)
# Square character 1
self.world.CreateDynamicBody(
position=(-3, 8),
fixedRotation=True,
allowSleep=False,
fixtures=b2FixtureDef(shape=b2PolygonShape(
box=(0.5, 0.5)), density=20.0),
)
# Square character 2
body = self.world.CreateDynamicBody(
position=(-5, 5),
fixedRotation=True,
allowSleep=False,
)
body.CreatePolygonFixture(box=(0.25, 0.25), density=20.0)
# Hexagon character
a = b2_pi / 3.0
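        # (Illustrative note: stepping an angle of pi/3 six times below traces
        # the vertices of a regular hexagon of radius 0.5 around the body.)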
self.world.CreateDynamicBody(
position=(-5, 8),
fixedRotation=True,
allowSleep=False,
fixtures=b2FixtureDef(
shape=b2PolygonShape(
vertices=[(0.5 * cos(i * a), 0.5 * sin(i * a))
for i in range(6)]),
density=20.0
),
)
# Circle character
self.world.CreateDynamicBody(
position=(3, 5),
fixedRotation=True,
allowSleep=False,
fixtures=b2FixtureDef(
shape=b2CircleShape(radius=0.5),
density=20.0
),
)
    def Step(self, settings):
        super(CharacterCollision, self).Step(settings)
if __name__ == "__main__":
main(CharacterCollision)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D.b2 import (edgeShape, polygonShape, fixtureDef)
# This test uses the alternative syntax offered by Box2D.b2, so you'll notice
# that all of the classes that normally have 'b2' in front of them no longer
# do. The choice of which to use is mostly stylistic and is left up to the
# user.
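# For example (a hedged sketch; in pybox2d, Box2D.b2 re-exports each b2Xxx
# class under a lowercased name, so the two spellings refer to the same class):
#   from Box2D import b2EdgeShape
#   from Box2D.b2 import edgeShape
#   assert edgeShape is b2EdgeShape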
class Chain (Framework):
name = "Chain"
def __init__(self):
super(Chain, self).__init__()
# The ground
ground = self.world.CreateBody(
shapes=edgeShape(vertices=[(-40, 0), (40, 0)])
)
plank = fixtureDef(
shape=polygonShape(box=(0.6, 0.125)),
density=20,
friction=0.2,
)
# Create one Chain (Only the left end is fixed)
prevBody = ground
y = 25
numPlanks = 30
for i in range(numPlanks):
body = self.world.CreateDynamicBody(
position=(0.5 + i, y),
fixtures=plank,
)
# You can try a WeldJoint for a slightly different effect.
# self.world.CreateWeldJoint(
self.world.CreateRevoluteJoint(
bodyA=prevBody,
bodyB=body,
anchor=(i, y),
)
prevBody = body
if __name__ == "__main__":
main(Chain)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from math import ceil, log
from .framework import (Framework, main)
from Box2D import (b2FixtureDef, b2PolygonShape, b2Vec2)
class Tiles (Framework):
name = "Tiles"
    description = ('This stress tests the dynamic tree broad-phase. This also '
                   'shows that tile-based collision\nis _not_ smooth due to '
                   'Box2D not knowing about adjacency.')
def __init__(self):
super(Tiles, self).__init__()
a = 0.5
def ground_positions():
N = 200
M = 10
position = b2Vec2(0, 0)
for i in range(M):
position.x = -N * a
for j in range(N):
yield position
position.x += 2.0 * a
position.y -= 2.0 * a
ground = self.world.CreateStaticBody(
position=(0, -a),
shapes=[b2PolygonShape(box=(a, a, position, 0))
for position in ground_positions()]
)
count = 20
def dynamic_positions():
x = b2Vec2(-7.0, 0.75)
deltaX = (0.5625, 1.25)
deltaY = (1.125, 0.0)
for i in range(count):
y = x.copy()
for j in range(i, count):
yield y
y += deltaY
x += deltaX
for pos in dynamic_positions():
self.world.CreateDynamicBody(
position=pos,
fixtures=b2FixtureDef(
shape=b2PolygonShape(box=(a, a)), density=5)
)
def Step(self, settings):
super(Tiles, self).Step(settings)
cm = self.world.contactManager
height = cm.broadPhase.treeHeight
leafCount = cm.broadPhase.proxyCount
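        # A binary tree over leafCount leaves needs at least 2*leafCount - 1
        # nodes, so ceil(log2(minNodeCount)) is a lower bound on the tree
        # height; comparing it to the actual height shows how balanced it is.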
minNodeCount = 2 * leafCount - 1
minHeight = ceil(log(float(minNodeCount)) / log(2))
self.Print('Dynamic tree height=%d, min=%d' % (height, minHeight))
if __name__ == "__main__":
main(Tiles)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# Contributed by Giorgos Giagas (giorgosg)
# - updated for 2.1 by Ken
from math import sin, cos, pi
from .framework import (Framework, Keys, main)
from Box2D import (b2CircleShape, b2FixtureDef)
def create_blob(world, center, radius, circle_radius=0.5, shape_num=24,
angularDamping=0.5, linearDamping=0.5, friction=0.5,
density=5.0, **kwargs):
def get_pos(angle):
return (cos(angle * pi / 180.0) * radius + center[0],
sin(angle * pi / 180.0) * radius + center[1])
circle = b2CircleShape(radius=circle_radius)
fixture = b2FixtureDef(shape=circle, friction=friction, density=density,
restitution=0.0)
    bodies = [world.CreateDynamicBody(position=get_pos(i), fixtures=fixture)
              for i in range(0, 360, int(360 / shape_num))]
joints = []
prev_body = bodies[-1]
for body in bodies:
joint = world.CreateDistanceJoint(
bodyA=prev_body,
bodyB=body,
anchorA=prev_body.position,
anchorB=body.position,
dampingRatio=10.0)
joints.append(joint)
prev_body = body
return bodies, joints
def add_spring_force(bodyA, localA, bodyB, localB, force_k, friction,
desiredDist):
worldA = bodyA.GetWorldPoint(localA)
worldB = bodyB.GetWorldPoint(localB)
diff = worldB - worldA
# Find velocities of attach points
velA = bodyA.linearVelocity - \
bodyA.GetWorldVector(localA).cross(bodyA.angularVelocity)
velB = bodyB.linearVelocity - \
bodyB.GetWorldVector(localB).cross(bodyB.angularVelocity)
vdiff = velB - velA
dx = diff.Normalize() # Normalizes diff and puts length into dx
vrel = vdiff.x * diff.x + vdiff.y * diff.y
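    # Damped spring (Hooke's law): F = -k * (length - rest_length) - c * v_rel,
    # applied equal and opposite at the two attachment points below.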
forceMag = -force_k * (dx - desiredDist) - friction * vrel
bodyB.ApplyForce(diff * forceMag, bodyA.GetWorldPoint(localA), True)
bodyA.ApplyForce(diff * -forceMag, bodyB.GetWorldPoint(localB), True)
def blob_step(world, blob_bodies, radius, upward_force, move=0,
spring_friction=5.0):
body_count = len(blob_bodies)
bodies1, bodies2 = (blob_bodies[:body_count // 2],
blob_bodies[body_count // 2:])
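    # Pair each body with the one diametrically opposite on the ring and
    # connect them with a virtual spring whose rest length is the blob's
    # diameter; this pressure-like force keeps the blob inflated.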
for body1, body2 in zip(bodies1, bodies2):
add_spring_force(body1, (0, 0), body2, (0, 0),
upward_force, spring_friction, radius * 2)
if move:
        # Drive the topmost body of the blob sideways.
        top_body = max(blob_bodies, key=lambda body: body.position.y)
top_body.ApplyForce((move, 0), top_body.position, True)
class GishTribute (Framework):
name = "Tribute to Gish"
description = 'Keys: Left (a), Right (d), Jump (w), Stop (s)'
move = 0
jump = 100
def __init__(self):
super(GishTribute, self).__init__()
# The ground
ground = self.world.CreateStaticBody()
ground.CreateEdgeFixture(vertices=[(-50, 0), (50, 0)], friction=0.2)
ground.CreateEdgeFixture(vertices=[(-50, 0), (-50, 10)], friction=0.2)
ground.CreateEdgeFixture(vertices=[(50, 0), (50, 10)], friction=0.2)
for i in range(2, 18, 2):
body = self.world.CreateDynamicBody(position=(-10.1, i))
body.CreatePolygonFixture(box=(3.0, 1.0), density=3.0)
self.blob_radius = 2
self.bodies, self.joints = create_blob(
self.world, (-10, 50), self.blob_radius, circle_radius=0.5)
def Keyboard(self, key):
if key == Keys.K_w:
self.jump = 10000
elif key == Keys.K_a:
self.move = -500
elif key == Keys.K_d:
self.move = 500
elif key == Keys.K_s:
self.move = 0
self.jump = 100
def Step(self, settings):
Framework.Step(self, settings)
blob_step(self.world, self.bodies,
self.blob_radius, self.jump, self.move)
self.jump = 100
if __name__ == "__main__":
main(GishTribute)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from math import sqrt
from .framework import (Framework, Keys, main)
from Box2D import (b2CircleShape, b2FixtureDef, b2PolygonShape, b2Random,
b2Vec2, b2_epsilon)
# ***** NOTE *****
# ***** NOTE *****
# This example does not appear to be working currently...
# It was ported from the JBox2D (Java) version
# ***** NOTE *****
# ***** NOTE *****
class Liquid (Framework):
name = "Liquid Test"
description = ''
bullet = None
num_particles = 1000
total_mass = 10.0
fluid_minx = -11.0
fluid_maxx = 5.0
fluid_miny = -10.0
fluid_maxy = 10.0
hash_width = 40
hash_height = 40
rad = 0.6
visc = 0.004
def __init__(self):
super(Liquid, self).__init__()
self.per_particle_mass = self.total_mass / self.num_particles
ground = self.world.CreateStaticBody(
shapes=[
b2PolygonShape(box=[5.0, 0.5]),
b2PolygonShape(box=[1.0, 0.2, (0, 4), -0.2]),
b2PolygonShape(box=[1.5, 0.2, (-1.2, 5.2), -1.5]),
b2PolygonShape(box=[0.5, 50.0, (5, 0), 0.0]),
b2PolygonShape(box=[0.5, 3.0, (-8, 0), 0.0]),
b2PolygonShape(box=[2.0, 0.1, (-6, -2.8), 0.1]),
b2CircleShape(radius=0.5, pos=(-.5, -4)),
]
)
cx = 0
cy = 25
box_width = 2.0
box_height = 20.0
self.liquid = []
for i in range(self.num_particles):
self.createDroplet((b2Random(cx - box_width * 0.5,
cx + box_width * 0.5),
b2Random(cy - box_height * 0.5,
cy + box_height * 0.5)))
self.createBoxSurfer()
if hasattr(self, 'settings'):
self.settings.enableSubStepping = False
def createBoxSurfer(self):
self.surfer = self.world.CreateDynamicBody(position=(0, 25))
self.surfer.CreatePolygonFixture(
density=1,
box=(b2Random(0.3, 0.7), b2Random(0.3, 0.7)),
)
def createDroplet(self, position):
body = self.world.CreateDynamicBody(
position=position,
fixedRotation=True,
allowSleep=False,
)
body.CreateCircleFixture(
groupIndex=-10,
radius=0.05,
restitution=0.4,
friction=0,
)
body.mass = self.per_particle_mass
self.liquid.append(body)
def applyLiquidConstraint(self, dt):
# (original comments left untouched)
# Unfortunately, this simulation method is not actually scale
# invariant, and it breaks down for rad < ~3 or so. So we need
# to scale everything to an ideal rad and then scale it back after.
idealRad = 50
idealRad2 = idealRad ** 2
multiplier = idealRad / self.rad
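        # e.g. with the default rad = 0.6 the multiplier is ~83.3: positions
        # and velocities are scaled up, forces are computed at the ideal
        # radius, and the resulting deltas are scaled back down at the end.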
info = dict([(drop, (drop.position, multiplier * drop.position,
multiplier * drop.linearVelocity))
for drop in self.liquid])
change = dict([(drop, b2Vec2(0, 0)) for drop in self.liquid])
dx = self.fluid_maxx - self.fluid_minx
dy = self.fluid_maxy - self.fluid_miny
range_ = (-1, 0, 1)
hash_width = self.hash_width
hash_height = self.hash_height
max_len = 9.9e9
visc = self.visc
hash = self.hash
neighbors = set()
# Populate the neighbor list from the 9 nearest cells
for drop, ((worldx, worldy), (mx, my), (mvx, mvy)) in list(info.items()):
hx = int((worldx / dx) * hash_width)
hy = int((worldy / dy) * hash_height)
neighbors.clear()
for nx in range_:
xc = hx + nx
if not (0 <= xc < hash_width):
continue
for ny in range_:
yc = hy + ny
if yc in hash[xc]:
for neighbor in hash[xc][yc]:
neighbors.add(neighbor)
if drop in neighbors:
neighbors.remove(drop)
# Particle pressure calculated by particle proximity
# Pressures = 0 iff all particles within range are idealRad
# distance away
lengths = []
p = 0
pnear = 0
for neighbor in neighbors:
nx, ny = info[neighbor][1]
vx, vy = nx - mx, ny - my
if -idealRad < vx < idealRad and -idealRad < vy < idealRad:
len_sqr = vx ** 2 + vy ** 2
if len_sqr < idealRad2:
len_ = sqrt(len_sqr)
if len_ < b2_epsilon:
len_ = idealRad - 0.01
lengths.append(len_)
oneminusq = 1.0 - (len_ / idealRad)
sq = oneminusq ** 2
p += sq
pnear += sq * oneminusq
else:
lengths.append(max_len)
# Now actually apply the forces
pressure = (p - 5) / 2.0 # normal pressure term
presnear = pnear / 2.0 # near particles term
changex, changey = 0, 0
for len_, neighbor in zip(lengths, neighbors):
(nx, ny), (nvx, nvy) = info[neighbor][1:3]
vx, vy = nx - mx, ny - my
if -idealRad < vx < idealRad and -idealRad < vy < idealRad:
if len_ < idealRad:
oneminusq = 1.0 - (len_ / idealRad)
factor = oneminusq * \
(pressure + presnear * oneminusq) / (2.0 * len_)
dx_, dy_ = vx * factor, vy * factor
relvx, relvy = nvx - mvx, nvy - mvy
factor = visc * oneminusq * dt
dx_ -= relvx * factor
dy_ -= relvy * factor
change[neighbor] += (dx_, dy_)
changex -= dx_
changey -= dy_
change[drop] += (changex, changey)
for drop, (dx_, dy_) in list(change.items()):
if dx_ != 0 or dy_ != 0:
drop.position += (dx_ / multiplier, dy_ / multiplier)
drop.linearVelocity += (dx_ / (multiplier * dt),
dy_ / (multiplier * dt))
def hashLocations(self):
hash_width = self.hash_width
hash_height = self.hash_height
self.hash = hash = dict([(i, {}) for i in range(hash_width)])
info = [(drop, drop.position) for drop in self.liquid]
dx = self.fluid_maxx - self.fluid_minx
dy = self.fluid_maxy - self.fluid_miny
        for drop, (worldx, worldy) in info:
            hx = int((worldx / dx) * hash_width)
            hy = int((worldy / dy) * hash_height)
if 0 <= hx < hash_width and 0 <= hy < hash_height:
x = hash[hx]
if hy not in x:
x[hy] = [drop]
else:
x[hy].append(drop)
def dampenLiquid(self):
for drop in self.liquid:
drop.linearVelocity *= 0.995
def checkBounds(self):
self.hash = None
to_remove = [
drop for drop in self.liquid if drop.position.y < self.fluid_miny]
for drop in to_remove:
self.liquid.remove(drop)
self.world.DestroyBody(drop)
self.createDroplet(
(0.0 + b2Random(-0.6, 0.6), 15.0 + b2Random(-2.3, 2.0)))
if self.surfer.position.y < -15:
self.world.DestroyBody(self.surfer)
self.createBoxSurfer()
def Step(self, settings):
super(Liquid, self).Step(settings)
dt = 1.0 / settings.hz
self.hashLocations()
self.applyLiquidConstraint(dt)
self.dampenLiquid()
self.checkBounds()
def Keyboard(self, key):
if key == Keys.K_b:
if self.bullet:
self.world.DestroyBody(self.bullet)
self.bullet = None
circle = b2FixtureDef(
shape=b2CircleShape(radius=0.25),
density=20,
restitution=0.05)
self.bullet = self.world.CreateDynamicBody(
position=(-31, 5),
bullet=True,
fixtures=circle,
linearVelocity=(400, 0),
)
if __name__ == "__main__":
main(Liquid)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, Keys, main)
from Box2D import (b2CircleShape, b2FixtureDef, b2LoopShape, b2PolygonShape,
b2RevoluteJointDef, b2_pi)
class Pinball (Framework):
name = "Pinball"
description = ('This tests bullet collision and provides an example of a gameplay scenario.\n'
'Press A to control the flippers.')
bodies = []
joints = []
def __init__(self):
super(Pinball, self).__init__()
# The ground
ground = self.world.CreateBody(
shapes=b2LoopShape(vertices=[(0, -2), (8, 6),
(8, 20), (-8, 20),
(-8, 6)]),
)
# Flippers
p1, p2 = (-2, 0), (2, 0)
fixture = b2FixtureDef(shape=b2PolygonShape(box=(1.75, 0.1)), density=1)
flipper = {'fixtures': fixture}
self.leftFlipper = self.world.CreateDynamicBody(
position=p1,
**flipper
)
self.rightFlipper = self.world.CreateDynamicBody(
position=p2,
**flipper
)
rjd = b2RevoluteJointDef(
bodyA=ground,
bodyB=self.leftFlipper,
localAnchorA=p1,
localAnchorB=(0, 0),
enableMotor=True,
enableLimit=True,
maxMotorTorque=1000,
motorSpeed=0,
lowerAngle=-30.0 * b2_pi / 180.0,
upperAngle=5.0 * b2_pi / 180.0,
)
self.leftJoint = self.world.CreateJoint(rjd)
rjd.motorSpeed = 0
rjd.localAnchorA = p2
rjd.bodyB = self.rightFlipper
rjd.lowerAngle = -5.0 * b2_pi / 180.0
rjd.upperAngle = 30.0 * b2_pi / 180.0
self.rightJoint = self.world.CreateJoint(rjd)
# Ball
self.ball = self.world.CreateDynamicBody(
fixtures=b2FixtureDef(
shape=b2CircleShape(radius=0.2),
density=1.0),
bullet=True,
position=(1, 15))
self.pressed = False
def Keyboard(self, key):
if key == Keys.K_a:
self.pressed = True
def KeyboardUp(self, key):
if key == Keys.K_a:
self.pressed = False
def Step(self, settings):
if self.pressed:
self.leftJoint.motorSpeed = 20
self.rightJoint.motorSpeed = -20
else:
self.leftJoint.motorSpeed = -10
self.rightJoint.motorSpeed = 10
super(Pinball, self).Step(settings)
if __name__ == "__main__":
main(Pinball)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D import (b2EdgeShape, b2FixtureDef, b2PolygonShape)
class Mobile (Framework):
name = "Mobile"
max_depth = 4
def __init__(self):
Framework.__init__(self)
ground = self.world.CreateStaticBody(
position=(0, 20),
shapes=[b2EdgeShape(vertices=[(-20, 0), (20, 0)])],
)
a = 0.5
depth = 0
self.root = self.add_node(ground, (0, 0), depth, 3.0, a)
self.world.CreateRevoluteJoint(bodyA=ground, bodyB=self.root,
localAnchorA=(0, 0), localAnchorB=(0, a))
def add_node(self, parent, local_anchor, depth, offset, a):
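        # Recursively build a balanced mobile: each node hangs two children
        # at horizontal offsets +/-offset, halving the offset per level,
        # until max_depth is reached.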
density = 20.0
h = (0, a)
p = parent.position + local_anchor - h
fixture = b2FixtureDef(shape=b2PolygonShape(box=(0.25 * a, a)),
density=density)
body = self.world.CreateDynamicBody(position=p, fixtures=fixture)
if depth == self.max_depth:
return body
a1 = (offset, -a)
a2 = (-offset, -a)
body1 = self.add_node(body, a1, depth + 1, 0.5 * offset, a)
body2 = self.add_node(body, a2, depth + 1, 0.5 * offset, a)
self.world.CreateRevoluteJoint(bodyA=body, bodyB=body1,
localAnchorA=a1, localAnchorB=h)
self.world.CreateRevoluteJoint(bodyA=body, bodyB=body2,
localAnchorA=a2, localAnchorB=h)
return body
if __name__ == "__main__":
main(Mobile)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# Implemented using the pybox2d SWIG interface for Box2D (pybox2d.googlecode.com)
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
class Empty(Framework):
"""You can use this class as an outline for your tests."""
name = "Empty" # Name of the class to display
description = "The description text goes here"
def __init__(self):
"""
Initialize all of your objects here.
Be sure to call the Framework's initializer first.
"""
super(Empty, self).__init__()
# Initialize all of the objects
def Keyboard(self, key):
"""
The key is from Keys.K_*
(e.g., if key == Keys.K_z: ... )
"""
pass
def Step(self, settings):
"""Called upon every step.
You should always call
-> super(Your_Test_Class, self).Step(settings)
at the beginning or end of your function.
If placed at the beginning, it will cause the actual physics step to happen first.
If placed at the end, it will cause the physics step to happen after your code.
"""
super(Empty, self).Step(settings)
# do stuff
# Placed after the physics step, it will draw on top of physics objects
self.Print("*** Base your own testbeds on me! ***")
def ShapeDestroyed(self, shape):
"""
Callback indicating 'shape' has been destroyed.
"""
pass
def JointDestroyed(self, joint):
"""
The joint passed in was removed.
"""
pass
# More functions can be changed to allow for contact monitoring and such.
# See the other testbed examples for more information.
if __name__ == "__main__":
main(Empty)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from .framework import (Framework, main)
from Box2D import (b2CircleShape, b2EdgeShape, b2FixtureDef, b2PolygonShape)
def create_bridge(world, ground, size, offset, plank_count, friction=0.6, density=1.0):
"""
Create a bridge with plank_count planks,
utilizing rectangular planks of size (width, height).
The bridge should start at x_offset, and continue to
roughly x_offset+width*plank_count.
The y will not change.
"""
width, height = size
x_offset, y_offset = offset
half_height = height / 2
plank = b2FixtureDef(
shape=b2PolygonShape(box=(width / 2, height / 2)),
friction=friction,
density=density,
)
bodies = []
prevBody = ground
for i in range(plank_count):
body = world.CreateDynamicBody(
position=(x_offset + width * i, y_offset),
fixtures=plank,
)
bodies.append(body)
world.CreateRevoluteJoint(
bodyA=prevBody,
bodyB=body,
anchor=(x_offset + width * (i - 0.5), y_offset)
)
prevBody = body
world.CreateRevoluteJoint(
bodyA=prevBody,
bodyB=ground,
anchor=(x_offset + width * (plank_count - 0.5), y_offset),
)
return bodies
class Bridge (Framework):
name = "Bridge"
numPlanks = 30 # Number of planks in the bridge
def __init__(self):
super(Bridge, self).__init__()
# The ground
ground = self.world.CreateBody(
shapes=b2EdgeShape(vertices=[(-40, 0), (40, 0)])
)
create_bridge(self.world, ground, (1.0, 0.25),
(-14.5, 5), self.numPlanks, 0.2, 20)
fixture = b2FixtureDef(
shape=b2PolygonShape(vertices=[(-0.5, 0.0),
(0.5, 0.0),
(0.0, 1.5),
]),
density=1.0
)
for i in range(2):
self.world.CreateDynamicBody(
position=(-8 + 8 * i, 12),
fixtures=fixture,
)
fixture = b2FixtureDef(shape=b2CircleShape(radius=0.5), density=1)
for i in range(3):
self.world.CreateDynamicBody(
position=(-6 + 6 * i, 10),
fixtures=fixture,
)
if __name__ == "__main__":
main(Bridge)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
The framework's base is FrameworkBase. See its help for more information.
"""
from time import time
from Box2D import (b2World, b2AABB, b2CircleShape, b2Color, b2Vec2)
from Box2D import (b2ContactListener, b2DestructionListener, b2DrawExtended)
from Box2D import (b2Fixture, b2FixtureDef, b2Joint)
from Box2D import (b2GetPointStates, b2QueryCallback, b2Random)
from Box2D import (b2_addState, b2_dynamicBody, b2_epsilon, b2_persistState)
from .settings import fwSettings
class fwDestructionListener(b2DestructionListener):
"""
The destruction listener callback:
"SayGoodbye" is called when a joint or shape is deleted.
"""
def __init__(self, test, **kwargs):
super(fwDestructionListener, self).__init__(**kwargs)
self.test = test
def SayGoodbye(self, obj):
if isinstance(obj, b2Joint):
if self.test.mouseJoint == obj:
self.test.mouseJoint = None
else:
self.test.JointDestroyed(obj)
elif isinstance(obj, b2Fixture):
self.test.FixtureDestroyed(obj)
class fwQueryCallback(b2QueryCallback):
def __init__(self, p):
super(fwQueryCallback, self).__init__()
self.point = p
self.fixture = None
def ReportFixture(self, fixture):
body = fixture.body
if body.type == b2_dynamicBody:
inside = fixture.TestPoint(self.point)
if inside:
self.fixture = fixture
# We found the object, so stop the query
return False
# Continue the query
return True
class Keys(object):
pass
class FrameworkBase(b2ContactListener):
"""
The base of the main testbed framework.
If you are planning on using the testbed framework and:
* Want to implement your own renderer (other than Pygame, etc.):
You should derive your class from this one to implement your own tests.
See empty.py or any of the other tests for more information.
* Do NOT want to implement your own renderer:
You should derive your class from Framework. The renderer chosen in
fwSettings (see settings.py) or on the command line will automatically
be used for your test.
"""
name = "None"
description = None
TEXTLINE_START = 30
colors = {
'mouse_point': b2Color(0, 1, 0),
'bomb_center': b2Color(0, 0, 1.0),
'bomb_line': b2Color(0, 1.0, 1.0),
'joint_line': b2Color(0.8, 0.8, 0.8),
'contact_add': b2Color(0.3, 0.95, 0.3),
'contact_persist': b2Color(0.3, 0.3, 0.95),
'contact_normal': b2Color(0.4, 0.9, 0.4),
}
def __reset(self):
""" Reset all of the variables to their starting values.
Not to be called except at initialization."""
# Box2D-related
self.points = []
self.world = None
self.bomb = None
self.mouseJoint = None
self.settings = fwSettings
self.bombSpawning = False
self.bombSpawnPoint = None
self.mouseWorld = None
self.using_contacts = False
self.stepCount = 0
# Box2D-callbacks
self.destructionListener = None
self.renderer = None
def __init__(self):
super(FrameworkBase, self).__init__()
self.__reset()
# Box2D Initialization
self.world = b2World(gravity=(0, -10), doSleep=True)
self.destructionListener = fwDestructionListener(test=self)
self.world.destructionListener = self.destructionListener
self.world.contactListener = self
self.t_steps, self.t_draws = [], []
def __del__(self):
pass
def Step(self, settings):
"""
The main physics step.
        Takes care of physics drawing (callbacks are executed after
        world.Step()) and drawing additional information.
"""
self.stepCount += 1
        # Use a zero time step (i.e., don't advance physics) if settings.hz <= 0
if settings.hz > 0.0:
timeStep = 1.0 / settings.hz
else:
timeStep = 0.0
renderer = self.renderer
# If paused, display so
if settings.pause:
if settings.singleStep:
settings.singleStep = False
else:
timeStep = 0.0
self.Print("****PAUSED****", (200, 0, 0))
# Set the flags based on what the settings show
if renderer:
# convertVertices is only applicable when using b2DrawExtended. It
# indicates that the C code should transform box2d coords to screen
# coordinates.
is_extended = isinstance(renderer, b2DrawExtended)
renderer.flags = dict(drawShapes=settings.drawShapes,
drawJoints=settings.drawJoints,
drawAABBs=settings.drawAABBs,
drawPairs=settings.drawPairs,
drawCOMs=settings.drawCOMs,
convertVertices=is_extended,
)
# Set the other settings that aren't contained in the flags
self.world.warmStarting = settings.enableWarmStarting
self.world.continuousPhysics = settings.enableContinuous
self.world.subStepping = settings.enableSubStepping
# Reset the collision points
self.points = []
# Tell Box2D to step
t_step = time()
self.world.Step(timeStep, settings.velocityIterations,
settings.positionIterations)
self.world.ClearForces()
t_step = time() - t_step
# Update the debug draw settings so that the vertices will be properly
# converted to screen coordinates
t_draw = time()
if renderer is not None:
renderer.StartDraw()
self.world.DrawDebugData()
# If the bomb is frozen, get rid of it.
if self.bomb and not self.bomb.awake:
self.world.DestroyBody(self.bomb)
self.bomb = None
# Take care of additional drawing (fps, mouse joint, slingshot bomb,
# contact points)
if renderer:
# If there's a mouse joint, draw the connection between the object
# and the current pointer position.
if self.mouseJoint:
p1 = renderer.to_screen(self.mouseJoint.anchorB)
p2 = renderer.to_screen(self.mouseJoint.target)
renderer.DrawPoint(p1, settings.pointSize,
self.colors['mouse_point'])
renderer.DrawPoint(p2, settings.pointSize,
self.colors['mouse_point'])
renderer.DrawSegment(p1, p2, self.colors['joint_line'])
# Draw the slingshot bomb
if self.bombSpawning:
renderer.DrawPoint(renderer.to_screen(self.bombSpawnPoint),
settings.pointSize, self.colors['bomb_center'])
renderer.DrawSegment(renderer.to_screen(self.bombSpawnPoint),
renderer.to_screen(self.mouseWorld),
self.colors['bomb_line'])
# Draw each of the contact points in different colors.
if self.settings.drawContactPoints:
for point in self.points:
if point['state'] == b2_addState:
renderer.DrawPoint(renderer.to_screen(point['position']),
settings.pointSize,
self.colors['contact_add'])
elif point['state'] == b2_persistState:
renderer.DrawPoint(renderer.to_screen(point['position']),
settings.pointSize,
self.colors['contact_persist'])
if settings.drawContactNormals:
for point in self.points:
p1 = renderer.to_screen(point['position'])
p2 = renderer.axisScale * point['normal'] + p1
renderer.DrawSegment(p1, p2, self.colors['contact_normal'])
renderer.EndDraw()
t_draw = time() - t_draw
t_draw = max(b2_epsilon, t_draw)
t_step = max(b2_epsilon, t_step)
try:
self.t_draws.append(1.0 / t_draw)
        except ZeroDivisionError:
pass
else:
if len(self.t_draws) > 2:
self.t_draws.pop(0)
try:
self.t_steps.append(1.0 / t_step)
        except ZeroDivisionError:
pass
else:
if len(self.t_steps) > 2:
self.t_steps.pop(0)
if settings.drawFPS:
self.Print("Combined FPS %d" % self.fps)
if settings.drawStats:
self.Print("bodies=%d contacts=%d joints=%d proxies=%d" %
(self.world.bodyCount, self.world.contactCount,
self.world.jointCount, self.world.proxyCount))
self.Print("hz %d vel/pos iterations %d/%d" %
(settings.hz, settings.velocityIterations,
settings.positionIterations))
if self.t_draws and self.t_steps:
self.Print("Potential draw rate: %.2f fps Step rate: %.2f Hz"
"" % (sum(self.t_draws) / len(self.t_draws),
sum(self.t_steps) / len(self.t_steps))
)
def ShiftMouseDown(self, p):
"""
Indicates that there was a left click at point p (world coordinates)
with the left shift key being held down.
"""
self.mouseWorld = p
if not self.mouseJoint:
self.SpawnBomb(p)
def MouseDown(self, p):
"""
Indicates that there was a left click at point p (world coordinates)
"""
if self.mouseJoint is not None:
return
# Create a mouse joint on the selected body (assuming it's dynamic)
# Make a small box.
aabb = b2AABB(lowerBound=p - (0.001, 0.001),
upperBound=p + (0.001, 0.001))
# Query the world for overlapping shapes.
query = fwQueryCallback(p)
self.world.QueryAABB(query, aabb)
if query.fixture:
body = query.fixture.body
# A body was selected, create the mouse joint
self.mouseJoint = self.world.CreateMouseJoint(
bodyA=self.groundbody,
bodyB=body,
target=p,
maxForce=1000.0 * body.mass)
body.awake = True
def MouseUp(self, p):
"""
Left mouse button up.
"""
if self.mouseJoint:
self.world.DestroyJoint(self.mouseJoint)
self.mouseJoint = None
if self.bombSpawning:
self.CompleteBombSpawn(p)
def MouseMove(self, p):
"""
Mouse moved to point p, in world coordinates.
"""
self.mouseWorld = p
if self.mouseJoint:
self.mouseJoint.target = p
def SpawnBomb(self, worldPt):
"""
Begins the slingshot bomb by recording the initial position.
Once the user drags the mouse and releases it, then
CompleteBombSpawn will be called and the actual bomb will be
released.
"""
self.bombSpawnPoint = worldPt.copy()
self.bombSpawning = True
def CompleteBombSpawn(self, p):
"""
Create the slingshot bomb based on the two points
(from the worldPt passed to SpawnBomb to p passed in here)
"""
if not self.bombSpawning:
return
multiplier = 30.0
vel = self.bombSpawnPoint - p
vel *= multiplier
self.LaunchBomb(self.bombSpawnPoint, vel)
self.bombSpawning = False
def LaunchBomb(self, position, velocity):
"""
A bomb is a simple circle which has the specified position and velocity.
position and velocity must be b2Vec2's.
"""
if self.bomb:
self.world.DestroyBody(self.bomb)
self.bomb = None
self.bomb = self.world.CreateDynamicBody(
allowSleep=True,
position=position,
linearVelocity=velocity,
fixtures=b2FixtureDef(
shape=b2CircleShape(radius=0.3),
density=20,
restitution=0.1)
)
def LaunchRandomBomb(self):
"""
Create a new bomb and launch it at the testbed.
"""
p = b2Vec2(b2Random(-15.0, 15.0), 30.0)
v = -5.0 * p
self.LaunchBomb(p, v)
def SimulationLoop(self):
"""
The main simulation loop. Don't override this, override Step instead.
"""
# Reset the text line to start the text from the top
self.textLine = self.TEXTLINE_START
# Draw the name of the test running
self.Print(self.name, (127, 127, 255))
if self.description:
# Draw the name of the test running
for s in self.description.split('\n'):
self.Print(s, (127, 255, 127))
# Do the main physics step
self.Step(self.settings)
def ConvertScreenToWorld(self, x, y):
"""
Return a b2Vec2 in world coordinates of the passed in screen
coordinates x, y
NOTE: Renderer subclasses must implement this
"""
raise NotImplementedError()
def DrawStringAt(self, x, y, str, color=(229, 153, 153, 255)):
"""
Draw some text, str, at screen coordinates (x, y).
NOTE: Renderer subclasses must implement this
"""
raise NotImplementedError()
def Print(self, str, color=(229, 153, 153, 255)):
"""
Draw some text at the top status lines
and advance to the next line.
NOTE: Renderer subclasses must implement this
"""
raise NotImplementedError()
def PreSolve(self, contact, old_manifold):
"""
This is a critical function when there are many contacts in the world.
It should be optimized as much as possible.
"""
if not (self.settings.drawContactPoints or
self.settings.drawContactNormals or self.using_contacts):
return
elif len(self.points) > self.settings.maxContactPoints:
return
manifold = contact.manifold
if manifold.pointCount == 0:
return
state1, state2 = b2GetPointStates(old_manifold, manifold)
if not state2:
return
worldManifold = contact.worldManifold
# TODO: find some way to speed all of this up.
self.points.extend([dict(fixtureA=contact.fixtureA,
fixtureB=contact.fixtureB,
position=worldManifold.points[i],
normal=worldManifold.normal.copy(),
state=state2[i],
)
for i, point in enumerate(state2)])
# These can/should be implemented in the test subclass: (Step() also if necessary)
# See empty.py for a simple example.
def BeginContact(self, contact):
pass
def EndContact(self, contact):
pass
def PostSolve(self, contact, impulse):
pass
def FixtureDestroyed(self, fixture):
"""
Callback indicating 'fixture' has been destroyed.
"""
pass
def JointDestroyed(self, joint):
"""
Callback indicating 'joint' has been destroyed.
"""
pass
def Keyboard(self, key):
"""
Callback indicating 'key' has been pressed down.
"""
pass
def KeyboardUp(self, key):
"""
Callback indicating 'key' has been released.
"""
pass
def main(test_class):
"""
Loads the test class and executes it.
"""
print("Loading %s..." % test_class.name)
test = test_class()
if fwSettings.onlyInit:
return
test.run()
if __name__ == '__main__':
print('Please run one of the examples directly. This is just the base for '
'all of the frameworks.')
exit(1)
# Your framework classes should follow this format. If it is the 'foobar'
# framework, then your file should be 'backends/foobar_framework.py' and you
# should have a class 'FoobarFramework' that subclasses FrameworkBase. Ensure
# proper capitalization for portability.
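# For instance (illustrative): with fwSettings.backend == 'pygame', the lookup
# below imports backends/pygame_framework.py and uses its PygameFramework class.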
from . import backends
try:
framework_name = '%s_framework' % (fwSettings.backend.lower())
__import__('backends', globals(), fromlist=[framework_name], level=1)
framework_module = getattr(backends, framework_name)
Framework = getattr(framework_module,
'%sFramework' % fwSettings.backend.capitalize())
except Exception as ex:
print('Unable to import the back-end %s: %s' % (fwSettings.backend, ex))
print('Attempting to fall back on the pygame back-end.')
from .backends.pygame_framework import PygameFramework as Framework
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Based on Chris Campbell's tutorial from iforce2d.net:
http://www.iforce2d.net/b2dtut/top-down-car
"""
from .framework import (Framework, Keys, main)
import math
class TDGroundArea(object):
"""
An area on the ground that the car can run over
"""
def __init__(self, friction_modifier):
self.friction_modifier = friction_modifier
class TDTire(object):
def __init__(self, car, max_forward_speed=100.0,
max_backward_speed=-20, max_drive_force=150,
turn_torque=15, max_lateral_impulse=3,
dimensions=(0.5, 1.25), density=1.0,
position=(0, 0)):
world = car.body.world
self.current_traction = 1
self.turn_torque = turn_torque
self.max_forward_speed = max_forward_speed
self.max_backward_speed = max_backward_speed
self.max_drive_force = max_drive_force
self.max_lateral_impulse = max_lateral_impulse
self.ground_areas = []
self.body = world.CreateDynamicBody(position=position)
self.body.CreatePolygonFixture(box=dimensions, density=density)
self.body.userData = {'obj': self}
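    # The tire's velocity split along its body-local axes: forward is the
    # local (0, 1) direction, lateral is (1, 0). Projecting the velocity v
    # onto a unit direction n gives the component (v . n) * n.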
@property
def forward_velocity(self):
body = self.body
current_normal = body.GetWorldVector((0, 1))
return current_normal.dot(body.linearVelocity) * current_normal
@property
def lateral_velocity(self):
body = self.body
right_normal = body.GetWorldVector((1, 0))
return right_normal.dot(body.linearVelocity) * right_normal
def update_friction(self):
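        # Cancel sideways motion: an impulse of -mass * lateral_velocity would
        # kill it entirely, so it is clamped to max_lateral_impulse to let the
        # tire skid; angular velocity and forward drag are damped similarly.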
impulse = -self.lateral_velocity * self.body.mass
if impulse.length > self.max_lateral_impulse:
impulse *= self.max_lateral_impulse / impulse.length
self.body.ApplyLinearImpulse(self.current_traction * impulse,
self.body.worldCenter, True)
aimp = 0.1 * self.current_traction * \
self.body.inertia * -self.body.angularVelocity
self.body.ApplyAngularImpulse(aimp, True)
current_forward_normal = self.forward_velocity
current_forward_speed = current_forward_normal.Normalize()
drag_force_magnitude = -2 * current_forward_speed
self.body.ApplyForce(self.current_traction * drag_force_magnitude * current_forward_normal,
self.body.worldCenter, True)
def update_drive(self, keys):
if 'up' in keys:
desired_speed = self.max_forward_speed
elif 'down' in keys:
desired_speed = self.max_backward_speed
else:
return
# find the current speed in the forward direction
current_forward_normal = self.body.GetWorldVector((0, 1))
current_speed = self.forward_velocity.dot(current_forward_normal)
# apply necessary force
force = 0.0
if desired_speed > current_speed:
force = self.max_drive_force
elif desired_speed < current_speed:
force = -self.max_drive_force
else:
return
self.body.ApplyForce(self.current_traction * force * current_forward_normal,
self.body.worldCenter, True)
def update_turn(self, keys):
if 'left' in keys:
desired_torque = self.turn_torque
elif 'right' in keys:
desired_torque = -self.turn_torque
else:
return
self.body.ApplyTorque(desired_torque, True)
def add_ground_area(self, ud):
if ud not in self.ground_areas:
self.ground_areas.append(ud)
self.update_traction()
def remove_ground_area(self, ud):
if ud in self.ground_areas:
self.ground_areas.remove(ud)
self.update_traction()
def update_traction(self):
if not self.ground_areas:
self.current_traction = 1
else:
self.current_traction = 0
mods = [ga.friction_modifier for ga in self.ground_areas]
max_mod = max(mods)
if max_mod > self.current_traction:
self.current_traction = max_mod
class TDCar(object):
vertices = [(1.5, 0.0),
(3.0, 2.5),
(2.8, 5.5),
(1.0, 10.0),
(-1.0, 10.0),
(-2.8, 5.5),
(-3.0, 2.5),
(-1.5, 0.0),
]
tire_anchors = [(-3.0, 0.75),
(3.0, 0.75),
(-3.0, 8.50),
(3.0, 8.50),
]
def __init__(self, world, vertices=None,
tire_anchors=None, density=0.1, position=(0, 0),
**tire_kws):
if vertices is None:
vertices = TDCar.vertices
self.body = world.CreateDynamicBody(position=position)
self.body.CreatePolygonFixture(vertices=vertices, density=density)
self.body.userData = {'obj': self}
self.tires = [TDTire(self, **tire_kws) for i in range(4)]
        if tire_anchors is None:
            anchors = TDCar.tire_anchors
        else:
            anchors = tire_anchors
joints = self.joints = []
for tire, anchor in zip(self.tires, anchors):
j = world.CreateRevoluteJoint(bodyA=self.body,
bodyB=tire.body,
localAnchorA=anchor,
# center of tire
localAnchorB=(0, 0),
enableMotor=False,
maxMotorTorque=1000,
enableLimit=True,
lowerAngle=0,
upperAngle=0,
)
tire.body.position = self.body.worldCenter + anchor
joints.append(j)
def update(self, keys, hz):
for tire in self.tires:
tire.update_friction()
for tire in self.tires:
tire.update_drive(keys)
# control steering
lock_angle = math.radians(40.)
# from lock to lock in 0.5 sec
turn_speed_per_sec = math.radians(160.)
turn_per_timestep = turn_speed_per_sec / hz
desired_angle = 0.0
if 'left' in keys:
desired_angle = lock_angle
elif 'right' in keys:
desired_angle = -lock_angle
front_left_joint, front_right_joint = self.joints[2:4]
angle_now = front_left_joint.angle
angle_to_turn = desired_angle - angle_now
# TODO fix b2Clamp for non-b2Vec2 types
if angle_to_turn < -turn_per_timestep:
angle_to_turn = -turn_per_timestep
elif angle_to_turn > turn_per_timestep:
angle_to_turn = turn_per_timestep
new_angle = angle_now + angle_to_turn
# Rotate the tires by locking the limits:
front_left_joint.SetLimits(new_angle, new_angle)
front_right_joint.SetLimits(new_angle, new_angle)
class TopDownCar (Framework):
name = "Top Down Car"
description = "Keys: accel = w, reverse = s, left = a, right = d"
def __init__(self):
super(TopDownCar, self).__init__()
# Top-down -- no gravity in the screen plane
self.world.gravity = (0, 0)
self.key_map = {Keys.K_w: 'up',
Keys.K_s: 'down',
Keys.K_a: 'left',
Keys.K_d: 'right',
}
# Keep track of the pressed keys
self.pressed_keys = set()
# The walls
boundary = self.world.CreateStaticBody(position=(0, 20))
boundary.CreateEdgeChain([(-30, -30),
(-30, 30),
(30, 30),
(30, -30),
(-30, -30)]
)
# A couple regions of differing traction
self.car = TDCar(self.world)
gnd1 = self.world.CreateStaticBody(userData={'obj': TDGroundArea(0.5)})
fixture = gnd1.CreatePolygonFixture(
box=(9, 7, (-10, 15), math.radians(20)))
# Set as sensors so that the car doesn't collide
fixture.sensor = True
gnd2 = self.world.CreateStaticBody(userData={'obj': TDGroundArea(0.2)})
fixture = gnd2.CreatePolygonFixture(
box=(9, 5, (5, 20), math.radians(-40)))
fixture.sensor = True
def Keyboard(self, key):
key_map = self.key_map
if key in key_map:
self.pressed_keys.add(key_map[key])
else:
super(TopDownCar, self).Keyboard(key)
def KeyboardUp(self, key):
key_map = self.key_map
if key in key_map:
self.pressed_keys.remove(key_map[key])
else:
super(TopDownCar, self).KeyboardUp(key)
def handle_contact(self, contact, began):
# A contact happened -- see if a wheel hit a
# ground area
fixture_a = contact.fixtureA
fixture_b = contact.fixtureB
body_a, body_b = fixture_a.body, fixture_b.body
ud_a, ud_b = body_a.userData, body_b.userData
if not ud_a or not ud_b:
return
tire = None
ground_area = None
for ud in (ud_a, ud_b):
obj = ud['obj']
if isinstance(obj, TDTire):
tire = obj
elif isinstance(obj, TDGroundArea):
ground_area = obj
if ground_area is not None and tire is not None:
if began:
tire.add_ground_area(ground_area)
else:
tire.remove_ground_area(ground_area)
def BeginContact(self, contact):
self.handle_contact(contact, True)
def EndContact(self, contact):
self.handle_contact(contact, False)
def Step(self, settings):
self.car.update(self.pressed_keys, settings.hz)
super(TopDownCar, self).Step(settings)
tractions = [tire.current_traction for tire in self.car.tires]
self.Print('Current tractions: %s' % tractions)
if __name__ == "__main__":
main(TopDownCar)
|