#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract and save accuracy to 'stats/accuracy.json'.
The accuracy values are read from the TensorBoard event files under <folder>/tb/ and summarized by the median over the most recent values.
"""
import json
import os
import numpy as np
import tensorflow as tf
from absl import app, flags
FLAGS = flags.FLAGS
def summary_dict(accuracies):
    # Median accuracy over the last 1, 10, 20 and 50 reported values.
    return {
        'last%02d' % x: np.median(accuracies[-x:]) for x in [1, 10, 20, 50]
    }


def main(argv):
    if len(argv) > 3:
        raise app.UsageError('Too many command-line arguments.')
    matches = sorted(tf.io.gfile.glob(os.path.join(FLAGS.folder, 'tb/events.out.tfevents.*')))
    assert matches, 'No events files found'
    tags = set()
    accuracies = []
    for event_file in matches:
        try:
            for e in tf.compat.v1.train.summary_iterator(event_file):
                for v in e.summary.value:
                    if v.tag == f'accuracy/{FLAGS.test}':
                        accuracies.append(v.simple_value)
                        break
                    elif not accuracies:
                        tags.add(v.tag)
        except tf.errors.DataLossError:
            continue
    assert accuracies, 'No "%s" tag found. Found tags = %s' % (f'accuracy/{FLAGS.test}', tags)
    target_dir = os.path.join(FLAGS.folder, 'stats')
    target_file = os.path.join(target_dir, 'accuracy.json')
    tf.io.gfile.makedirs(target_dir)
    with tf.io.gfile.GFile(target_file, 'w') as f:
        json.dump(summary_dict(accuracies), f, sort_keys=True, indent=4)
    print('Saved: %s' % target_file)


if __name__ == '__main__':
    flags.DEFINE_string('folder', '', 'Folder containing tb events directory.')
    flags.DEFINE_string('test', 'test', 'Name of test set to extract accuracy for.')
    app.run(main)
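
# Example invocation (illustrative; the run directory is hypothetical and the script path should
# be adjusted to where this file lives in your checkout):
#   python extract_accuracy.py --folder=experiments/<run_dir> --test=test
# This writes the median of the most recent 'accuracy/test' summaries to <run_dir>/stats/accuracy.json.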
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import objax
from shared.data.fsl import DATASETS
def save(it, fn='/tmp/delme.png'):
    # Write the next image from the iterator to a PNG (HWC -> CHW for to_png) and return its label.
    d = next(it)
    with open(fn, 'wb') as f:
        f.write(objax.util.image.to_png(d['image'].numpy().transpose((2, 0, 1))))
    return d['label'].numpy()
data = DATASETS()['domainnet64_infograph-0']()
it = iter(data.train.parse(1))
print(save(it))
# Repeat to overwrite delme.png with the next image, and so on.
# You can also view the images with "eog /tmp/delme.png"; the viewer refreshes automatically.
print(save(it))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import objax
from shared.data.core import DATA_DIR, DataSet, record_parse
# filename = 'office3164_amazon-train.tfrecord'
filename = 'domainnet64_infograph-train.tfrecord'
size = 64, 64, 3
d = DataSet.from_files([os.path.join(DATA_DIR, filename)], size, parse_fn=record_parse)
d = d.parse(1).batch(256)
v = next(iter(d))
m = v['image'].numpy()
# Arrange the 256 images into a 16x16 mosaic: (256, H, W, C) -> (C, 16*H, 16*W).
m = m.reshape((16, 16, *size))
m = m.transpose((4, 0, 2, 1, 3))
m = m.reshape((size[2], 16 * size[0], 16 * size[1]))
with open('/tmp/delme.png', 'wb') as f:
    f.write(objax.util.image.to_png(m))
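
# A minimal self-contained sketch of the same mosaic trick on dummy data, to illustrate the
# reshape/transpose pattern used above (numpy only, no dataset required; names below are
# illustration-only and not part of the pipeline):
import numpy as np
tiles = np.arange(4 * 2 * 2 * 3).reshape((4, 2, 2, 3))    # 4 images of 2x2x3 (HWC)
grid = tiles.reshape((2, 2, 2, 2, 3))                      # (rows, cols, H, W, C)
grid = grid.transpose((4, 0, 2, 1, 3)).reshape((3, 4, 4))  # (C, rows*H, cols*W)
assert grid.shape == (3, 4, 4)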
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import softmax
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class MCD(TrainableDAModule):
    def __init__(self, nclass: int, model: Callable, **kwargs):
        super().__init__(nclass, kwargs)
        self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
        self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
        if FLAGS.arch.endswith('pretrain'):
            # Initialize weights of EMA with pretrained model's weights.
            self.model_ema.ema.momentum = 0
            self.model_ema.update_ema()
            self.model_ema.ema.momentum = 0.999
        self.c1: objax.Module = self.model[-1]
        self.c2: objax.Module = objax.nn.Linear(self.c1.w.value.shape[0], nclass)
        self.gen: objax.Module = self.model[:-1]
        self.opt1 = objax.optimizer.Momentum(self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
        self.opt2 = objax.optimizer.Momentum(self.c1.vars('c1') + self.c2.vars('c2'))
        self.opt3 = objax.optimizer.Momentum(self.gen.vars())
        self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)

        @objax.Function.with_vars(self.model_ema.vars())
        def eval_op(x: JaxArray, domain: int) -> JaxArray:
            return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))

        def get_two_outputs(v):
            feat = self.gen(v, training=True)
            return self.c1(feat), self.c2(feat)

        def loss_function_phase1(x, y):
            s1, s2 = get_two_outputs(x[:, 0])
            xes = (objax.functional.loss.cross_entropy_logits(s1, y).mean() +
                   objax.functional.loss.cross_entropy_logits(s2, y).mean())
            return xes, {'losses/xe': xes}

        def loss_function_phase2(x, u, y):
            saved = self.gen.vars().tensors()
            s1, s2 = get_two_outputs(x[:, 0])
            t1, t2 = get_two_outputs(u[:, 0])
            self.gen.vars().assign(saved)
            xes = (objax.functional.loss.cross_entropy_logits(s1, y).mean() +
                   objax.functional.loss.cross_entropy_logits(s2, y).mean())
            dis = jn.abs(softmax(t1) - softmax(t2)).mean()
            return xes - dis, {'losses/xe2': xes, 'losses/dis2': dis}

        def loss_function_phase3(u):
            t1, t2 = get_two_outputs(u[:, 0])
            dis = jn.abs(softmax(t1) - softmax(t2)).mean()
            return dis, {'losses/dis3': dis}

        gv1 = objax.GradValues(loss_function_phase1, self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
        gv2 = objax.GradValues(loss_function_phase2, self.c1.vars('c1') + self.c2.vars('c2'))
        gv3 = objax.GradValues(loss_function_phase3, self.gen.vars())

        @objax.Function.with_vars(self.vars())
        def train_op(step, sx, sy, tu, probe=None):
            y_probe = eval_op(probe, 1) if probe is not None else None
            p = step / (FLAGS.train_mimg << 20)
            lr = self.lr(p)
            g, v1 = gv1(sx, sy)
            self.opt1(lr, objax.functional.parallel.pmean(g))
            g, v2 = gv2(sx, tu, sy)
            self.opt2(lr, objax.functional.parallel.pmean(g))
            v3 = {}
            for _ in range(self.params.wu):
                g, v = gv3(tu)
                for k, val in v[1].items():
                    v3[k] = v3.get(k, 0) + val / self.params.wu
                self.opt3(lr, objax.functional.parallel.pmean(g))
            self.model_ema.update_ema()
            # Report the phase-3 losses averaged over the wu iterations.
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v1[1], **v2[1], **v3}), y_probe

        self.train_op = MyParallel(train_op, reduce=lambda x: x)
        self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
    del argv
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
    testsets = [target.test, source.test]  # Ordered by domain (unlabeled always first)
    module = MCD(source.nclass, network(FLAGS.arch),
                 lr=FLAGS.lr,
                 lr_decay=FLAGS.lr_decay,
                 wd=FLAGS.wd,
                 arch=FLAGS.arch,
                 batch=FLAGS.batch,
                 wu=FLAGS.wu,
                 uratio=FLAGS.uratio)
    logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
        '_'.join(sorted('%s%s' % k for k in module.params.items())))
    logdir = os.path.join(FLAGS.logdir, logdir)
    test = {}
    for domain, testset in enumerate(testsets):
        test.update((f'{FLAGS.source}_to_{k}',
                     v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    train.stop()
if __name__ == '__main__':
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('wu', 1, 'Number of phase-3 iterations (generator updates on the unlabeled discrepancy loss).')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
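
# Example run (illustrative flag values; the module path is assumed and may differ in your checkout):
#   python -m domain_adaptation.mcd --dataset=domainnet32 --source=clipart --target=infograph \
#       --arch=wrn28-2 --train_mimg=8 --logdir=experiments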
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableDAModule):
    def __init__(self, nclass: int, model: Callable, **kwargs):
        super().__init__(nclass, kwargs)
        self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
        self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
        if FLAGS.arch.endswith('pretrain'):
            # Initialize weights of EMA with pretrained model's weights.
            self.model_ema.ema.momentum = 0
            self.model_ema.update_ema()
            self.model_ema.ema.momentum = 0.999
        self.stats = objax.Module()
        self.stats.keygen = objax.random.DEFAULT_GENERATOR
        self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
        self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
        train_vars = self.model.vars() + self.stats.vars()
        self.opt = objax.optimizer.Momentum(train_vars)
        self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)

        @objax.Function.with_vars(self.model_ema.vars())
        def eval_op(x: JaxArray, domain: int) -> JaxArray:
            return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))

        def loss_function(sx, sy, tu):
            c, h, w = sx.shape[-3:]
            xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
            logit = self.model(xu, training=True)
            logit_sx = jn.split(logit, (2 * sx.shape[0],))[0]
            logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
            xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
                        objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
            wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
            loss = xe + self.params.wd * wd
            return loss, {'losses/xe': xe, 'losses/wd': wd}

        gv = objax.GradValues(loss_function, train_vars)

        @objax.Function.with_vars(self.vars())
        def train_op(step, sx, sy, tu, probe=None):
            y_probe = eval_op(probe, 1) if probe is not None else None
            p = step / (FLAGS.train_mimg << 20)
            lr = self.lr(p)
            g, v = gv(sx, sy, tu)
            self.opt(lr, objax.functional.parallel.pmean(g))
            self.model_ema.update_ema()
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe

        self.train_op = MyParallel(train_op, reduce=lambda x: x)
        self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
    del argv
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
    testsets = [target.test, source.test]  # Ordered by domain (target always first)
    module = Baseline(source.nclass, network(FLAGS.arch),
                      lr=FLAGS.lr,
                      lr_decay=FLAGS.lr_decay,
                      wd=FLAGS.wd,
                      arch=FLAGS.arch,
                      batch=FLAGS.batch,
                      uratio=FLAGS.uratio)
    logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
        '_'.join(sorted('%s%s' % k for k in module.params.items())))
    logdir = os.path.join(FLAGS.logdir, logdir)
    test = {}
    for domain, testset in enumerate(testsets):
        test.update((f'{FLAGS.source}_to_{k}',
                     v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    train.stop()
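
# The data pipeline stacks a weak and a strong augmentation of every example, so after reshaping
# to (-1, c, h, w) in loss_function the batch alternates weak/strong. A minimal numpy sketch of
# that de-interleaving ([::2] / [1::2]) on dummy data; this helper is illustration-only and is
# never called by the training code.
def _deinterleave_sketch():
    import numpy as np
    pairs = np.stack([np.zeros((4, 3, 8, 8)), np.ones((4, 3, 8, 8))], axis=1)  # (batch, 2, c, h, w): weak=0, strong=1
    flat = pairs.reshape((-1, 3, 8, 8))                                        # alternating weak, strong, weak, ...
    weak, strong = flat[::2], flat[1::2]
    assert weak.mean() == 0 and strong.mean() == 1
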
if __name__ == '__main__':
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class FixMatchDA(TrainableDAModule):
    def __init__(self, nclass: int, model: Callable, **kwargs):
        super().__init__(nclass, kwargs)
        self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
        self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
        if FLAGS.arch.endswith('pretrain'):
            # Initialize weights of EMA with pretrained model's weights.
            self.model_ema.ema.momentum = 0
            self.model_ema.update_ema()
            self.model_ema.ema.momentum = 0.999
        self.stats = objax.Module()
        self.stats.p_data = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
        self.stats.p_model = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
        train_vars = self.model.vars() + self.stats.vars()
        self.opt = objax.optimizer.Momentum(train_vars)
        self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)

        @objax.Function.with_vars(self.model_ema.vars())
        def eval_op(x: JaxArray, domain: int) -> JaxArray:
            return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))

        def loss_function(sx, sy, tu):
            c, h, w = sx.shape[-3:]
            xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
            logit = self.model(xu, training=True)
            logit_sx_weak = logit[:2 * sx.shape[0]:2]
            logit_weak, logit_strong = logit[::2], logit[1::2]
            confidence_ratio = self.params.confidence
            pseudo_labels = objax.functional.softmax(logit_weak)
            p_data = self.stats.p_data(sy.mean(0))
            p_model = self.stats.p_model(pseudo_labels.mean(0))
            pseudo_labels *= (1e-6 + p_data) / (1e-6 + p_model)
            pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
            pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
            xe = objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean()
            xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_strong, pseudo_labels.argmax(axis=1))
            xeu = (xeu * pseudo_mask).mean()
            wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
            loss = xe + self.params.wu * xeu + self.params.wd * wd
            return loss, {'losses/xe': xe,
                          'losses/xeu': xeu,
                          'losses/wd': wd,
                          'monitors/confidence_ratio': confidence_ratio,
                          'monitors/mask': pseudo_mask.mean(),
                          'monitors/klmodel': objax.functional.divergence.kl(p_data, p_model)}

        gv = objax.GradValues(loss_function, train_vars)

        @objax.Function.with_vars(self.vars())
        def train_op(step, sx, sy, tu, probe=None):
            y_probe = eval_op(probe, 1) if probe is not None else None
            p = step / (FLAGS.train_mimg << 20)
            lr = self.lr(p)
            g, v = gv(sx, sy, tu)
            self.opt(lr, objax.functional.parallel.pmean(g))
            self.model_ema.update_ema()
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe

        self.train_op = MyParallel(train_op, reduce=lambda x: x)
        self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
    del argv
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
    testsets = [target.test, source.test]  # Ordered by domain (unlabeled always first)
    module = FixMatchDA(source.nclass, network(FLAGS.arch),
                        lr=FLAGS.lr,
                        lr_decay=FLAGS.lr_decay,
                        wd=FLAGS.wd,
                        arch=FLAGS.arch,
                        batch=FLAGS.batch,
                        wu=FLAGS.wu,
                        confidence=FLAGS.confidence,
                        uratio=FLAGS.uratio)
    logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
        '_'.join(sorted('%s%s' % k for k in module.params.items())))
    logdir = os.path.join(FLAGS.logdir, logdir)
    test = {}
    for domain, testset in enumerate(testsets):
        test.update((f'{FLAGS.source}_to_{k}',
                     v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    train.stop()
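
# Minimal numpy sketch of the distribution-alignment step used in loss_function above:
# pseudo-labels are rescaled by the ratio of the running label distribution to the running
# model-prediction distribution, renormalized, then thresholded. Values are dummies and this
# helper is illustration-only; it is never called by the training code.
def _distribution_alignment_sketch():
    import numpy as np
    pseudo = np.array([[0.7, 0.2, 0.1], [0.3, 0.4, 0.3]])     # softmax of weak logits
    p_data = np.array([0.5, 0.3, 0.2])                         # running estimate of the label marginal
    p_model = pseudo.mean(0)                                   # running estimate of the model marginal
    aligned = pseudo * (1e-6 + p_data) / (1e-6 + p_model)
    aligned /= aligned.sum(1, keepdims=True)
    mask = (aligned.max(axis=1) >= 0.9).astype(aligned.dtype)  # confidence threshold 0.9
    return aligned, mask
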
if __name__ == '__main__':
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatch(TrainableDAModule):
    def __init__(self, nclass: int, model: Callable, **kwargs):
        super().__init__(nclass, kwargs)
        self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
        self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
        if FLAGS.arch.endswith('pretrain'):
            # Initialize weights of EMA with pretrained model's weights.
            self.model_ema.ema.momentum = 0
            self.model_ema.update_ema()
            self.model_ema.ema.momentum = 0.999
        self.stats = objax.Module()
        self.stats.keygen = objax.random.DEFAULT_GENERATOR
        self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
        self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
        train_vars = self.model.vars() + self.stats.vars()
        self.opt = objax.optimizer.Momentum(train_vars)
        self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
        self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)

        @objax.Function.with_vars(self.model_ema.vars())
        def eval_op(x: JaxArray, domain: int) -> JaxArray:
            return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))

        def loss_function(sx, sy, tu, progress):
            c, h, w = sx.shape[-3:]
            # Forward the source batch alone to get logits under source-only BatchNorm statistics.
            saved_vars = self.model.vars().tensors()
            logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
            self.model.vars().assign(saved_vars)
            # Forward source and target together (combined BatchNorm statistics).
            xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
            logit = self.model(xu, training=True)
            logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
            # Random logit interpolation between the two BatchNorm variants.
            logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
            logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
            logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
            if self.params.use_cr:
                # Relative confidence threshold: scale by the mean confidence on weakly augmented source data.
                real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
                confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
            else:
                confidence_ratio = self.params.confidence
            # Distribution alignment of the target pseudo-labels.
            pseudo_labels = objax.functional.softmax(logit_tu_weak)
            p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
            p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
            pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
            pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
            pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
            xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
                        objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
            xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
            xeu = (xeu * pseudo_mask).mean()
            wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
            loss = xe + self.wu(progress) * xeu + self.params.wd * wd
            return loss, {'losses/xe': xe,
                          'losses/xeu': xeu,
                          'losses/wd': wd,
                          'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
                          'monitors/confidence_ratio': confidence_ratio,
                          'monitors/wu': self.wu(progress),
                          'monitors/mask': pseudo_mask.mean(),
                          'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}

        gv = objax.GradValues(loss_function, train_vars)

        @objax.Function.with_vars(self.vars())
        def train_op(step, sx, sy, tu, probe=None):
            y_probe = eval_op(probe, 1) if probe is not None else None
            p = step / (FLAGS.train_mimg << 20)
            lr = self.lr(p)
            g, v = gv(sx, sy, tu, p)
            self.opt(lr, objax.functional.parallel.pmean(g))
            self.model_ema.update_ema()
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe

        self.train_op = MyParallel(train_op, reduce=lambda x: x)
        self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
    del argv
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
    testsets = [target.test, source.test]  # Ordered by domain (target always first)
    module = AdaMatch(source.nclass, network(FLAGS.arch),
                      lr=FLAGS.lr,
                      lr_decay=FLAGS.lr_decay,
                      wd=FLAGS.wd,
                      arch=FLAGS.arch,
                      batch=FLAGS.batch,
                      wu=FLAGS.wu,
                      confidence=FLAGS.confidence,
                      use_cr=FLAGS.use_cr,
                      uratio=FLAGS.uratio)
    logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
        '_'.join(sorted('%s%s' % k for k in module.params.items())))
    logdir = os.path.join(FLAGS.logdir, logdir)
    test = {}
    for domain, testset in enumerate(testsets):
        test.update((f'{FLAGS.source}_to_{k}',
                     v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    train.stop()
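
# Minimal numpy sketch of the relative confidence threshold (use_cr=True) computed in
# loss_function above: the fixed --confidence value is scaled by the mean top-1 confidence the
# model currently has on weakly augmented labeled data. Dummy values; this helper is
# illustration-only and is never called by the training code.
def _relative_threshold_sketch():
    import numpy as np
    labeled_softmax = np.array([[0.8, 0.1, 0.1], [0.6, 0.3, 0.1]])  # softmax on weak labeled logits
    threshold = labeled_softmax.max(1).mean(0) * 0.9                # 0.9 is the --confidence flag
    assert abs(threshold - 0.63) < 1e-9
    return threshold
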
if __name__ == '__main__':
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_bool('use_cr', True, 'Make the confidence threshold proportional to the confidence on labeled data.')
    flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NoisyStudent.
"""
import os
import sys
from typing import Callable
import jax
import numpy as np
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from tqdm import tqdm
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.augment.augment import AugmentPoolBase
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class NoisyStudent(TrainableDAModule):
    def __init__(self, nclass: int, model: Callable, **kwargs):
        super().__init__(nclass, kwargs)
        self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
        self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
        if FLAGS.arch.endswith('pretrain'):
            # Initialize weights of EMA with pretrained model's weights.
            self.model_ema.ema.momentum = 0
            self.model_ema.update_ema()
            self.model_ema.ema.momentum = 0.999
        train_vars = self.model.vars()
        self.opt = objax.optimizer.Momentum(train_vars)
        self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)

        @objax.Function.with_vars(self.model_ema.vars())
        def eval_op(x: JaxArray, domain: int) -> JaxArray:
            return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))

        def loss_function(sx, sy):
            c, h, w = sx.shape[-3:]
            logit_sx = self.model(sx.reshape((-1, c, h, w)), training=True)
            logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
            xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
                        objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
            wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
            loss = xe + self.params.wd * wd
            return loss, {'losses/xe': xe, 'losses/wd': wd}

        gv = objax.GradValues(loss_function, train_vars)

        @objax.Function.with_vars(self.vars())
        def train_op(step, sx, sy, tu, probe=None):
            y_probe = eval_op(probe, 1) if probe is not None else None
            p = step / (FLAGS.train_mimg << 20)
            lr = self.lr(p)
            g, v = gv(sx, sy)
            self.opt(lr, objax.functional.parallel.pmean(g))
            self.model_ema.update_ema()
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe

        self.train_op = MyParallel(train_op, reduce=lambda x: x)
        self.eval_op = MyParallel(eval_op, static_argnums=(1,))
        # jit eval op used only for saving prediction files, avoids batch padding
        self.eval_op_jit = objax.Jit(eval_op, static_argnums=(1,))
class PseudoLabeler(AugmentPoolBase):
    def __init__(self, data: AugmentPoolBase, pseudo_label_file: str, threshold: float, uratio: float = 1):
        with open(pseudo_label_file, 'rb') as f:
            self.pseudo_label = np.load(f)
        self.has_pseudo_label = self.pseudo_label.max(axis=1) > threshold
        self.data = data
        self.uratio = uratio
        # assert uratio == 1, 'Not implemented'

    def stop(self):
        self.data.stop()

    def __iter__(self) -> dict:
        batch = []
        batch_size = None
        for d in self.data:
            batch_size = batch_size or d['sx']['image'].shape[0]
            # Pool labeled source examples with confidently pseudo-labeled target examples,
            # shuffle them, and re-emit full labeled batches in the 'sx' slot.
            for p, index in enumerate(d['sx']['index']):
                batch.append(dict(index=index, label=d['sx']['label'][p], image=d['sx']['image'][p]))
            for p, index in enumerate(d['tu']['index']):
                if self.has_pseudo_label[index]:
                    batch.append(dict(index=index, label=self.pseudo_label[index], image=d['tu']['image'][p]))
            np.random.shuffle(batch)
            while len(batch) >= batch_size:
                current_batch, batch = batch[:batch_size], batch[batch_size:]
                d['sx'] = dict(image=np.stack([x['image'] for x in current_batch]),
                               label=np.stack([x['label'] for x in current_batch]),
                               index=np.stack([x['index'] for x in current_batch]))
                yield d
def main(argv):
    del argv
    assert FLAGS.id, 'You must specify a --id for the run'
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
    testsets = [target.test, source.test]  # Ordered by domain (target always first)
    module = NoisyStudent(source.nclass, network(FLAGS.arch),
                          lr=FLAGS.lr,
                          lr_decay=FLAGS.lr_decay,
                          wd=FLAGS.wd,
                          arch=FLAGS.arch,
                          batch=FLAGS.batch,
                          uratio=FLAGS.uratio)
    logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
    logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
    if FLAGS.teacher_id != '':
        pseudo_label_logdir = os.path.join(FLAGS.logdir, logdir, FLAGS.teacher_id)
        pseudo_label_file = os.path.join(pseudo_label_logdir, 'predictions.npy')
        print('Using pseudo label file ', pseudo_label_file)
    else:
        pseudo_label_file = FLAGS.pseudo_label_file
    logdir = os.path.join(FLAGS.logdir, logdir, FLAGS.id)
    prediction_file = os.path.join(logdir, 'predictions.npy')
    assert not os.path.exists(prediction_file), f'The prediction file "{prediction_file}" already exists, ' \
                                                f'remove it if you want to overwrite it.'
    test = {}
    for domain, testset in enumerate(testsets):
        test.update((f'{FLAGS.source}_to_{k}',
                     v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    if pseudo_label_file != '':
        train = PseudoLabeler(train, pseudo_label_file, FLAGS.pseudo_label_th, FLAGS.uratio)
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    # Save the trained model's predictions on the unlabeled target training set so that a
    # student run can consume them via --teacher_id or --pseudo_label_file.
    predictions = []
    for batch in tqdm(target.train.parse().batch(FLAGS.batch).nchw(),
                      desc=f'Evaluating {FLAGS.target}', leave=False):
        predictions.append(module.eval_op_jit(batch['image']._numpy(), 0))
    predictions = np.concatenate(predictions)
    with open(prediction_file, 'wb') as f:
        np.save(f, predictions)
    print('Saved target predictions to', prediction_file)
    train.stop()
if __name__ == '__main__':
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('pseudo_label_th', 0.9, 'Pseudo-label threshold.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
    flags.DEFINE_string('id', '', 'Id of the experiment.')
    flags.DEFINE_string('pseudo_label_file', '', 'Prediction file to read.')
    flags.DEFINE_string('teacher_id', '', 'Exp id of teacher. Overrides pseudo_label_file if set.')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
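
# Typical two-step workflow (illustrative ids and flag values; the module path is assumed and
# may differ in your checkout):
#   1) Train a teacher and save its target predictions:
#        python -m domain_adaptation.noisy_student --id=teacher1 --dataset=domainnet32 \
#            --source=clipart --target=infograph
#   2) Train a student on source labels plus the teacher's confident pseudo-labels:
#        python -m domain_adaptation.noisy_student --id=student1 --teacher_id=teacher1 \
#            --pseudo_label_th=0.9 --dataset=domainnet32 --source=clipart --target=infograph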
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable, Optional
import jax.numpy as jn
import numpy as np
from objax.jaxboard import Summary
from tqdm import tqdm
from shared.train import TrainableModule
class TrainableDAModule(TrainableModule):
    def eval(self, summary: Summary, epoch: int, test: Dict[str, Iterable], valid: Optional[Iterable] = None):
        def get_accuracy(dataset: Iterable):
            accuracy, total, batch = 0, 0, None
            for data in tqdm(dataset, leave=False, desc='Evaluating'):
                x, y, domain = data['image']._numpy(), data['label']._numpy(), int(data['domain']._numpy())
                total += x.shape[0]
                batch = batch or x.shape[0]
                if x.shape[0] != batch:
                    # Pad the last batch if it's smaller than expected (must divide properly on GPUs).
                    x = np.concatenate([x] + [x[-1:]] * (batch - x.shape[0]))
                p = self.eval_op(x, domain)[:y.shape[0]]
                accuracy += (np.argmax(p, axis=1) == y).sum()
            return accuracy / total if total else 0

        test_accuracy = {key: get_accuracy(value) for key, value in test.items()}
        to_print = []
        for key, value in sorted(test_accuracy.items()):
            summary.scalar('accuracy/%s' % key, 100 * value)
            to_print.append('Accuracy/%s %.2f' % (key, summary['accuracy/%s' % key]()))
        print('Epoch %-4d Loss %.2f %s' % (epoch + 1, summary['losses/xe'](), ' '.join(to_print)))

    def train_step(self, summary, data, step):
        kv, probe = self.train_op(step,
                                  data['sx']['image'], data['sx']['label'],
                                  data['tu']['image'], data.get('probe'))
        if 'probe_callback' in data:
            data['probe_callback'](jn.concatenate(probe))
        for k, v in kv.items():
            v = v[0]
            if jn.isnan(v):
                raise ValueError('NaN', k)
            summary.scalar(k, float(v))
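

if __name__ == '__main__':
    # Minimal numpy sketch of the pad-then-slice trick used in get_accuracy above: the last
    # (smaller) batch is padded to the expected batch size by repeating its final example, and
    # the extra predictions are discarded after the forward pass. Illustration only; values and
    # the stand-in "model" are dummies and not part of the training code.
    x = np.arange(5)                                        # last batch has 5 examples, expected batch is 8
    padded = np.concatenate([x] + [x[-1:]] * (8 - x.shape[0]))
    assert padded.shape[0] == 8
    preds = padded * 2                                      # stand-in for eval_op(padded)
    preds = preds[:x.shape[0]]                              # keep predictions for the real examples only
    assert preds.shape[0] == 5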
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import multiprocessing
from time import sleep
from typing import List, Tuple
import numpy as np
import objax
import tensorflow as tf
from absl.flags import FLAGS
from shared.data.augment import ctaugment
from shared.data.augment.augment import AugmentPoolBase
from shared.data.augment.core import get_tf_augment
from shared.data.augment.ctaugment import CTAugment
from shared.data.core import DataSet
from shared.data.core import Numpyfier
from shared.data.merger import DataMerger
from shared.util import StoppableThread
class MixData(AugmentPoolBase):
    def __init__(self,
                 labeled_source: DataSet,
                 unlabeled_target: DataSet,
                 nclass: int, batch: int, uratio: int):
        # Parse the augmentation spec, e.g. '(sm,sm)' -> weak='sm', strong='sm'.
        a = FLAGS.augment.split('(')[-1]
        a = a[:-1].split(',')
        weak, strong = tuple(get_tf_augment(ai, size=labeled_source.image_shape[0]) for ai in a)

        def bi_augment(d):
            return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])

        sx, tu = (v.repeat().shuffle(FLAGS.shuffle).parse().map(bi_augment, FLAGS.para_augment).nchw()
                  for v in (labeled_source, unlabeled_target))
        sx = Numpyfier(sx.batch(batch).one_hot(nclass).prefetch(16))
        tu = Numpyfier(tu.batch(batch * uratio).prefetch(16))
        self.train = DataMerger((sx, tu))

    def __iter__(self) -> dict:
        for sx, tu in self.train:
            yield dict(sx=sx, tu=tu)
class CTAData(AugmentPoolBase):
    def __init__(self,
                 labeled_source: DataSet,
                 unlabeled_target: DataSet,
                 nclass: int, batch: int, uratio: int):
        # Parse the augmentation spec, e.g. 'CTA(sm,sm,probe=1)' -> weak='sm', strong='sm', kwargs={'probe': '1'}.
        a = FLAGS.augment.split('(')[-1]
        a = a[:-1].split(',')
        a, kwargs = a[:2], {k: v for k, v in (x.split('=') for x in a[2:])}
        h, w, c = labeled_source.image_shape
        para = FLAGS.para_augment
        probe_shape = (para, batch, c, h, w)
        sx_shape = (para, batch, 2, c, h, w)
        tu_shape = (para, batch * uratio, 2, c, h, w)
        del h, w, c
        self.shapes = probe_shape, sx_shape, tu_shape
        self.check_mem_requirements()
        self.probe, self.sx, self.tu = self.get_np_arrays(self.shapes)
        self.pool = multiprocessing.Pool(para)
        self.probe_period = int(kwargs.get('probe', 1))
        self.cta = CTAugment(int(kwargs.get('depth', 2)),
                             float(kwargs.get('th', 0.8)),
                             float(kwargs.get('decay', 0.99)))
        self.to_schedule = collections.deque()
        self.deque = collections.deque()
        self.free = list(range(para))
        self.thread = StoppableThread(target=self.scheduler)
        self.thread.start()
        weak, strong = tuple(get_tf_augment(ai, size=labeled_source.image_shape[0]) for ai in a)

        def bi_augment(d):
            return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])

        sx, tu = (v.repeat().shuffle(FLAGS.shuffle).parse().map(bi_augment, FLAGS.para_augment).nchw()
                  for v in (labeled_source, unlabeled_target))
        sx = Numpyfier(sx.batch(batch).one_hot(nclass).prefetch(16))
        tu = Numpyfier(tu.batch(batch * uratio).prefetch(16))
        self.train = DataMerger((sx, tu))

    def stop(self):
        self.thread.stop()

    def scheduler(self):
        while not self.thread.stopped():
            sleep(0.0001)
            while self.to_schedule:
                d, idx, do_probe = self.to_schedule.popleft()
                self.sx[idx] = d['sx']['image']
                self.tu[idx] = d['tu']['image']
                worker_args = (idx, self.shapes, do_probe, self.cta)
                self.deque.append((d, idx, self.pool.apply_async(self.worker, worker_args)))

    @staticmethod
    def worker(idx: int, shapes: List[Tuple[int, ...]], do_probe: bool, cta: CTAugment):
        def cutout_policy():
            return cta.policy(probe=False) + [ctaugment.OP('cutout', (1,))]

        probe, sx_array, tu_array = (arr[idx] for arr in CTAData.get_np_arrays(shapes))
        sx = objax.util.image.nhwc(sx_array)
        tu = objax.util.image.nhwc(tu_array)
        nchw = objax.util.image.nchw
        sx_strong = np.stack([ctaugment.apply(sx[i, 1], cutout_policy()) for i in range(sx.shape[0])])
        tu_strong = np.stack([ctaugment.apply(tu[i, 1], cutout_policy()) for i in range(tu.shape[0])])
        sx_array[:] = nchw(np.stack([sx[:, 0], sx_strong], axis=1))
        tu_array[:] = nchw(np.stack([tu[:, 0], tu_strong], axis=1))
        if not do_probe:
            return
        policy_list = [cta.policy(probe=True) for _ in range(sx.shape[0])]
        probe[:] = nchw(np.stack([ctaugment.apply(sx[i, 0], policy)
                                  for i, policy in enumerate(policy_list)]))
        return policy_list

    def update_rates(self, policy, label, y_probe):
        # CTAugment policy weight: 1 minus half the L1 distance between the probe prediction and the label.
        w1 = 1 - 0.5 * np.abs(y_probe - label).sum(1)
        for p in range(w1.shape[0]):
            self.cta.update_rates(policy[p], w1[p])

    def __iter__(self):
        for i, (sx, tu) in enumerate(self.train):
            if not self.free:
                while not self.deque:
                    sleep(0.0001)
                d, idx, pd = self.deque.popleft()
                self.free.append(idx)
                policy = pd.get()
                if policy:
                    d['probe'] = np.copy(self.probe[idx])
                    d['policy'] = policy
                    d['probe_callback'] = functools.partial(self.update_rates, d['policy'], d['sx']['label'])
                d['sx']['image'][:] = self.sx[idx]
                d['tu']['image'][:] = self.tu[idx]
                yield d
            self.to_schedule.append((dict(sx=sx, tu=tu), self.free.pop(), (i % self.probe_period) == 0))
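

if __name__ == '__main__':
    # Minimal numpy sketch of the CTAugment weight computed in update_rates above: the prediction
    # on the probe image is compared with the one-hot label, and a policy that keeps the prediction
    # close to the label gets a weight near 1. Dummy values, illustration only.
    y_probe = np.array([[0.7, 0.2, 0.1]])  # model prediction on the probe-augmented image
    label = np.array([[1.0, 0.0, 0.0]])    # one-hot label of that image
    w1 = 1 - 0.5 * np.abs(y_probe - label).sum(1)
    assert np.isclose(w1[0], 0.7)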
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import softmax
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class MCD(TrainableSSDAModule):
    def __init__(self, nclass: int, model: Callable, **kwargs):
        super().__init__(nclass, kwargs)
        self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
        self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
        if FLAGS.arch.endswith('pretrain'):
            # Initialize weights of EMA with pretrained model's weights.
            self.model_ema.ema.momentum = 0
            self.model_ema.update_ema()
            self.model_ema.ema.momentum = 0.999
        self.c1: objax.Module = self.model[-1]
        self.c2: objax.Module = objax.nn.Linear(self.c1.w.value.shape[0], nclass)
        self.gen: objax.Module = self.model[:-1]
        self.opt1 = objax.optimizer.Momentum(self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
        self.opt2 = objax.optimizer.Momentum(self.c1.vars('c1') + self.c2.vars('c2'))
        self.opt3 = objax.optimizer.Momentum(self.gen.vars())
        self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)

        @objax.Function.with_vars(self.model_ema.vars())
        def eval_op(x: JaxArray, domain: int) -> JaxArray:
            return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))

        def get_two_outputs(v):
            feat = self.gen(v, training=True)
            return self.c1(feat), self.c2(feat)

        def loss_function_phase1(x, y):
            s1, s2 = get_two_outputs(x[:, 0])
            xes = (objax.functional.loss.cross_entropy_logits(s1, y).mean() +
                   objax.functional.loss.cross_entropy_logits(s2, y).mean())
            return xes, {'losses/xe': xes}

        def loss_function_phase2(x, u, y):
            saved = self.gen.vars().tensors()
            s1, s2 = get_two_outputs(x[:, 0])
            t1, t2 = get_two_outputs(u[:, 0])
            self.gen.vars().assign(saved)
            xes = (objax.functional.loss.cross_entropy_logits(s1, y).mean() +
                   objax.functional.loss.cross_entropy_logits(s2, y).mean())
            dis = jn.abs(softmax(t1) - softmax(t2)).mean()
            return xes - dis, {'losses/xe2': xes, 'losses/dis2': dis}

        def loss_function_phase3(u):
            t1, t2 = get_two_outputs(u[:, 0])
            dis = jn.abs(softmax(t1) - softmax(t2)).mean()
            return dis, {'losses/dis3': dis}

        gv1 = objax.GradValues(loss_function_phase1, self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
        gv2 = objax.GradValues(loss_function_phase2, self.c1.vars('c1') + self.c2.vars('c2'))
        gv3 = objax.GradValues(loss_function_phase3, self.gen.vars())

        @objax.Function.with_vars(self.vars())
        def train_op(step, sx, sy, tx, ty, tu, probe=None):
            y_probe = eval_op(probe, 1) if probe is not None else None
            p = step / (FLAGS.train_mimg << 20)
            lr = self.lr(p)
            g, v1 = gv1(jn.concatenate((sx, tx)), jn.concatenate((sy, ty)))
            self.opt1(lr, objax.functional.parallel.pmean(g))
            g, v2 = gv2(jn.concatenate((sx, tx)), tu, jn.concatenate((sy, ty)))
            self.opt2(lr, objax.functional.parallel.pmean(g))
            v3 = {}
            for _ in range(self.params.wu):
                g, v = gv3(tu)
                for k, val in v[1].items():
                    v3[k] = v3.get(k, 0) + val / self.params.wu
                self.opt3(lr, objax.functional.parallel.pmean(g))
            self.model_ema.update_ema()
            # Report the phase-3 losses averaged over the wu iterations.
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v1[1], **v2[1], **v3}), y_probe

        self.train_op = MyParallel(train_op, reduce=lambda x: x)
        self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
    del argv
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
    target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
    target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
    testsets = [target_unlabeled.test, source.test]  # Ordered by domain (unlabeled always first)
    module = MCD(source.nclass, network(FLAGS.arch),
                 lr=FLAGS.lr,
                 lr_decay=FLAGS.lr_decay,
                 wd=FLAGS.wd,
                 arch=FLAGS.arch,
                 batch=FLAGS.batch,
                 wu=FLAGS.wu,
                 uratio=FLAGS.uratio)
    logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
    logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
    logdir = os.path.join(FLAGS.logdir, logdir)
    test = {}
    for domain, testset in enumerate(testsets):
        test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
                        FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
                        FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    train.stop()
if __name__ == '__main__':
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('wu', 1, 'Number of phase-3 iterations (generator updates on the unlabeled discrepancy loss).')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
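
# Example run for the semi-supervised DA setting (illustrative; the module path is assumed and may
# differ in your checkout). The --target value encodes the labeled-target budget: a string such as
# 'infograph(10,seed=1)' is handed to DataSetSSL.parse_name, which (per the variable names in main)
# yields the target dataset name, the number of labeled samples per class (10) and the labeling seed (1).
#   python -m semi_supervised_domain_adaptation.mcd --dataset=domainnet32 --source=clipart \
#       --target='infograph(10,seed=1)'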
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx = jn.split(logit, (2 * sx.shape[0],))[0]
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
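            # Training progress in [0, 1]: step counts images seen and FLAGS.train_mimg << 20 is the total image budget.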
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(jn.concatenate((sx, tx)), jn.concatenate((sy, ty)), tu)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = Baseline(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class FixMatchDA(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.p_data = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_model = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
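            # Even rows are weak views, odd rows are strong views; the first 2 * len(sx) rows are the labeled (source + target) examples.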
logit_sx_weak = logit[:2 * sx.shape[0]:2]
logit_weak, logit_strong = logit[::2], logit[1::2]
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_weak)
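            # Distribution alignment: rescale pseudo-labels by the ratio of the labeled class distribution to the model's unlabeled class distribution, then renormalize.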
p_data = self.stats.p_data(sy.mean(0))
p_model = self.stats.p_model(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_data) / (1e-6 + p_model)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
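            # Only pseudo-labels whose top probability reaches the confidence threshold contribute to the unlabeled loss.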
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean()
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wu * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'monitors/confidence_ratio': confidence_ratio,
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_data, p_model)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(jn.concatenate((sx, tx)), jn.concatenate((sy, ty)), tu)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = FixMatchDA(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatch(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tx, ty, tu, progress):
sx_domain = jn.ones(2 * sx.shape[0], dtype=jn.int32)
tx_domain = jn.zeros(2 * tx.shape[0], dtype=jn.int32)
tu_domain = jn.zeros(2 * tu.shape[0], dtype=jn.int32)
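            # Forward pass on labeled data only (source + labeled target), so batch norm sees labeled-only statistics; the saved variables are restored below so this pass leaves no side effects.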
saved_vars = self.model.vars().tensors()
logit_bn = self.model(jn.concatenate((sx, tx)).reshape((-1, *sx.shape[2:])), training=True,
domain=jn.concatenate((sx_domain, tx_domain)))
logit_bn_sx, logit_bn_tx = jn.split(logit_bn, 2)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tx, tu)).reshape((-1, *sx.shape[2:]))
logit = self.model(xu, training=True, domain=jn.concatenate((sx_domain, tx_domain, tu_domain)))
logit_sx, logit_tx, logit_tu = jn.split(logit, (2 * sx.shape[0], 2 * (sx.shape[0] + tx.shape[0])))
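            # Random logit interpolation: blend the labeled logits from the full batch with the labeled-only logits using element-wise uniform weights.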
logit_sx += (logit_bn_sx - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_tx += (logit_bn_tx - logit_tx) * objax.random.uniform(logit_tx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tx_weak, logit_tx_strong = logit_tx[::2], logit_tx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xet = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_tx_weak, ty).mean() +
objax.functional.loss.cross_entropy_logits(logit_tx_strong, ty).mean())
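            # Relative confidence threshold: when use_cr is set, scale the fixed threshold by the mean top-1 confidence on weakly augmented source data.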
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + xet + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xet': xet,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_sx).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tx, ty, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = AdaMatch(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NoisyStudent.
"""
import os
import sys
from typing import Callable
import jax
import numpy as np
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from tqdm import tqdm
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.data.augment.augment import AugmentPoolBase
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class NoisyStudent(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
train_vars = self.model.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy):
c, h, w = sx.shape[-3:]
logit_sx = self.model(sx.reshape((-1, c, h, w)), training=True)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
# jit eval op used only for saving prediction files, avoids batch padding
self.eval_op_jit = objax.Jit(eval_op, static_argnums=(1,))
class PseudoLabeler(AugmentPoolBase):
def __init__(self, data: AugmentPoolBase, pseudo_label_file: str, threshold: float, uratio: float = 1):
with open(pseudo_label_file, 'rb') as f:
self.pseudo_label = np.load(f)
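        # A pseudo-label is used only when the teacher's top probability exceeds the threshold.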
self.has_pseudo_label = self.pseudo_label.max(axis=1) > threshold
self.data = data
self.uratio = uratio
def stop(self):
self.data.stop()
def __iter__(self) -> dict:
batch = []
batch_size = None
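        # Re-batch labeled source, labeled target and confidently pseudo-labeled target examples as supervised ('sx') batches for the student.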
for d in self.data:
batch_size = batch_size or d['sx']['image'].shape[0]
for p, index in enumerate(d['sx']['index']):
batch.append(dict(index=index, label=d['sx']['label'][p], image=d['sx']['image'][p]))
for p, index in enumerate(d['tx']['index']):
batch.append(dict(index=index, label=d['tx']['label'][p], image=d['tx']['image'][p]))
for p, index in enumerate(d['tu']['index']):
if self.has_pseudo_label[index]:
batch.append(dict(index=index, label=self.pseudo_label[index], image=d['tu']['image'][p]))
np.random.shuffle(batch)
while len(batch) >= batch_size:
current_batch, batch = batch[:batch_size], batch[batch_size:]
d['sx'] = dict(image=np.stack([x['image'] for x in current_batch]),
label=np.stack([x['label'] for x in current_batch]),
index=np.stack([x['index'] for x in current_batch]))
yield d
def main(argv):
del argv
    assert FLAGS.id, 'You must specify an --id for the run.'
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = NoisyStudent(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
if FLAGS.teacher_id != '':
pseudo_label_logdir = os.path.join(FLAGS.logdir, logdir, FLAGS.teacher_id)
pseudo_label_file = os.path.join(pseudo_label_logdir, 'predictions.npy')
        print('Using pseudo label file', pseudo_label_file)
else:
pseudo_label_file = FLAGS.pseudo_label_file
logdir = os.path.join(FLAGS.logdir, logdir, FLAGS.id)
prediction_file = os.path.join(logdir, 'predictions.npy')
assert not os.path.exists(prediction_file), f'The prediction file "{prediction_file}" already exists, ' \
f'remove it if you want to overwrite it.'
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
if pseudo_label_file != '':
train = PseudoLabeler(train, pseudo_label_file, FLAGS.pseudo_label_th, FLAGS.uratio)
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
predictions = []
for batch in tqdm(target_unlabeled.train.parse().batch(FLAGS.batch).nchw(),
desc=f'Evaluating unlabeled {FLAGS.target}', leave=False):
predictions.append(module.eval_op_jit(batch['image']._numpy(), 0))
predictions = np.concatenate(predictions)
with open(prediction_file, 'wb') as f:
np.save(f, predictions)
print('Saved target predictions to', prediction_file)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('pseudo_label_th', 0.9, 'Pseudo-label threshold.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('batch', 64, 'Batch size.')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('id', '', 'Id of the experiment.')
flags.DEFINE_string('pseudo_label_file', '', 'Prediction file to read.')
flags.DEFINE_string('teacher_id', '', 'Exp id of teacher. Overrides pseudo_label_file if set.')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable, Optional
import numpy as np
from jax import numpy as jn
from objax.jaxboard import Summary
from tqdm import tqdm
from shared.train import TrainableModule
class TrainableSSDAModule(TrainableModule):
def eval(self, summary: Summary, epoch: int, test: Dict[str, Iterable], valid: Optional[Iterable] = None):
def get_accuracy(dataset: Iterable):
accuracy, total, batch = 0, 0, None
for data in tqdm(dataset, leave=False, desc='Evaluating'):
x, y, domain = data['image']._numpy(), data['label']._numpy(), int(data['domain']._numpy())
total += x.shape[0]
batch = batch or x.shape[0]
if x.shape[0] != batch:
# Pad the last batch if it's smaller than expected (must divide properly on GPUs).
x = np.concatenate([x] + [x[-1:]] * (batch - x.shape[0]))
p = self.eval_op(x, domain)[:y.shape[0]]
                accuracy += (np.argmax(p, axis=1) == y).sum()
return accuracy / total if total else 0
test_accuracy = {key: get_accuracy(value) for key, value in test.items()}
to_print = []
for key, value in sorted(test_accuracy.items()):
summary.scalar('accuracy/%s' % key, 100 * value)
            to_print.append('Accuracy/%s %.2f' % (key, summary['accuracy/%s' % key]()))
print('Epoch %-4d Loss %.2f %s' % (epoch + 1, summary['losses/xe'](), ' '.join(to_print)))
def train_step(self, summary, data, step):
kv, probe = self.train_op(step,
data['sx']['image'], data['sx']['label'],
data['tx']['image'], data['tx']['label'],
data['tu']['image'], data.get('probe'))
if 'probe_callback' in data:
data['probe_callback'](jn.concatenate(probe))
for k, v in kv.items():
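            # Metric values are replicated across devices; keep the first replica.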
v = v[0]
if jn.isnan(v):
raise ValueError('NaN', k)
summary.scalar(k, float(v))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import multiprocessing
from time import sleep
from typing import List, Tuple
import numpy as np
import objax
import tensorflow as tf
from absl.flags import FLAGS
from shared.data.augment import ctaugment
from shared.data.augment.augment import AugmentPoolBase
from shared.data.augment.core import get_tf_augment
from shared.data.augment.ctaugment import CTAugment
from shared.data.core import DataSet
from shared.data.core import Numpyfier
from shared.data.merger import DataMerger
from shared.util import StoppableThread
class MixData(AugmentPoolBase):
def __init__(self,
labeled_source: DataSet,
labeled_target: DataSet,
unlabeled_target: DataSet,
nclass: int, batch: int, uratio: int):
a = FLAGS.augment.split('(')[-1]
a = a[:-1].split(',')
weak, strong = tuple(get_tf_augment(ai, size=labeled_source.image_shape[0]) for ai in a)
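        # Each example is augmented twice: index 0 holds the weak view, index 1 the strong view.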
def bi_augment(d):
return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])
sx, tx, tu = (v.repeat().shuffle(FLAGS.shuffle).parse().map(bi_augment, FLAGS.para_augment).nchw()
for v in (labeled_source, labeled_target, unlabeled_target))
sx = Numpyfier(sx.batch(batch).one_hot(nclass).prefetch(16))
tx = Numpyfier(tx.batch(batch).one_hot(nclass).prefetch(16))
tu = Numpyfier(tu.batch(batch * uratio).prefetch(16))
self.train = DataMerger((sx, tx, tu))
def __iter__(self) -> dict:
for sx, tx, tu in self.train:
yield dict(sx=sx, tx=tx, tu=tu)
class CTAData(AugmentPoolBase):
def __init__(self,
labeled_source: DataSet,
labeled_target: DataSet,
unlabeled_target: DataSet,
nclass: int, batch: int, uratio: int):
a = FLAGS.augment.split('(')[-1]
a = a[:-1].split(',')
a, kwargs = a[:2], {k: v for k, v in (x.split('=') for x in a[2:])}
h, w, c = labeled_source.image_shape
para = FLAGS.para_augment
probe_shape = (para, batch * 2, c, h, w)
sx_shape = (para, batch, 2, c, h, w)
tx_shape = (para, batch, 2, c, h, w)
tu_shape = (para, batch * uratio, 2, c, h, w)
del h, w, c
self.shapes = probe_shape, sx_shape, tx_shape, tu_shape
self.check_mem_requirements()
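        # Per-worker image buffers (probe / sx / tx / tu) exchanged with the augmentation pool: written by worker(), read back in __iter__().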
self.probe, self.sx, self.tx, self.tu = self.get_np_arrays(self.shapes)
self.pool = multiprocessing.Pool(para)
self.probe_period = int(kwargs.get('probe', 1))
self.cta = CTAugment(int(kwargs.get('depth', 2)),
float(kwargs.get('th', 0.8)),
float(kwargs.get('decay', 0.99)))
self.to_schedule = collections.deque()
self.deque = collections.deque()
self.free = list(range(para))
self.thread = StoppableThread(target=self.scheduler)
self.thread.start()
weak, strong = tuple(get_tf_augment(ai, size=labeled_source.image_shape[0]) for ai in a)
def bi_augment(d):
return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])
sx, tx, tu = (v.repeat().shuffle(FLAGS.shuffle).parse().map(bi_augment, FLAGS.para_augment).nchw()
for v in (labeled_source, labeled_target, unlabeled_target))
sx = Numpyfier(sx.batch(batch).one_hot(nclass).prefetch(16))
tx = Numpyfier(tx.batch(batch).one_hot(nclass).prefetch(16))
tu = Numpyfier(tu.batch(batch * uratio).prefetch(16))
self.train = DataMerger((sx, tx, tu))
def stop(self):
self.thread.stop()
def scheduler(self):
while not self.thread.stopped():
sleep(0.0001)
while self.to_schedule:
d, idx, do_probe = self.to_schedule.popleft()
self.sx[idx] = d['sx']['image']
self.tx[idx] = d['tx']['image']
self.tu[idx] = d['tu']['image']
worker_args = (idx, self.shapes, do_probe, self.cta)
self.deque.append((d, idx, self.pool.apply_async(self.worker, worker_args)))
@staticmethod
def worker(idx: int, shapes: List[Tuple[int, ...]], do_probe: bool, cta: CTAugment):
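        # Runs in a pool process: applies CTAugment strong policies (plus cutout) to the strong half of each image pair in place,
        # and optionally builds probe images whose policies are returned for rate updates.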
def cutout_policy():
return cta.policy(probe=False) + [ctaugment.OP('cutout', (1,))]
probe, sx_array, tx_array, tu_array = (arr[idx] for arr in CTAData.get_np_arrays(shapes))
sx = objax.util.image.nhwc(sx_array)
tx = objax.util.image.nhwc(tx_array)
tu = objax.util.image.nhwc(tu_array)
stx = np.concatenate((sx, tx))
nchw = objax.util.image.nchw
sx_strong = np.stack([ctaugment.apply(sx[i, 1], cutout_policy()) for i in range(sx.shape[0])])
tx_strong = np.stack([ctaugment.apply(tx[i, 1], cutout_policy()) for i in range(tx.shape[0])])
tu_strong = np.stack([ctaugment.apply(tu[i, 1], cutout_policy()) for i in range(tu.shape[0])])
sx_array[:] = nchw(np.stack([sx[:, 0], sx_strong], axis=1))
tx_array[:] = nchw(np.stack([tx[:, 0], tx_strong], axis=1))
tu_array[:] = nchw(np.stack([tu[:, 0], tu_strong], axis=1))
if not do_probe:
return
policy_list = [cta.policy(probe=True) for _ in range(stx.shape[0])]
probe[:] = nchw(np.stack([ctaugment.apply(stx[i, 0], policy)
for i, policy in enumerate(policy_list)]))
return policy_list
def update_rates(self, policy, label, y_probe):
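        # Weight each probe policy by how well the prediction matches the label (1 minus half the L1 distance) and update CTAugment's rates accordingly.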
w1 = 1 - 0.5 * np.abs(y_probe - label).sum(1)
for p in range(w1.shape[0]):
self.cta.update_rates(policy[p], w1[p])
def __iter__(self):
for i, (sx, tx, tu) in enumerate(self.train):
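            # If no slot is free, wait for a worker to finish, copy its augmented images into that batch and yield it; the freshly fetched batch is then scheduled on a free slot.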
if not self.free:
while not self.deque:
sleep(0.0001)
d, idx, pd = self.deque.popleft()
self.free.append(idx)
policy = pd.get()
if policy:
d['probe'] = np.copy(self.probe[idx])
d['policy'] = policy
d['probe_callback'] = functools.partial(self.update_rates, d['policy'],
np.concatenate((d['sx']['label'], d['tx']['label'])))
d['sx']['image'][:] = self.sx[idx]
d['tx']['image'][:] = self.tx[idx]
d['tu']['image'][:] = self.tu[idx]
yield d
self.to_schedule.append((dict(sx=sx, tx=tx, tu=tu), self.free.pop(), (i % self.probe_period) == 0))
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for TensorFlow Agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import setuptools
setuptools.setup(
name='batch-ppo',
version='1.4.0',
description=(
'Efficient TensorFlow implementation of ' +
'Proximal Policy Optimization.'),
license='Apache 2.0',
url='http://github.com/google-research/batch-ppo',
install_requires=[
'tensorflow',
'gym',
'ruamel.yaml',
],
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Science/Research',
],
)
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main package of TensorFlow agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import algorithms
from . import scripts
from . import tools
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attribute dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents.tools import attr_dict
class AttrDictTest(tf.test.TestCase):
def test_construct_from_dict(self):
initial = dict(foo=13, bar=42)
obj = attr_dict.AttrDict(initial)
self.assertEqual(13, obj.foo)
self.assertEqual(42, obj.bar)
def test_construct_from_kwargs(self):
obj = attr_dict.AttrDict(foo=13, bar=42)
self.assertEqual(13, obj.foo)
self.assertEqual(42, obj.bar)
def test_has_attribute(self):
obj = attr_dict.AttrDict(foo=13)
self.assertTrue('foo' in obj)
self.assertFalse('bar' in obj)
def test_access_default(self):
obj = attr_dict.AttrDict()
self.assertEqual(None, obj.foo)
def test_access_magic(self):
obj = attr_dict.AttrDict()
with self.assertRaises(AttributeError):
obj.__getstate__ # pylint: disable=pointless-statement
def test_immutable_create(self):
obj = attr_dict.AttrDict()
with self.assertRaises(RuntimeError):
obj.foo = 42
def test_immutable_modify(self):
obj = attr_dict.AttrDict(foo=13)
with self.assertRaises(RuntimeError):
obj.foo = 42
def test_immutable_unlocked(self):
obj = attr_dict.AttrDict()
with obj.unlocked:
obj.foo = 42
self.assertEqual(42, obj.foo)
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for environment wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from agents import tools
class ExternalProcessTest(tf.test.TestCase):
def test_close_no_hang_after_init(self):
constructor = functools.partial(
tools.MockEnvironment,
observ_shape=(2, 3), action_shape=(2,),
min_duration=2, max_duration=2)
env = tools.wrappers.ExternalProcess(constructor)
env.close()
def test_close_no_hang_after_step(self):
constructor = functools.partial(
tools.MockEnvironment,
observ_shape=(2, 3), action_shape=(2,),
min_duration=5, max_duration=5)
env = tools.wrappers.ExternalProcess(constructor)
env.reset()
env.step(env.action_space.sample())
env.step(env.action_space.sample())
env.close()
def test_reraise_exception_in_init(self):
constructor = MockEnvironmentCrashInInit
env = tools.wrappers.ExternalProcess(constructor)
with self.assertRaises(Exception):
env.step(env.action_space.sample())
def test_reraise_exception_in_step(self):
constructor = functools.partial(
MockEnvironmentCrashInStep, crash_at_step=3)
env = tools.wrappers.ExternalProcess(constructor)
env.reset()
env.step(env.action_space.sample())
env.step(env.action_space.sample())
with self.assertRaises(Exception):
env.step(env.action_space.sample())
class MockEnvironmentCrashInInit(object):
"""Raise an error when instantiated."""
def __init__(self, *unused_args, **unused_kwargs):
raise RuntimeError()
class MockEnvironmentCrashInStep(tools.MockEnvironment):
"""Raise an error after specified number of steps in an episode."""
def __init__(self, crash_at_step):
super(MockEnvironmentCrashInStep, self).__init__(
observ_shape=(2, 3), action_shape=(2,),
min_duration=crash_at_step + 1, max_duration=crash_at_step + 1)
self._crash_at_step = crash_at_step
def step(self, *args, **kwargs): # pylint: disable=arguments-differ
transition = super(MockEnvironmentCrashInStep, self).step(*args, **kwargs)
if self.steps[-1] == self._crash_at_step:
raise RuntimeError()
return transition
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap a dictionary to access keys as attributes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
class AttrDict(dict):
"""Wrap a dictionary to access keys as attributes."""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
super(AttrDict, self).__setattr__('_mutable', False)
def __getattr__(self, key):
# Do not provide None for unimplemented magic attributes.
if key.startswith('__'):
raise AttributeError
return self.get(key, None)
def __setattr__(self, key, value):
if not self._mutable:
message = "Cannot set attribute '{}'.".format(key)
message += " Use 'with obj.unlocked:' scope to set attributes."
raise RuntimeError(message)
if key.startswith('__'):
raise AttributeError("Cannot set magic attribute '{}'".format(key))
self[key] = value
@property
@contextlib.contextmanager
def unlocked(self):
super(AttrDict, self).__setattr__('_mutable', True)
yield
super(AttrDict, self).__setattr__('_mutable', False)
def copy(self):
return type(self)(super(AttrDict, self).copy())
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of tools for managing nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from agents.tools import nested
class ZipTest(tf.test.TestCase):
def test_scalar(self):
self.assertEqual(42, nested.zip(42))
self.assertEqual((13, 42), nested.zip(13, 42))
def test_empty(self):
self.assertEqual({}, nested.zip({}, {}))
def test_base_case(self):
self.assertEqual((1, 2, 3), nested.zip(1, 2, 3))
def test_shallow_list(self):
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]
result = nested.zip(a, b, c)
self.assertEqual([(1, 4, 7), (2, 5, 8), (3, 6, 9)], result)
def test_shallow_tuple(self):
a = (1, 2, 3)
b = (4, 5, 6)
c = (7, 8, 9)
result = nested.zip(a, b, c)
self.assertEqual(((1, 4, 7), (2, 5, 8), (3, 6, 9)), result)
def test_shallow_dict(self):
a = {'a': 1, 'b': 2, 'c': 3}
b = {'a': 4, 'b': 5, 'c': 6}
c = {'a': 7, 'b': 8, 'c': 9}
result = nested.zip(a, b, c)
self.assertEqual({'a': (1, 4, 7), 'b': (2, 5, 8), 'c': (3, 6, 9)}, result)
def test_single(self):
a = [[1, 2], 3]
result = nested.zip(a)
self.assertEqual(a, result)
def test_mixed_structures(self):
a = [(1, 2), 3, {'foo': [4]}]
b = [(5, 6), 7, {'foo': [8]}]
result = nested.zip(a, b)
self.assertEqual([((1, 5), (2, 6)), (3, 7), {'foo': [(4, 8)]}], result)
def test_different_types(self):
a = [1, 2, 3]
b = 'a b c'.split()
result = nested.zip(a, b)
self.assertEqual([(1, 'a'), (2, 'b'), (3, 'c')], result)
def test_use_type_of_first(self):
a = (1, 2, 3)
b = [4, 5, 6]
c = [7, 8, 9]
result = nested.zip(a, b, c)
self.assertEqual(((1, 4, 7), (2, 5, 8), (3, 6, 9)), result)
def test_namedtuple(self):
Foo = collections.namedtuple('Foo', 'value')
foo, bar = Foo(42), Foo(13)
self.assertEqual(Foo((42, 13)), nested.zip(foo, bar))
class MapTest(tf.test.TestCase):
def test_scalar(self):
self.assertEqual(42, nested.map(lambda x: x, 42))
def test_empty(self):
self.assertEqual({}, nested.map(lambda x: x, {}))
def test_shallow_list(self):
self.assertEqual([2, 4, 6], nested.map(lambda x: 2 * x, [1, 2, 3]))
def test_shallow_dict(self):
data = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
self.assertEqual(data, nested.map(lambda x: x, data))
def test_mixed_structure(self):
structure = [(1, 2), 3, {'foo': [4]}]
result = nested.map(lambda x: 2 * x, structure)
self.assertEqual([(2, 4), 6, {'foo': [8]}], result)
def test_mixed_types(self):
self.assertEqual([14, 'foofoo'], nested.map(lambda x: x * 2, [7, 'foo']))
def test_multiple_lists(self):
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]
result = nested.map(lambda x, y, z: x + y + z, a, b, c)
self.assertEqual([12, 15, 18], result)
def test_namedtuple(self):
Foo = collections.namedtuple('Foo', 'value')
foo, bar = [Foo(42)], [Foo(13)]
function = nested.map(lambda x, y: (y, x), foo, bar)
self.assertEqual([Foo((13, 42))], function)
function = nested.map(lambda x, y: x + y, foo, bar)
self.assertEqual([Foo(55)], function)
class FlattenTest(tf.test.TestCase):
def test_scalar(self):
self.assertEqual((42,), nested.flatten(42))
def test_empty(self):
self.assertEqual((), nested.flatten({}))
def test_base_case(self):
self.assertEqual((1,), nested.flatten(1))
def test_convert_type(self):
self.assertEqual((1, 2, 3), nested.flatten([1, 2, 3]))
def test_mixed_structure(self):
self.assertEqual((1, 2, 3, 4), nested.flatten([(1, 2), 3, {'foo': [4]}]))
def test_value_ordering(self):
self.assertEqual((1, 2, 3), nested.flatten({'a': 1, 'b': 2, 'c': 3}))
class FilterTest(tf.test.TestCase):
def test_empty(self):
self.assertEqual({}, nested.filter(lambda x: True, {}))
self.assertEqual({}, nested.filter(lambda x: False, {}))
def test_base_case(self):
self.assertEqual((), nested.filter(lambda x: False, 1))
def test_single_dict(self):
predicate = lambda x: x % 2 == 0
data = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
self.assertEqual({'b': 2, 'd': 4}, nested.filter(predicate, data))
def test_multiple_lists(self):
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]
predicate = lambda *args: any(x % 4 == 0 for x in args)
result = nested.filter(predicate, a, b, c)
self.assertEqual([(1, 4, 7), (2, 5, 8)], result)
def test_multiple_dicts(self):
a = {'a': 1, 'b': 2, 'c': 3}
b = {'a': 4, 'b': 5, 'c': 6}
c = {'a': 7, 'b': 8, 'c': 9}
predicate = lambda *args: any(x % 4 == 0 for x in args)
result = nested.filter(predicate, a, b, c)
self.assertEqual({'a': (1, 4, 7), 'b': (2, 5, 8)}, result)
def test_mixed_structure(self):
predicate = lambda x: x % 2 == 0
data = [(1, 2), 3, {'foo': [4]}]
self.assertEqual([(2,), {'foo': [4]}], nested.filter(predicate, data))
def test_remove_empty_containers(self):
data = [(1, 2, 3), 4, {'foo': [5, 6], 'bar': 7}]
self.assertEqual([], nested.filter(lambda x: False, data))
def test_namedtuple(self):
Foo = collections.namedtuple('Foo', 'value1, value2')
self.assertEqual(Foo(1, None), nested.filter(lambda x: x == 1, Foo(1, 2)))
def test_namedtuple_multiple(self):
Foo = collections.namedtuple('Foo', 'value1, value2')
foo = Foo(1, 2)
bar = Foo(2, 3)
result = nested.filter(lambda x, y: x + y > 3, foo, bar)
self.assertEqual(Foo(None, (2, 3)), result)
def test_namedtuple_nested(self):
Foo = collections.namedtuple('Foo', 'value1, value2')
foo = Foo(1, [1, 2, 3])
self.assertEqual(Foo(None, [2, 3]), nested.filter(lambda x: x > 1, foo))
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In-graph simulation step of a vectorized algorithm with environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents.tools import streaming_mean
def simulate(batch_env, algo, log=True, reset=False):
"""Simulation step of a vectorized algorithm with in-graph environments.
Integrates the operations implemented by the algorithm and the environments
into a combined operation.
Args:
batch_env: In-graph batch environment.
algo: Algorithm instance implementing required operations.
log: Tensor indicating whether to compute and return summaries.
reset: Tensor causing all environments to reset.
Returns:
Tuple of tensors containing done flags for the current episodes, possibly
intermediate scores for the episodes, and a summary tensor.
"""
def _define_begin_episode(agent_indices):
"""Reset environments, intermediate scores and durations for new episodes.
Args:
agent_indices: Tensor containing batch indices starting an episode.
Returns:
Summary tensor.
"""
assert agent_indices.shape.ndims == 1
zero_scores = tf.zeros_like(agent_indices, tf.float32)
zero_durations = tf.zeros_like(agent_indices)
reset_ops = [
batch_env.reset(agent_indices),
tf.scatter_update(score, agent_indices, zero_scores),
tf.scatter_update(length, agent_indices, zero_durations)]
with tf.control_dependencies(reset_ops):
return algo.begin_episode(agent_indices)
def _define_step():
"""Request actions from the algorithm and apply them to the environments.
Increments the lengths of all episodes and increases their scores by the
current reward. After stepping the environments, provides the full
transition tuple to the algorithm.
Returns:
Summary tensor.
"""
prevob = batch_env.observ + 0 # Ensure a copy of the variable value.
agent_indices = tf.range(len(batch_env))
action, step_summary = algo.perform(agent_indices, prevob)
action.set_shape(batch_env.action.shape)
with tf.control_dependencies([batch_env.simulate(action)]):
add_score = score.assign_add(batch_env.reward)
inc_length = length.assign_add(tf.ones(len(batch_env), tf.int32))
with tf.control_dependencies([add_score, inc_length]):
agent_indices = tf.range(len(batch_env))
experience_summary = algo.experience(
agent_indices, prevob, batch_env.action, batch_env.reward,
batch_env.done, batch_env.observ)
return tf.summary.merge([step_summary, experience_summary])
def _define_end_episode(agent_indices):
"""Notify the algorithm of ending episodes.
Also updates the mean score and length counters used for summaries.
Args:
agent_indices: Tensor holding batch indices that end their episodes.
Returns:
Summary tensor.
"""
assert agent_indices.shape.ndims == 1
submit_score = mean_score.submit(tf.gather(score, agent_indices))
submit_length = mean_length.submit(
tf.cast(tf.gather(length, agent_indices), tf.float32))
with tf.control_dependencies([submit_score, submit_length]):
return algo.end_episode(agent_indices)
def _define_summaries():
"""Reset the average score and duration, and return them as summary.
Returns:
Summary string.
"""
score_summary = tf.cond(
tf.logical_and(log, tf.cast(mean_score.count, tf.bool)),
lambda: tf.summary.scalar('mean_score', mean_score.clear()), str)
length_summary = tf.cond(
tf.logical_and(log, tf.cast(mean_length.count, tf.bool)),
lambda: tf.summary.scalar('mean_length', mean_length.clear()), str)
return tf.summary.merge([score_summary, length_summary])
with tf.name_scope('simulate'):
log = tf.convert_to_tensor(log)
reset = tf.convert_to_tensor(reset)
with tf.variable_scope('simulate_temporary'):
score = tf.Variable(
lambda: tf.zeros(len(batch_env), dtype=tf.float32),
trainable=False, name='score')
length = tf.Variable(
lambda: tf.zeros(len(batch_env), dtype=tf.int32),
trainable=False, name='length')
mean_score = streaming_mean.StreamingMean((), tf.float32)
mean_length = streaming_mean.StreamingMean((), tf.float32)
agent_indices = tf.cond(
reset,
lambda: tf.range(len(batch_env)),
lambda: tf.cast(tf.where(batch_env.done)[:, 0], tf.int32))
begin_episode = tf.cond(
tf.cast(tf.shape(agent_indices)[0], tf.bool),
lambda: _define_begin_episode(agent_indices), str)
with tf.control_dependencies([begin_episode]):
step = _define_step()
with tf.control_dependencies([step]):
agent_indices = tf.cast(tf.where(batch_env.done)[:, 0], tf.int32)
end_episode = tf.cond(
tf.cast(tf.shape(agent_indices)[0], tf.bool),
lambda: _define_end_episode(agent_indices), str)
with tf.control_dependencies([end_episode]):
summary = tf.summary.merge([
_define_summaries(), begin_episode, step, end_episode])
with tf.control_dependencies([summary]):
done, score = tf.identity(batch_env.done), tf.identity(score)
return done, score, summary
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for reinforcement learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import nested
from . import wrappers
from .attr_dict import AttrDict
from .batch_env import BatchEnv
from .count_weights import count_weights
from .in_graph_batch_env import InGraphBatchEnv
from .in_graph_env import InGraphEnv
from .loop import Loop
from .mock_algorithm import MockAlgorithm
from .mock_environment import MockEnvironment
from .simulate import simulate
from .streaming_mean import StreamingMean
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combine multiple environments to step them in batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class BatchEnv(object):
"""Combine multiple environments to step them in batch."""
def __init__(self, envs, blocking):
"""Combine multiple environments to step them in batch.
    To step environments in parallel, environments must support a
    `blocking=False` argument to their step and reset functions, which makes
    them return callables that can be invoked later to retrieve the result.
Args:
envs: List of environments.
      blocking: Step environments one after another rather than in parallel.
Raises:
ValueError: Environments have different observation or action spaces.
"""
self._envs = envs
self._blocking = blocking
observ_space = self._envs[0].observation_space
if not all(env.observation_space == observ_space for env in self._envs):
raise ValueError('All environments must use the same observation space.')
action_space = self._envs[0].action_space
if not all(env.action_space == action_space for env in self._envs):
      raise ValueError('All environments must use the same action space.')
def __len__(self):
"""Number of combined environments."""
return len(self._envs)
def __getitem__(self, index):
"""Access an underlying environment by index."""
return self._envs[index]
def __getattr__(self, name):
"""Forward unimplemented attributes to one of the original environments.
Args:
name: Attribute that was accessed.
Returns:
      Value behind the attribute name in one of the wrapped environments.
"""
return getattr(self._envs[0], name)
def step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
for index, (env, action) in enumerate(zip(self._envs, actions)):
if not env.action_space.contains(action):
message = 'Invalid action at index {}: {}'
raise ValueError(message.format(index, action))
if self._blocking:
transitions = [
env.step(action)
for env, action in zip(self._envs, actions)]
else:
transitions = [
env.step(action, blocking=False)
for env, action in zip(self._envs, actions)]
transitions = [transition() for transition in transitions]
observs, rewards, dones, infos = zip(*transitions)
observ = np.stack(observs)
reward = np.stack(rewards)
done = np.stack(dones)
info = tuple(infos)
return observ, reward, done, info
def reset(self, indices=None):
"""Reset the environment and convert the resulting observation.
Args:
indices: The batch indices of environments to reset; defaults to all.
Returns:
Batch of observations.
"""
if indices is None:
indices = np.arange(len(self._envs))
if self._blocking:
observs = [self._envs[index].reset() for index in indices]
else:
observs = [self._envs[index].reset(blocking=False) for index in indices]
observs = [observ() for observ in observs]
observ = np.stack(observs)
return observ
def close(self):
"""Send close messages to the external process and join them."""
for env in self._envs:
if hasattr(env, 'close'):
env.close()
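# --- Editor's hedged usage sketch (not part of the original module). ---
# Steps two environments in lock step with `blocking=True`. The `gym` package
# and the 'CartPole-v0' environment are assumptions; any set of environments
# with identical observation and action spaces would work the same way.
if __name__ == '__main__':
  import gym
  envs = [gym.make('CartPole-v0') for _ in range(2)]
  batch_env = BatchEnv(envs, blocking=True)
  observ = batch_env.reset()
  actions = [env.action_space.sample() for env in envs]
  observ, reward, done, _ = batch_env.step(actions)
  print(observ.shape, reward, done)
  batch_env.close()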
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock algorithm for testing reinforcement learning code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class MockAlgorithm(object):
"""Produce random actions and empty summaries."""
def __init__(self, envs):
"""Produce random actions and empty summaries.
Args:
envs: List of in-graph environments.
"""
self._envs = envs
def begin_episode(self, unused_agent_indices):
return tf.constant('')
def perform(self, agent_indices, unused_observ):
shape = (tf.shape(agent_indices)[0],) + self._envs[0].action_space.shape
low = self._envs[0].action_space.low
high = self._envs[0].action_space.high
action = tf.random_uniform(shape) * (high - low) + low
return action, tf.constant('')
def experience(self, unused_agent_indices, *unused_transition):
return tf.constant('')
def end_episode(self, unused_agent_indices):
return tf.constant('')
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for OpenAI Gym environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import multiprocessing
import sys
import traceback
import gym
import gym.spaces
import numpy as np
import tensorflow as tf
class AutoReset(object):
"""Automatically reset environment when the episode is done."""
def __init__(self, env):
self._env = env
self._done = True
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._done:
observ, reward, done, info = self._env.reset(), 0.0, False, {}
else:
observ, reward, done, info = self._env.step(action)
self._done = done
return observ, reward, done, info
def reset(self):
self._done = False
return self._env.reset()
class ActionRepeat(object):
"""Repeat the agent action multiple steps."""
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class RandomStart(object):
"""Perform random number of random actions at the start of the episode."""
def __init__(self, env, max_steps):
self._env = env
self._max_steps = max_steps
def __getattr__(self, name):
return getattr(self._env, name)
def reset(self):
observ = self._env.reset()
random_steps = np.random.randint(0, self._max_steps)
for _ in range(random_steps):
action = self._env.action_space.sample()
observ, unused_reward, done, unused_info = self._env.step(action)
if done:
tf.logging.warning('Episode ended during random start.')
return self.reset()
return observ
class FrameHistory(object):
"""Augment the observation with past observations."""
def __init__(self, env, past_indices, flatten):
"""Augment the observation with past observations.
Implemented as a Numpy ring buffer holding the necessary past observations.
Args:
env: OpenAI Gym environment to wrap.
past_indices: List of non-negative integers indicating the time offsets
from the current time step of observations to include.
flatten: Concatenate the past observations rather than stacking them.
Raises:
KeyError: The current observation is not included in the indices.
"""
if 0 not in past_indices:
raise KeyError('Past indices should include 0 for the current frame.')
self._env = env
self._past_indices = past_indices
self._step = 0
self._buffer = None
self._capacity = max(past_indices) + 1
self._flatten = flatten
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low = np.repeat(low[None, ...], len(self._past_indices), 0)
high = np.repeat(high[None, ...], len(self._past_indices), 0)
if self._flatten:
low = np.reshape(low, (-1,) + low.shape[2:])
high = np.reshape(high, (-1,) + high.shape[2:])
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
observ, reward, done, info = self._env.step(action)
self._step += 1
self._buffer[self._step % self._capacity] = observ
observ = self._select_frames()
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
self._buffer = np.repeat(observ[None, ...], self._capacity, 0)
self._step = 0
return self._select_frames()
def _select_frames(self):
indices = [
(self._step - index) % self._capacity for index in self._past_indices]
observ = self._buffer[indices]
if self._flatten:
observ = np.reshape(observ, (-1,) + observ.shape[2:])
return observ
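# Editor's note (added, not in the original): with past_indices=[0, 2] the
# capacity is 3; after three steps the ring buffer slots 0, 1, 2 hold the
# observations of steps 3, 1, 2, and _select_frames returns the observations
# of steps 3 and 1, i.e. the current frame and the frame two steps back.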
class FrameDelta(object):
"""Convert the observation to a difference from the previous observation."""
def __init__(self, env):
self._env = env
self._last = None
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low, high = low - high, high - low
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
observ, reward, done, info = self._env.step(action)
delta = observ - self._last
self._last = observ
return delta, reward, done, info
def reset(self):
observ = self._env.reset()
self._last = observ
return observ
class RangeNormalize(object):
"""Normalize the specialized observation and action ranges to [-1, 1]."""
def __init__(self, env, observ=None, action=None):
self._env = env
self._should_normalize_observ = (
observ is not False and self._is_finite(self._env.observation_space))
if observ is True and not self._should_normalize_observ:
raise ValueError('Cannot normalize infinite observation range.')
if observ is None and not self._should_normalize_observ:
tf.logging.info('Not normalizing infinite observation range.')
self._should_normalize_action = (
action is not False and self._is_finite(self._env.action_space))
if action is True and not self._should_normalize_action:
raise ValueError('Cannot normalize infinite action range.')
if action is None and not self._should_normalize_action:
tf.logging.info('Not normalizing infinite action range.')
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
space = self._env.observation_space
if not self._should_normalize_observ:
return space
low, high = -np.ones(space.shape), np.ones(space.shape)
return gym.spaces.Box(low, high, dtype=np.float32)
@property
def action_space(self):
space = self._env.action_space
if not self._should_normalize_action:
return space
low, high = -np.ones(space.shape), np.ones(space.shape)
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
if self._should_normalize_action:
action = self._denormalize_action(action)
observ, reward, done, info = self._env.step(action)
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ
def _denormalize_action(self, action):
min_ = self._env.action_space.low
max_ = self._env.action_space.high
action = (action + 1) / 2 * (max_ - min_) + min_
return action
def _normalize_observ(self, observ):
min_ = self._env.observation_space.low
max_ = self._env.observation_space.high
observ = 2 * (observ - min_) / (max_ - min_) - 1
return observ
def _is_finite(self, space):
return np.isfinite(space.low).all() and np.isfinite(space.high).all()
class ClipAction(object):
"""Clip out of range actions to the action space of the environment."""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
shape = self._env.action_space.shape
low, high = -np.inf * np.ones(shape), np.inf * np.ones(shape)
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
action_space = self._env.action_space
action = np.clip(action, action_space.low, action_space.high)
return self._env.step(action)
class LimitDuration(object):
"""End episodes after specified number of steps."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ExternalProcess(object):
"""Step environment in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor):
"""Step environment in a separate process for lock free parallelism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
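    # Returning the bound `_receive` method acts as the promise: calling it
    # blocks until the worker process sends back the result.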
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
class ConvertTo32Bit(object):
"""Convert data types of an OpenAI Gym environment to 32 bit."""
def __init__(self, env):
"""Convert data types of an OpenAI Gym environment to 32 bit.
Args:
env: OpenAI Gym environment.
"""
self._env = env
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._env.step(action)
observ = self._convert_observ(observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
"""Reset the environment and convert the resulting observation.
Returns:
Converted observation.
"""
observ = self._env.reset()
observ = self._convert_observ(observ)
return observ
def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
class CacheSpaces(object):
"""Cache observation and action space to not recompute them repeatedly."""
def __init__(self, env):
"""Cache observation and action space to not recompute them repeatedly.
Args:
env: OpenAI Gym environment.
"""
self._env = env
self._observation_space = self._env.observation_space
self._action_space = self._env.action_space
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
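# --- Editor's hedged usage sketch (not part of the original module). ---
# A typical wrapper chain; 'Pendulum-v0' and the old four-tuple Gym step API
# are assumptions matching the era of this code.
if __name__ == '__main__':
  example_env = gym.make('Pendulum-v0')
  example_env = LimitDuration(example_env, duration=100)
  example_env = RangeNormalize(example_env)
  example_env = ConvertTo32Bit(example_env)
  observ = example_env.reset()
  # A zero action in the normalized [-1, 1] action range.
  action = np.zeros(example_env.action_space.shape, dtype=np.float32)
  observ, reward, done, _ = example_env.step(action)
  print(observ.dtype, reward, done)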
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the training loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents import tools
class LoopTest(tf.test.TestCase):
def test_report_every_step(self):
step = tf.Variable(0, False, dtype=tf.int32, name='step')
loop = tools.Loop(None, step)
loop.add_phase(
'phase_1', done=True, score=0, summary='', steps=1, report_every=3)
# Step: 0 1 2 3 4 5 6 7 8
# Report: x x x
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
scores = loop.run(sess, saver=None, max_step=9)
next(scores)
self.assertEqual(3, sess.run(step))
next(scores)
self.assertEqual(6, sess.run(step))
next(scores)
self.assertEqual(9, sess.run(step))
def test_phases_feed(self):
score = tf.placeholder(tf.float32, [])
loop = tools.Loop(None)
loop.add_phase(
'phase_1', done=True, score=score, summary='', steps=1, report_every=1,
log_every=None, checkpoint_every=None, feed={score: 1})
loop.add_phase(
'phase_2', done=True, score=score, summary='', steps=3, report_every=1,
log_every=None, checkpoint_every=None, feed={score: 2})
loop.add_phase(
'phase_3', done=True, score=score, summary='', steps=2, report_every=1,
log_every=None, checkpoint_every=None, feed={score: 3})
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
scores = list(loop.run(sess, saver=None, max_step=15))
self.assertAllEqual([1, 2, 2, 2, 3, 3, 1, 2, 2, 2, 3, 3, 1, 2, 2], scores)
def test_average_score_over_phases(self):
loop = tools.Loop(None)
loop.add_phase(
'phase_1', done=True, score=1, summary='', steps=1, report_every=2)
loop.add_phase(
'phase_2', done=True, score=2, summary='', steps=2, report_every=5)
# Score: 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2 1 2
# Report 1: x x x
# Report 2: x x
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
scores = list(loop.run(sess, saver=None, max_step=17))
self.assertAllEqual([1, 2, 1, 2, 1], scores)
def test_not_done(self):
step = tf.Variable(0, False, dtype=tf.int32, name='step')
done = tf.equal((step + 1) % 2, 0)
score = tf.cast(step, tf.float32)
loop = tools.Loop(None, step)
loop.add_phase(
'phase_1', done, score, summary='', steps=1, report_every=3)
# Score: 0 1 2 3 4 5 6 7 8
# Done: x x x x
# Report: x x x
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
scores = list(loop.run(sess, saver=None, max_step=9))
self.assertAllEqual([1, 4, 7], scores)
def test_not_done_batch(self):
step = tf.Variable(0, False, dtype=tf.int32, name='step')
done = tf.equal([step % 3, step % 4], 0)
score = tf.cast([step, step ** 2], tf.float32)
loop = tools.Loop(None, step)
loop.add_phase(
'phase_1', done, score, summary='', steps=1, report_every=8)
# Step: 0 2 4 6
# Score 1: 0 2 4 6
# Done 1: x x
    # Score 2: 0 4 16 36
# Done 2: x x
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
scores = list(loop.run(sess, saver=None, max_step=8))
self.assertEqual(8, sess.run(step))
self.assertAllEqual([(0 + 0 + 16 + 6) / 4], scores)
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute operations in a loop and coordinate logging and checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import tensorflow as tf
from agents.tools import streaming_mean
_Phase = collections.namedtuple(
'Phase',
'name, writer, op, batch, steps, feed, report_every, log_every,'
'checkpoint_every')
class Loop(object):
"""Execute operations in a loop and coordinate logging and checkpoints.
  Supports multiple phases that define their own operations to run, and
intervals for reporting scores, logging summaries, and storing checkpoints.
All class state is stored in-graph to properly recover from checkpoints.
"""
def __init__(self, logdir, step=None, log=None, report=None, reset=None):
"""Execute operations in a loop and coordinate logging and checkpoints.
    The step, log, report, and reset arguments will get created if not
provided. Reset is used to indicate switching to a new phase, so that the
model can start a new computation in case its computation is split over
multiple training steps.
Args:
logdir: Will contain checkpoints and summaries for each phase.
step: Variable of the global step (optional).
log: Tensor indicating to the model to compute summary tensors.
report: Tensor indicating to the loop to report the current mean score.
reset: Tensor indicating to the model to start a new computation.
"""
self._logdir = logdir
self._step = (
tf.Variable(0, False, name='global_step') if step is None else step)
self._log = tf.placeholder(tf.bool) if log is None else log
self._report = tf.placeholder(tf.bool) if report is None else report
self._reset = tf.placeholder(tf.bool) if reset is None else reset
self._phases = []
def add_phase(
self, name, done, score, summary, steps,
report_every=None, log_every=None, checkpoint_every=None, feed=None):
"""Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the mean counter.
For example, in reinforcement learning we only have a valid score at the
end of the episode.
Score and done tensors can either be scalars or vectors, to support
single and batched computations.
Args:
name: Name for the phase, used for the summary writer.
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
steps: Duration of the phase in steps.
report_every: Yield mean score every this number of steps.
log_every: Request summaries via `log` tensor every this number of steps.
checkpoint_every: Write checkpoint every this number of steps.
feed: Additional feed dictionary for the session run call.
Raises:
ValueError: Unknown rank for done or score tensors.
"""
done = tf.convert_to_tensor(done, tf.bool)
score = tf.convert_to_tensor(score, tf.float32)
summary = tf.convert_to_tensor(summary, tf.string)
feed = feed or {}
if done.shape.ndims is None or score.shape.ndims is None:
raise ValueError("Rank of 'done' and 'score' tensors must be known.")
writer = self._logdir and tf.summary.FileWriter(
os.path.join(self._logdir, name), tf.get_default_graph(),
flush_secs=60)
op = self._define_step(done, score, summary)
batch = 1 if score.shape.ndims == 0 else score.shape[0].value
self._phases.append(_Phase(
name, writer, op, batch, int(steps), feed, report_every,
log_every, checkpoint_every))
def run(self, sess, saver, max_step=None):
"""Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to run the phase operation.
saver: Saver used for checkpointing.
max_step: Run the operations until the step reaches this limit.
Yields:
Reported mean scores.
"""
global_step = sess.run(self._step)
steps_made = 1
while True:
if max_step and global_step >= max_step:
break
phase, epoch, steps_in = self._find_current_phase(global_step)
phase_step = epoch * phase.steps + steps_in
if steps_in % phase.steps < steps_made:
message = '\n' + ('-' * 50) + '\n'
message += 'Phase {} (phase step {}, global step {}).'
tf.logging.info(message.format(phase.name, phase_step, global_step))
      # Populate bookkeeping tensors.
phase.feed[self._reset] = (steps_in < steps_made)
phase.feed[self._log] = (
phase.writer and
self._is_every_steps(phase_step, phase.batch, phase.log_every))
phase.feed[self._report] = (
self._is_every_steps(phase_step, phase.batch, phase.report_every))
summary, mean_score, global_step, steps_made = sess.run(
phase.op, phase.feed)
if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every):
self._store_checkpoint(sess, saver, global_step)
if self._is_every_steps(phase_step, phase.batch, phase.report_every):
yield mean_score
if summary and phase.writer:
        # We want smaller phases to catch up at the beginning of each epoch so
# that their graphs are aligned.
longest_phase = max(phase.steps for phase in self._phases)
summary_step = epoch * longest_phase + steps_in
phase.writer.add_summary(summary, summary_step)
def _is_every_steps(self, phase_step, batch, every):
"""Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event should happen.
"""
if not every:
return False
covered_steps = range(phase_step, phase_step + batch)
return any((step + 1) % every == 0 for step in covered_steps)
def _find_current_phase(self, global_step):
"""Determine the current phase based on the global step.
    This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch.
"""
epoch_size = sum(phase.steps for phase in self._phases)
epoch = int(global_step // epoch_size)
steps_in = global_step % epoch_size
for phase in self._phases:
if steps_in < phase.steps:
return phase, epoch, steps_in
steps_in -= phase.steps
def _define_step(self, done, score, summary):
"""Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
Returns:
Tuple of summary tensor, mean score, and new global step. The mean score
      is zero for non-reporting steps.
"""
if done.shape.ndims == 0:
done = done[None]
if score.shape.ndims == 0:
score = score[None]
score_mean = streaming_mean.StreamingMean((), tf.float32)
with tf.control_dependencies([done, score, summary]):
done_score = tf.gather(score, tf.where(done)[:, 0])
submit_score = tf.cond(
tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
with tf.control_dependencies([submit_score]):
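      # `float` as the false branch of tf.cond yields 0.0 on non-reporting steps.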
mean_score = tf.cond(self._report, score_mean.clear, float)
steps_made = tf.shape(score)[0]
next_step = self._step.assign_add(steps_made)
with tf.control_dependencies([mean_score, next_step]):
return tf.identity(summary), mean_score, next_step, steps_made
def _store_checkpoint(self, sess, saver, global_step):
"""Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name.
"""
if not self._logdir or not saver:
return
tf.gfile.MakeDirs(self._logdir)
filename = os.path.join(self._logdir, 'model.ckpt')
saver.save(sess, filename, global_step)
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock environment for testing reinforcement learning code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import gym.spaces
import numpy as np
class MockEnvironment(object):
"""Generate random agent input and keep track of statistics."""
def __init__(self, observ_shape, action_shape, min_duration, max_duration):
"""Generate random agent input and keep track of statistics.
Args:
observ_shape: Shape for the random observations.
action_shape: Shape for the action space.
min_duration: Minimum number of steps per episode.
max_duration: Maximum number of steps per episode.
Attributes:
steps: List of actual simulated lengths for all episodes.
durations: List of decided lengths for all episodes.
"""
self._observ_shape = observ_shape
self._action_shape = action_shape
self._min_duration = min_duration
self._max_duration = max_duration
self._random = np.random.RandomState(0)
self.steps = []
self.durations = []
@property
def observation_space(self):
low = np.zeros(self._observ_shape)
high = np.ones(self._observ_shape)
return gym.spaces.Box(low, high, dtype=np.float32)
@property
def action_space(self):
low = np.zeros(self._action_shape)
high = np.ones(self._action_shape)
return gym.spaces.Box(low, high, dtype=np.float32)
@property
def unwrapped(self):
return self
def step(self, action):
assert self.action_space.contains(action)
assert self.steps[-1] < self.durations[-1]
self.steps[-1] += 1
observ = self._current_observation()
reward = self._current_reward()
done = self.steps[-1] >= self.durations[-1]
info = {}
return observ, reward, done, info
def reset(self):
duration = self._random.randint(self._min_duration, self._max_duration + 1)
self.steps.append(0)
self.durations.append(duration)
return self._current_observation()
def _current_observation(self):
return self._random.uniform(0, 1, self._observ_shape)
def _current_reward(self):
return self._random.uniform(-1, 1)
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Count learnable parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import tensorflow as tf
def count_weights(scope=None, exclude=None, graph=None):
"""Count learnable parameters.
Args:
scope: Restrict the count to a variable scope.
exclude: Regex to match variable names to exclude.
graph: Operate on a graph other than the current default graph.
Returns:
Number of learnable parameters as integer.
"""
if scope:
scope = scope if scope.endswith('/') else scope + '/'
graph = graph or tf.get_default_graph()
vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if scope:
vars_ = [var for var in vars_ if var.name.startswith(scope)]
if exclude:
exclude = re.compile(exclude)
vars_ = [var for var in vars_ if not exclude.match(var.name)]
shapes = [var.get_shape().as_list() for var in vars_]
return int(sum(np.prod(shape) for shape in shapes))
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch of environments inside the TensorFlow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import tensorflow as tf
class InGraphBatchEnv(object):
"""Batch of environments inside the TensorFlow graph.
The batch of environments will be stepped and reset inside of the graph using
a tf.py_func(). The current batch of observations, actions, rewards, and done
  flags are held in corresponding variables.
"""
def __init__(self, batch_env):
"""Batch of environments inside the TensorFlow graph.
Args:
batch_env: Batch environment.
"""
self._batch_env = batch_env
batch_dims = (len(self._batch_env),)
observ_shape = self._parse_shape(self._batch_env.observation_space)
observ_dtype = self._parse_dtype(self._batch_env.observation_space)
action_shape = self._parse_shape(self._batch_env.action_space)
action_dtype = self._parse_dtype(self._batch_env.action_space)
with tf.variable_scope('env_temporary'):
self._observ = tf.Variable(
lambda: tf.zeros(batch_dims + observ_shape, observ_dtype),
name='observ', trainable=False)
self._action = tf.Variable(
lambda: tf.zeros(batch_dims + action_shape, action_dtype),
name='action', trainable=False)
self._reward = tf.Variable(
lambda: tf.zeros(batch_dims, tf.float32),
name='reward', trainable=False)
self._done = tf.Variable(
lambda: tf.cast(tf.ones(batch_dims), tf.bool),
name='done', trainable=False)
def __getattr__(self, name):
"""Forward unimplemented attributes to one of the original environments.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in one of the original environments.
"""
return getattr(self._batch_env, name)
def __len__(self):
"""Number of combined environments."""
return len(self._batch_env)
def __getitem__(self, index):
"""Access an underlying environment by index."""
return self._batch_env[index]
def simulate(self, action):
"""Step the batch of environments.
The results of the step can be accessed from the variables defined below.
Args:
action: Tensor holding the batch of actions to apply.
Returns:
Operation.
"""
with tf.name_scope('environment/simulate'):
if action.dtype in (tf.float16, tf.float32, tf.float64):
action = tf.check_numerics(action, 'action')
observ_dtype = self._parse_dtype(self._batch_env.observation_space)
observ, reward, done = tf.py_func(
lambda a: self._batch_env.step(a)[:3], [action],
[observ_dtype, tf.float32, tf.bool], name='step')
observ = tf.check_numerics(observ, 'observ')
reward = tf.check_numerics(reward, 'reward')
return tf.group(
self._observ.assign(observ),
self._action.assign(action),
self._reward.assign(reward),
self._done.assign(done))
def reset(self, indices=None):
"""Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
"""
if indices is None:
indices = tf.range(len(self._batch_env))
observ_dtype = self._parse_dtype(self._batch_env.observation_space)
observ = tf.py_func(
self._batch_env.reset, [indices], observ_dtype, name='reset')
observ = tf.check_numerics(observ, 'observ')
reward = tf.zeros_like(indices, tf.float32)
done = tf.zeros_like(indices, tf.bool)
with tf.control_dependencies([
tf.scatter_update(self._observ, indices, observ),
tf.scatter_update(self._reward, indices, reward),
tf.scatter_update(self._done, indices, done)]):
return tf.identity(observ)
@property
def observ(self):
"""Access the variable holding the current observation."""
return self._observ
@property
def action(self):
"""Access the variable holding the last received action."""
return self._action
@property
def reward(self):
"""Access the variable holding the current reward."""
return self._reward
@property
def done(self):
"""Access the variable indicating whether the episode is done."""
return self._done
def close(self):
"""Send close messages to the external process and join them."""
self._batch_env.close()
def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError()
def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the simulation operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents import tools
class SimulateTest(tf.test.TestCase):
def test_done_automatic(self):
batch_env = self._create_test_batch_env((1, 2, 3, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, log=False, reset=False)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual([True, False, False, False], sess.run(done))
self.assertAllEqual([True, True, False, False], sess.run(done))
self.assertAllEqual([True, False, True, False], sess.run(done))
self.assertAllEqual([True, True, False, True], sess.run(done))
def test_done_forced(self):
reset = tf.placeholder_with_default(False, ())
batch_env = self._create_test_batch_env((2, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, False, reset)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual([False, False], sess.run(done))
self.assertAllEqual([False, False], sess.run(done, {reset: True}))
self.assertAllEqual([True, False], sess.run(done))
self.assertAllEqual([False, False], sess.run(done, {reset: True}))
self.assertAllEqual([True, False], sess.run(done))
self.assertAllEqual([False, False], sess.run(done))
self.assertAllEqual([True, True], sess.run(done))
def test_reset_automatic(self):
batch_env = self._create_test_batch_env((1, 2, 3, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, log=False, reset=False)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(10):
sess.run(done)
self.assertAllEqual([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], batch_env[0].steps)
self.assertAllEqual([2, 2, 2, 2, 2], batch_env[1].steps)
self.assertAllEqual([3, 3, 3, 1], batch_env[2].steps)
self.assertAllEqual([4, 4, 2], batch_env[3].steps)
def test_reset_forced(self):
reset = tf.placeholder_with_default(False, ())
batch_env = self._create_test_batch_env((2, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, False, reset)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(done)
sess.run(done, {reset: True})
sess.run(done)
sess.run(done, {reset: True})
sess.run(done)
sess.run(done)
sess.run(done)
self.assertAllEqual([1, 2, 2, 2], batch_env[0].steps)
self.assertAllEqual([1, 2, 4], batch_env[1].steps)
def _create_test_batch_env(self, durations):
envs = []
for duration in durations:
env = tools.MockEnvironment(
observ_shape=(2, 3), action_shape=(3,),
min_duration=duration, max_duration=duration)
env = tools.wrappers.ConvertTo32Bit(env)
envs.append(env)
batch_env = tools.BatchEnv(envs, blocking=True)
batch_env = tools.InGraphBatchEnv(batch_env)
return batch_env
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the weight counting utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents.tools import count_weights
class CountWeightsTest(tf.test.TestCase):
def test_count_trainable(self):
tf.Variable(tf.zeros((5, 3)), trainable=True)
tf.Variable(tf.zeros((1, 1)), trainable=True)
tf.Variable(tf.zeros((5,)), trainable=True)
self.assertEqual(15 + 1 + 5, count_weights())
def test_ignore_non_trainable(self):
tf.Variable(tf.zeros((5, 3)), trainable=False)
tf.Variable(tf.zeros((1, 1)), trainable=False)
tf.Variable(tf.zeros((5,)), trainable=False)
self.assertEqual(0, count_weights())
def test_trainable_and_non_trainable(self):
tf.Variable(tf.zeros((5, 3)), trainable=True)
tf.Variable(tf.zeros((8, 2)), trainable=False)
tf.Variable(tf.zeros((1, 1)), trainable=True)
tf.Variable(tf.zeros((5,)), trainable=True)
tf.Variable(tf.zeros((3, 1)), trainable=False)
self.assertEqual(15 + 1 + 5, count_weights())
def test_include_scopes(self):
tf.Variable(tf.zeros((3, 2)), trainable=True)
with tf.variable_scope('foo'):
tf.Variable(tf.zeros((5, 2)), trainable=True)
self.assertEqual(6 + 10, count_weights())
def test_restrict_scope(self):
tf.Variable(tf.zeros((3, 2)), trainable=True)
with tf.variable_scope('foo'):
tf.Variable(tf.zeros((5, 2)), trainable=True)
with tf.variable_scope('bar'):
tf.Variable(tf.zeros((1, 2)), trainable=True)
self.assertEqual(10 + 2, count_weights('foo'))
def test_restrict_nested_scope(self):
tf.Variable(tf.zeros((3, 2)), trainable=True)
with tf.variable_scope('foo'):
tf.Variable(tf.zeros((5, 2)), trainable=True)
with tf.variable_scope('bar'):
tf.Variable(tf.zeros((1, 2)), trainable=True)
self.assertEqual(2, count_weights('foo/bar'))
def test_restrict_invalid_scope(self):
tf.Variable(tf.zeros((3, 2)), trainable=True)
with tf.variable_scope('foo'):
tf.Variable(tf.zeros((5, 2)), trainable=True)
with tf.variable_scope('bar'):
tf.Variable(tf.zeros((1, 2)), trainable=True)
self.assertEqual(0, count_weights('bar'))
def test_exclude_by_regex(self):
tf.Variable(tf.zeros((3, 2)), trainable=True)
with tf.variable_scope('foo'):
tf.Variable(tf.zeros((5, 2)), trainable=True)
with tf.variable_scope('bar'):
tf.Variable(tf.zeros((1, 2)), trainable=True)
self.assertEqual(0, count_weights(exclude=r'.*'))
self.assertEqual(6, count_weights(exclude=r'(^|/)foo/.*'))
self.assertEqual(16, count_weights(exclude=r'.*/bar/.*'))
def test_non_default_graph(self):
graph = tf.Graph()
with graph.as_default():
tf.Variable(tf.zeros((5, 3)), trainable=True)
tf.Variable(tf.zeros((8, 2)), trainable=False)
    self.assertNotEqual(graph, tf.get_default_graph())
self.assertEqual(15, count_weights(graph=graph))
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Put an OpenAI Gym environment into the TensorFlow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import tensorflow as tf
class InGraphEnv(object):
"""Put an OpenAI Gym environment into the TensorFlow graph.
The environment will be stepped and reset inside of the graph using
tf.py_func(). The current observation, action, reward, and done flag are held
  in corresponding variables.
"""
def __init__(self, env):
"""Put an OpenAI Gym environment into the TensorFlow graph.
Args:
env: OpenAI Gym environment.
"""
self._env = env
observ_shape = self._parse_shape(self._env.observation_space)
observ_dtype = self._parse_dtype(self._env.observation_space)
action_shape = self._parse_shape(self._env.action_space)
action_dtype = self._parse_dtype(self._env.action_space)
with tf.name_scope('environment'):
self._observ = tf.Variable(
tf.zeros(observ_shape, observ_dtype), name='observ', trainable=False)
self._action = tf.Variable(
tf.zeros(action_shape, action_dtype), name='action', trainable=False)
self._reward = tf.Variable(
0.0, dtype=tf.float32, name='reward', trainable=False)
self._done = tf.Variable(
True, dtype=tf.bool, name='done', trainable=False)
self._step = tf.Variable(
0, dtype=tf.int32, name='step', trainable=False)
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
def simulate(self, action):
"""Step the environment.
The result of the step can be accessed from the variables defined below.
Args:
action: Tensor holding the action to apply.
Returns:
Operation.
"""
with tf.name_scope('environment/simulate'):
if action.dtype in (tf.float16, tf.float32, tf.float64):
action = tf.check_numerics(action, 'action')
observ_dtype = self._parse_dtype(self._env.observation_space)
observ, reward, done = tf.py_func(
lambda a: self._env.step(a)[:3], [action],
[observ_dtype, tf.float32, tf.bool], name='step')
observ = tf.check_numerics(observ, 'observ')
reward = tf.check_numerics(reward, 'reward')
return tf.group(
self._observ.assign(observ),
self._action.assign(action),
self._reward.assign(reward),
self._done.assign(done),
self._step.assign_add(1))
def reset(self):
"""Reset the environment.
Returns:
Tensor of the current observation.
"""
observ_dtype = self._parse_dtype(self._env.observation_space)
observ = tf.py_func(self._env.reset, [], observ_dtype, name='reset')
observ = tf.check_numerics(observ, 'observ')
with tf.control_dependencies([
self._observ.assign(observ),
self._reward.assign(0),
self._done.assign(False)]):
return tf.identity(observ)
@property
def observ(self):
"""Access the variable holding the current observation."""
return self._observ
@property
def action(self):
"""Access the variable holding the last received action."""
return self._action
@property
def reward(self):
"""Access the variable holding the current reward."""
return self._reward
@property
def done(self):
"""Access the variable indicating whether the episode is done."""
return self._done
@property
def step(self):
"""Access the variable containing total steps of this environment."""
return self._step
def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError()
def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
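# --- Editor's hedged usage sketch (not part of the original module). ---
# Assumes TF1 graph mode, the `gym` package with 'Pendulum-v0', and that the
# ConvertTo32Bit wrapper is importable from `agents.tools.wrappers`.
if __name__ == '__main__':
  from agents.tools import wrappers
  example_env = InGraphEnv(wrappers.ConvertTo32Bit(gym.make('Pendulum-v0')))
  reset_op = example_env.reset()
  simulate_op = example_env.simulate(
      tf.zeros(example_env.action.shape, tf.float32))
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset_op)
    sess.run(simulate_op)
    print(sess.run([example_env.reward, example_env.done, example_env.step]))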
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute a streaming estimation of the mean of submitted tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class StreamingMean(object):
"""Compute a streaming estimation of the mean of submitted tensors."""
def __init__(self, shape, dtype):
"""Specify the shape and dtype of the mean to be estimated.
    Note that the float mean of zero submitted elements is NaN, while computing
the integer mean of zero elements raises a division by zero error.
Args:
shape: Shape of the mean to compute.
dtype: Data type of the mean to compute.
"""
self._dtype = dtype
self._sum = tf.Variable(lambda: tf.zeros(shape, dtype), False)
self._count = tf.Variable(lambda: 0, trainable=False)
@property
def value(self):
"""The current value of the mean."""
return self._sum / tf.cast(self._count, self._dtype)
@property
def count(self):
"""The number of submitted samples."""
return self._count
def submit(self, value):
"""Submit a single or batch tensor to refine the streaming mean."""
# Add a batch dimension if necessary.
if value.shape.ndims == self._sum.shape.ndims:
value = value[None, ...]
return tf.group(
self._sum.assign_add(tf.reduce_sum(value, 0)),
self._count.assign_add(tf.shape(value)[0]))
def clear(self):
"""Return the mean estimate and reset the streaming statistics."""
value = self._sum / tf.cast(self._count, self._dtype)
with tf.control_dependencies([value]):
reset_value = self._sum.assign(tf.zeros_like(self._sum))
reset_count = self._count.assign(0)
with tf.control_dependencies([reset_value, reset_count]):
return tf.identity(value)
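# --- Editor's hedged usage sketch (not part of the original module, TF1 API). ---
# Submits a batch of three values and reads back their mean while resetting
# the streaming statistics.
if __name__ == '__main__':
  mean = StreamingMean((), tf.float32)
  submit_op = mean.submit(tf.constant([1.0, 2.0, 3.0]))
  with tf.control_dependencies([submit_op]):
    result = mean.clear()
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(result))  # Expected to print 2.0.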
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating nested tuples, list, and dictionaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Disable linter warning for using `flatten` as argument name.
# pylint: disable=redefined-outer-name
_builtin_zip = zip
_builtin_map = map
_builtin_filter = filter
def zip_(*structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Combine corresponding elements in multiple nested structure to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*structures: Nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'zip() got unexpected keyword arguments.'
return map(
lambda *x: x if len(x) > 1 else x[0],
*structures,
flatten=flatten)
def map_(function, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
function: The function to apply to the elements of the structure. Receives
one argument for every structure that is provided.
    *structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'map() got unexpected keyword arguments.'
def impl(function, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))
if hasattr(structures[0], '_fields'): # namedtuple
return type(structures[0])(*args)
else: # tuple, list
return type(structures[0])(args)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
merged = {
k: impl(function, *(s[k] for s in structures))
for k in structures[0]}
return type(structures[0])(merged)
return function(*structures)
result = impl(function, *structures)
if flatten:
result = flatten_(result)
return result
def flatten_(structure):
"""Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
  dicts. Dictionary keys will be discarded but values will be ordered by the
  sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple.
"""
if isinstance(structure, dict):
if structure:
structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1]
else:
      # Zip doesn't work on the items of an empty dictionary.
structure = ()
if isinstance(structure, (tuple, list)):
result = []
for element in structure:
result += flatten_(element)
return tuple(result)
return (structure,)
def filter_(predicate, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
"""Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predicate: The function to determine whether an element should be kept.
Receives one argument for every structure that is provided.
    *structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'filter() got unexpected keyword arguments.'
def impl(predicate, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))
else:
filtered = (impl(predicate, x) for x in structures[0])
# Remove empty containers and construct result structure.
if hasattr(structures[0], '_fields'): # namedtuple
filtered = (x if x != () else None for x in filtered)
return type(structures[0])(*filtered)
else: # tuple, list
filtered = (
x for x in filtered if not isinstance(x, (tuple, list, dict)) or x)
return type(structures[0])(filtered)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = {
k: impl(predicate, *(s[k] for s in structures))
for k in structures[0]}
else:
filtered = {k: impl(predicate, v) for k, v in structures[0].items()}
# Remove empty containers and construct result structure.
filtered = {
k: v for k, v in filtered.items()
if not isinstance(v, (tuple, list, dict)) or v}
return type(structures[0])(filtered)
if len(structures) > 1:
return structures if predicate(*structures) else ()
else:
return structures[0] if predicate(structures[0]) else ()
result = impl(predicate, *structures)
if flatten:
result = flatten_(result)
return result
# pylint: disable=redefined-builtin
zip = zip_
map = map_
flatten = flatten_
filter = filter_
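# Example usage (an illustrative sketch, not part of the original module):
# the helpers mirror the built-ins but recurse into nested tuples, lists,
# and dicts.
if __name__ == '__main__':
  structure = {'a': (1, 2), 'b': [3]}
  print(map_(lambda x: x * 10, structure))         # {'a': (10, 20), 'b': [30]}
  print(filter_(lambda x: x % 2 == 1, structure))  # {'a': (1,), 'b': [3]}
  print(flatten_(((1, 2), [3])))                   # (1, 2, 3)
  print(zip_({'a': 1}, {'a': 2}))                  # {'a': (1, 2)}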
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .ppo import PPO
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the PPO algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import tensorflow as tf
from tensorflow.python.client import device_lib
def reinit_nested_vars(variables, indices=None):
"""Reset all variables in a nested tuple to zeros.
Args:
variables: Nested tuple or list of variables.
indices: Batch indices to reset, defaults to all.
Returns:
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
reinit_nested_vars(variable, indices) for variable in variables])
if indices is None:
return variables.assign(tf.zeros_like(variables))
else:
zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())
return tf.scatter_update(variables, indices, zeros)
def assign_nested_vars(variables, tensors, indices=None):
"""Assign tensors to matching nested tuple of variables.
Args:
variables: Nested tuple or list of variables to update.
tensors: Nested tuple or list of tensors to assign.
    indices: Batch indices to assign to; defaults to all.
Returns:
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
assign_nested_vars(variable, tensor)
for variable, tensor in zip(variables, tensors)])
if indices is None:
return variables.assign(tensors)
else:
return tf.scatter_update(variables, indices, tensors)
def discounted_return(reward, length, discount):
"""Discounted Monte-Carlo returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')
def fixed_step_return(reward, value, length, discount, window):
"""N-step discounted return."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.zeros_like(reward)
for _ in range(window):
return_ += reward
reward = discount * tf.concat(
[reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
return_ += discount ** window * tf.concat(
[value[:, window:], tf.zeros_like(value[:, -window:])], 1)
return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')
def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stack([sequence, discount], 2)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')
def lambda_advantage(reward, value, length, discount, gae_lambda):
"""Generalized Advantage Estimation."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
delta = reward + discount * next_value - value
advantage = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + gae_lambda * discount * agg,
tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')
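# Reference sketch (illustrative, not part of the original module): for a
# single, unpadded sequence, the scans above compute the backward recursions
# R_t = r_t + discount * R_{t+1} and A_t = delta_t + discount * lambda * A_{t+1}
# with delta_t = r_t + discount * V_{t+1} - V_t. A plain Python version:
def _reference_return_and_advantage(reward, value, discount, gae_lambda):
  """Pure-Python reference for discounted_return and lambda_advantage."""
  next_value = list(value[1:]) + [0.0]
  delta = [r + discount * nv - v for r, v, nv in zip(reward, value, next_value)]
  return_, advantage = [0.0] * len(reward), [0.0] * len(reward)
  acc_return, acc_advantage = 0.0, 0.0
  for t in reversed(range(len(reward))):
    acc_return = reward[t] + discount * acc_return
    acc_advantage = delta[t] + discount * gae_lambda * acc_advantage
    return_[t], advantage[t] = acc_return, acc_advantage
  return return_, advantage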
def available_gpus():
"""List of GPU device names detected by TensorFlow."""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def gradient_summaries(grad_vars, groups=None, scope='gradients'):
"""Create histogram summaries of the gradient.
  Summaries can be grouped via regexes matching variable names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
"""
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for grad, var in grad_vars:
if grad is None:
continue
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(grad)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
for name, grads in grouped.items():
grads = [tf.reshape(grad, [-1]) for grad in grads]
grads = tf.concat(grads, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, grads))
return tf.summary.merge(summaries)
def variable_summaries(vars_, groups=None, scope='weights'):
"""Create histogram summaries for the provided variables.
  Summaries can be grouped via regexes matching variable names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
"""
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for var in vars_:
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(var)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
# pylint: disable=redefined-argument-from-local
for name, vars_ in grouped.items():
vars_ = [tf.reshape(var, [-1]) for var in vars_]
vars_ = tf.concat(vars_, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, vars_))
return tf.summary.merge(summaries)
def set_dimension(tensor, axis, value):
"""Set the length of a tensor along the specified dimension.
Args:
tensor: Tensor to define shape of.
axis: Dimension to set the static shape for.
value: Integer holding the length.
Raises:
ValueError: When the tensor already has a different length specified.
"""
shape = tensor.shape.as_list()
if shape[axis] not in (value, None):
message = 'Cannot set dimension {} of tensor {} to {}; is already {}.'
raise ValueError(message.format(axis, tensor.name, value, shape[axis]))
shape[axis] = value
tensor.set_shape(shape)
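# Example usage (an illustrative sketch, not part of the original module):
# group gradient summaries by regex, similar to the `weight_summaries`
# mapping used in the example configurations.
if __name__ == '__main__':
  with tf.variable_scope('policy'):
    policy_weights = tf.get_variable('weights', [3])
  with tf.variable_scope('value'):
    value_weights = tf.get_variable('weights', [2])
  loss = tf.reduce_sum(policy_weights ** 2) + tf.reduce_sum(value_weights ** 2)
  grad_vars = tf.train.GradientDescentOptimizer(0.1).compute_gradients(loss)
  summary = gradient_summaries(
      grad_vars, groups=dict(policy=r'.*policy.*', value=r'.*value.*'))
  print(summary)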
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proximal Policy Optimization algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .ppo import PPO
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proximal Policy Optimization agent.
Based on John Schulman's implementation in Python and Theano:
https://github.com/joschu/modular_rl/blob/master/modular_rl/ppo.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from agents import parts
from agents import tools
from agents.algorithms.ppo import utility
class PPO(object):
"""A vectorized implementation of the PPO algorithm by John Schulman."""
def __init__(self, batch_env, step, is_training, should_log, config):
"""Create an instance of the PPO algorithm.
Args:
batch_env: In-graph batch environment.
step: Integer tensor holding the current training step.
is_training: Boolean tensor for whether the algorithm should train.
should_log: Boolean tensor for whether summaries should be returned.
config: Object containing the agent configuration as attributes.
"""
self._batch_env = batch_env
self._step = step
self._is_training = is_training
self._should_log = should_log
self._config = config
self._observ_filter = parts.StreamingNormalize(
self._batch_env.observ[0], center=True, scale=True, clip=5,
name='normalize_observ')
self._reward_filter = parts.StreamingNormalize(
self._batch_env.reward[0], center=False, scale=True, clip=10,
name='normalize_reward')
self._use_gpu = self._config.use_gpu and utility.available_gpus()
policy_params, state = self._initialize_policy()
self._initialize_memory(policy_params)
# Initialize the optimizer and penalty.
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
self._optimizer = self._config.optimizer(self._config.learning_rate)
self._penalty = tf.Variable(
self._config.kl_init_penalty, False, dtype=tf.float32)
# If the policy is stateful, allocate space to store its state.
with tf.variable_scope('ppo_temporary'):
with tf.device('/gpu:0'):
if state is None:
self._last_state = None
else:
var_like = lambda x: tf.Variable(lambda: tf.zeros_like(x), False)
self._last_state = tools.nested.map(var_like, state)
# Remember the action and policy parameters to write into the memory.
with tf.variable_scope('ppo_temporary'):
self._last_action = tf.Variable(
tf.zeros_like(self._batch_env.action), False, name='last_action')
self._last_policy = tools.nested.map(
          lambda x: tf.Variable(tf.zeros_like(x[:, 0]), False), policy_params)
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
"""
with tf.name_scope('perform/'):
observ = self._observ_filter.transform(observ)
if self._last_state is None:
state = None
else:
state = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_state)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
output = self._network(
observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, output.policy.mode)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mode', output.policy.mode()[:, 0]),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
remember_last_action = tf.scatter_update(
self._last_action, agent_indices, action[:, 0])
policy_params = tools.nested.filter(
lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
assert policy_params, 'Policy has no parameters to store.'
remember_last_policy = tools.nested.map(
lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
self._last_policy, policy_params, flatten=True)
with tf.control_dependencies((
assign_state, remember_last_action) + remember_last_policy):
return action[:, 0], tf.identity(summary)
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
"""
with tf.name_scope('experience/'):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
def _define_experience(self, agent_indices, observ, action, reward):
"""Implement the branch of experience() entered during training."""
update_filters = tf.summary.merge([
self._observ_filter.update(observ),
self._reward_filter.update(reward)])
with tf.control_dependencies([update_filters]):
if self._config.train_on_agent_action:
# NOTE: Doesn't seem to change much.
action = self._last_action
policy = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_policy)
batch = (observ, action, policy, reward)
append = self._current_episodes.append(batch, agent_indices)
with tf.control_dependencies([append]):
norm_observ = self._observ_filter.transform(observ)
norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
update_filters,
self._observ_filter.summary(),
self._reward_filter.summary(),
tf.summary.scalar('memory_size', self._num_finished_episodes),
tf.summary.histogram('normalized_observ', norm_observ),
tf.summary.histogram('action', self._last_action),
tf.summary.scalar('normalized_reward', norm_reward)]), str)
return summary
def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
"""
# We store observation, action, policy parameters, and reward.
template = (
self._batch_env.observ[0],
self._batch_env.action[0],
tools.nested.map(lambda x: x[0, 0], policy_params),
self._batch_env.reward[0])
with tf.variable_scope('ppo_temporary'):
self._current_episodes = parts.EpisodeMemory(
template, len(self._batch_env), self._config.max_length, 'episodes')
self._finished_episodes = parts.EpisodeMemory(
template, self._config.update_every, self._config.max_length, 'memory')
self._num_finished_episodes = tf.Variable(0, False)
def _define_end_episode(self, agent_indices):
"""Implement the branch of end_episode() entered during training."""
episodes, length = self._current_episodes.data(agent_indices)
space_left = self._config.update_every - self._num_finished_episodes
use_episodes = tf.range(tf.minimum(
tf.shape(agent_indices)[0], space_left))
episodes = tools.nested.map(lambda x: tf.gather(x, use_episodes), episodes)
append = self._finished_episodes.replace(
episodes, tf.gather(length, use_episodes),
use_episodes + self._num_finished_episodes)
with tf.control_dependencies([append]):
increment_index = self._num_finished_episodes.assign_add(
tf.shape(use_episodes)[0])
with tf.control_dependencies([increment_index]):
memory_full = self._num_finished_episodes >= self._config.update_every
return tf.cond(memory_full, self._training, str)
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
    iterations. Only one iteration's summary can be returned, so we choose the
    one from halfway through the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
"""
observ, action, old_policy_params, reward, advantage = sequence['sequence']
length = sequence['length']
old_policy = self._policy_type(**old_policy_params)
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
old_policy, network.policy, action, advantage, length)
network_loss = network.get('loss', 0.0)
loss = policy_loss + value_loss + tf.reduce_mean(network_loss)
gradients, variables = (
zip(*self._optimizer.compute_gradients(loss)))
optimize = self._optimizer.apply_gradients(
zip(gradients, variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.histogram('network_loss', network_loss),
tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
utility.gradient_summaries(zip(gradients, variables))])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
    The value loss is the squared difference between empirical and approximated
    returns over the collected episodes. Returns the loss tensor and a summary
    string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(
policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
tf.summary.histogram('kl', kl),
tf.summary.histogram('surrogate_loss', surrogate_loss),
tf.summary.histogram('kl_penalty', kl_penalty),
tf.summary.histogram('kl_cutoff', kl_cutoff),
tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
old_policy = self._policy_type(**old_policy_params)
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([print_penalty]):
kl_change = tf.reduce_mean(self._mask(
tf.contrib.distributions.kl_divergence(old_policy, network.policy),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
"""
with tf.name_scope('mask'):
range_ = tf.range(tensor.shape[1].value)
mask = range_[None, :] < length[:, None]
if tensor.shape.ndims > 2:
for _ in range(tensor.shape.ndims - 2):
mask = mask[..., None]
mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
return tf.check_numerics(masked, 'masked')
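# Reference sketch (illustrative, not part of the original module): the
# adaptation rule implemented by `_adjust_penalty` above, written in plain
# Python. The penalty grows when the policy moved too far from the behavioral
# policy and shrinks when it moved too little.
def _reference_adjust_penalty(penalty, kl_change, kl_target):
  """Pure-Python reference for the adaptive KL penalty update."""
  if kl_change > 1.3 * kl_target:
    return penalty * 1.5
  if kl_change < 0.7 * kl_target:
    return penalty / 1.5
  return penalty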
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example configurations using the PPO algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-variable
import tensorflow as tf
from agents import algorithms
from agents.scripts import networks
def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Environment
normalize_ranges = True
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
policy_layers = 200, 100
value_layers = 200, 100
init_output_factor = 0.1
init_std = 0.35
# Optimization
update_every = 30
update_epochs = 25
optimizer = tf.train.AdamOptimizer
learning_rate = 1e-4
# Losses
discount = 0.995
kl_target = 1e-2
kl_cutoff_factor = 2
kl_cutoff_coef = 1000
kl_init_penalty = 1
return locals()
def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 1e6 # 1M
# Optimization
batch_size = 20
chunk_length = 50
return locals()
def cartpole():
"""Configuration for the cart pole classic control task."""
locals().update(default())
# Environment
env = 'CartPole-v1'
max_length = 500
steps = 2e5 # 200k
normalize_ranges = False # The env reports wrong ranges.
# Network
network = networks.feed_forward_categorical
return locals()
def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v2'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals()
def cheetah():
"""Configuration for MuJoCo's half cheetah task."""
locals().update(default())
# Environment
env = 'HalfCheetah-v2'
max_length = 1000
steps = 1e7 # 10M
discount = 0.99
return locals()
def walker():
"""Configuration for MuJoCo's walker task."""
locals().update(default())
# Environment
env = 'Walker2d-v2'
max_length = 1000
steps = 1e7 # 10M
return locals()
def hopper():
"""Configuration for MuJoCo's hopper task."""
locals().update(default())
# Environment
env = 'Hopper-v2'
max_length = 1000
steps = 1e7 # 10M
update_every = 60
return locals()
def ant():
"""Configuration for MuJoCo's ant task."""
locals().update(default())
# Environment
env = 'Ant-v2'
max_length = 1000
steps = 2e7 # 20M
return locals()
def humanoid():
"""Configuration for MuJoCo's humanoid task."""
locals().update(default())
# Environment
env = 'Humanoid-v2'
max_length = 1000
steps = 5e7 # 50M
update_every = 60
return locals()
def bullet_ant():
"""Configuration for PyBullet's ant task."""
locals().update(default())
# Environment
import pybullet_envs # noqa pylint: disable=unused-import
env = 'AntBulletEnv-v0'
max_length = 1000
steps = 3e7 # 30M
update_every = 60
return locals()
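# Example usage (an illustrative sketch, not part of the original module):
# each configuration function returns its locals as a dict, which callers such
# as the training and test scripts wrap into an attribute dictionary.
if __name__ == '__main__':
  from agents import tools
  config = tools.AttrDict(pendulum())
  print(config.env, config.max_length, config.update_every)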
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for using reinforcement learning algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import re
import ruamel.yaml as yaml
import tensorflow as tf
from agents import tools
def define_simulation_graph(batch_env, algo_cls, config):
"""Define the algorithm and environment interaction.
Args:
batch_env: In-graph environments object.
algo_cls: Constructor of a batch algorithm.
config: Configuration object for the algorithm.
Returns:
Object providing graph elements via attributes.
"""
# pylint: disable=unused-variable
step = tf.Variable(0, False, dtype=tf.int32, name='global_step')
is_training = tf.placeholder(tf.bool, name='is_training')
should_log = tf.placeholder(tf.bool, name='should_log')
do_report = tf.placeholder(tf.bool, name='do_report')
force_reset = tf.placeholder(tf.bool, name='force_reset')
algo = algo_cls(batch_env, step, is_training, should_log, config)
done, score, summary = tools.simulate(
batch_env, algo, should_log, force_reset)
message = 'Graph contains {} trainable variables.'
tf.logging.info(message.format(tools.count_weights()))
# pylint: enable=unused-variable
return tools.AttrDict(locals())
def define_batch_env(constructor, num_agents, env_processes):
"""Create environments and apply all desired wrappers.
Args:
constructor: Constructor of an OpenAI gym environment.
num_agents: Number of environments to combine in the batch.
env_processes: Whether to step environment in external processes.
Returns:
In-graph environments object.
"""
with tf.variable_scope('environments'):
if env_processes:
envs = [
tools.wrappers.ExternalProcess(constructor)
for _ in range(num_agents)]
else:
envs = [constructor() for _ in range(num_agents)]
batch_env = tools.BatchEnv(envs, blocking=not env_processes)
batch_env = tools.InGraphBatchEnv(batch_env)
return batch_env
def define_saver(exclude=None):
"""Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object.
"""
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.global_variables():
if any(regex.match(variable.name) for regex in exclude):
continue
variables.append(variable)
saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)
return saver
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
"""Initialize or restore variables from a checkpoint if available.
Args:
sess: Session to initialize variables in.
saver: Saver to restore variables.
logdir: Directory to search for checkpoints.
checkpoint: Specify what checkpoint name to use; defaults to most recent.
resume: Whether to expect recovering a checkpoint or starting a new run.
Raises:
ValueError: If resume expected but no log directory specified.
RuntimeError: If no resume expected but a checkpoint was found.
"""
sess.run(tf.group(
tf.local_variables_initializer(),
tf.global_variables_initializer()))
if resume and not (logdir or checkpoint):
raise ValueError('Need to specify logdir to resume a checkpoint.')
if logdir:
state = tf.train.get_checkpoint_state(logdir)
if checkpoint:
checkpoint = os.path.join(logdir, checkpoint)
if not checkpoint and state and state.model_checkpoint_path:
checkpoint = state.model_checkpoint_path
if checkpoint and resume is False:
message = 'Found unexpected checkpoint when starting a new run.'
raise RuntimeError(message)
if checkpoint:
saver.restore(sess, checkpoint)
def save_config(config, logdir=None):
"""Save a new configuration by name.
  If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkpoints if specified.
Returns:
Configuration object.
"""
if logdir:
with config.unlocked:
config.logdir = logdir
message = 'Start a new run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
tf.gfile.MakeDirs(config.logdir)
config_path = os.path.join(config.logdir, 'config.yaml')
with tf.gfile.FastGFile(config_path, 'w') as file_:
yaml.dump(config, file_, default_flow_style=False)
else:
message = (
'Start a new run without storing summaries and checkpoints since no '
'logging directory was specified.')
tf.logging.info(message)
return config
def load_config(logdir):
# pylint: disable=missing-raises-doc
"""Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object.
"""
config_path = logdir and os.path.join(logdir, 'config.yaml')
if not config_path or not tf.gfile.Exists(config_path):
message = (
'Cannot resume an existing run since the logging directory does not '
'contain a configuration file.')
raise IOError(message)
with tf.gfile.FastGFile(config_path, 'r') as file_:
config = yaml.load(file_, Loader=yaml.Loader)
message = 'Resume run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
return config
def set_up_logging():
"""Configure the TensorFlow logger."""
tf.logging.set_verbosity(tf.logging.INFO)
logging.getLogger('tensorflow').propagate = False
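# Example usage (an illustrative sketch, not part of the original module):
# create variables, build a saver that skips temporary variables, and
# initialize a fresh run without restoring from a checkpoint.
if __name__ == '__main__':
  tf.get_variable('weights', [2])
  with tf.variable_scope('ppo_temporary'):
    tf.get_variable('scratch', [2])
  saver = define_saver(exclude=(r'.*_temporary.*',))
  with tf.Session() as sess:
    initialize_variables(sess, saver, logdir=None, resume=False)
    print(sess.run(tf.report_uninitialized_variables()))  # Expected: empty.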
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executable scripts for reinforcement learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import train
from . import utility
from . import visualize
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to render videos of the Proximal Policy Gradient algorithm.
Command line:
python3 -m agents.scripts.visualize \
--logdir=/path/to/logdir/<time>-<config> --outdir=/path/to/outdir/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import gym
import tensorflow as tf
from agents import tools
from agents.scripts import utility
def _create_environment(config, outdir):
"""Constructor for an instance of the environment.
Args:
config: Object providing configurations via attributes.
outdir: Directory to store videos in.
Raises:
NotImplementedError: For action spaces other than Box and Discrete.
Returns:
Wrapped OpenAI Gym environment.
"""
if isinstance(config.env, str):
env = gym.make(config.env)
else:
env = config.env()
# Ensure that the environment has the specification attribute set as expected
# by the monitor wrapper.
if not hasattr(env, 'spec'):
setattr(env, 'spec', getattr(env, 'spec', None))
if config.max_length:
env = tools.wrappers.LimitDuration(env, config.max_length)
env = gym.wrappers.Monitor(
env, outdir, lambda unused_episode_number: True)
if isinstance(env.action_space, gym.spaces.Box):
env = tools.wrappers.RangeNormalize(env)
env = tools.wrappers.ClipAction(env)
elif isinstance(env.action_space, gym.spaces.Discrete):
env = tools.wrappers.RangeNormalize(env, action=False)
else:
message = "Unsupported action space '{}'".format(type(env.action_space))
raise NotImplementedError(message)
env = tools.wrappers.ConvertTo32Bit(env)
env = tools.wrappers.CacheSpaces(env)
return env
def _define_loop(graph, eval_steps):
"""Create and configure an evaluation loop.
Args:
graph: Object providing graph elements via attributes.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
None, graph.step, graph.should_log, graph.do_report, graph.force_reset)
loop.add_phase(
'eval', graph.done, graph.score, graph.summary, eval_steps,
report_every=eval_steps,
log_every=None,
checkpoint_every=None,
feed={graph.is_training: False})
return loop
def visualize(
logdir, outdir, num_agents, num_episodes, checkpoint=None,
env_processes=True):
"""Recover checkpoint and render videos from it.
Args:
logdir: Logging directory of the trained algorithm.
outdir: Directory to store rendered videos in.
num_agents: Number of environments to simulate in parallel.
num_episodes: Total number of episodes to simulate.
checkpoint: Checkpoint name to load; defaults to most recent.
env_processes: Whether to step environments in separate processes.
"""
config = utility.load_config(logdir)
with tf.device('/cpu:0'):
batch_env = utility.define_batch_env(
lambda: _create_environment(config, outdir),
num_agents, env_processes)
graph = utility.define_simulation_graph(
batch_env, config.algorithm, config)
total_steps = num_episodes * config.max_length
loop = _define_loop(graph, total_steps)
saver = utility.define_saver(
exclude=(r'.*_temporary.*', r'global_step'))
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
utility.initialize_variables(
sess, saver, config.logdir, checkpoint, resume=True)
for unused_score in loop.run(sess, saver, total_steps):
pass
batch_env.close()
def main(_):
"""Load a trained algorithm and render videos."""
utility.set_up_logging()
if not FLAGS.logdir or not FLAGS.outdir:
    raise KeyError('You must specify logging and output directories.')
FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
visualize(
FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes,
FLAGS.checkpoint, FLAGS.env_processes)
if __name__ == '__main__':
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'logdir', None,
'Directory to the checkpoint of a training run.')
tf.app.flags.DEFINE_string(
'outdir', None,
'Local directory for storing the monitoring outdir.')
tf.app.flags.DEFINE_string(
'checkpoint', None,
'Checkpoint name to load; defaults to most recent.')
tf.app.flags.DEFINE_integer(
'num_agents', 1,
'How many environments to step in parallel.')
tf.app.flags.DEFINE_integer(
'num_episodes', 5,
'Minimum number of episodes to render.')
tf.app.flags.DEFINE_boolean(
'env_processes', True,
'Step environments in separate processes to circumvent the GIL.')
tf.app.run()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PPO algorithm usage example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import tensorflow as tf
from agents import algorithms
from agents import tools
from agents.scripts import configs
from agents.scripts import networks
from agents.scripts import train
class PPOTest(tf.test.TestCase):
def test_pendulum_no_crash(self):
nets = networks.feed_forward_gaussian, networks.recurrent_gaussian
for network in nets:
config = self._define_config()
with config.unlocked:
config.env = 'Pendulum-v0'
config.max_length = 200
config.steps = 500
config.network = network
for score in train.train(config, env_processes=True):
float(score)
def test_no_crash_cartpole(self):
config = self._define_config()
with config.unlocked:
config.env = 'CartPole-v1'
config.max_length = 200
config.steps = 500
config.normalize_ranges = False # The env reports wrong ranges.
config.network = networks.feed_forward_categorical
for score in train.train(config, env_processes=True):
float(score)
def test_no_crash_observation_shape(self):
nets = networks.feed_forward_gaussian, networks.recurrent_gaussian
observ_shapes = (1,), (2, 3), (2, 3, 4)
for network, observ_shape in itertools.product(nets, observ_shapes):
config = self._define_config()
with config.unlocked:
config.env = functools.partial(
tools.MockEnvironment, observ_shape, action_shape=(3,),
min_duration=15, max_duration=15)
config.max_length = 20
config.steps = 50
config.network = network
for score in train.train(config, env_processes=False):
float(score)
def test_no_crash_variable_duration(self):
config = self._define_config()
with config.unlocked:
config.env = functools.partial(
tools.MockEnvironment, observ_shape=(2, 3), action_shape=(3,),
min_duration=5, max_duration=25)
config.max_length = 25
config.steps = 100
config.network = networks.recurrent_gaussian
for score in train.train(config, env_processes=False):
float(score)
def test_no_crash_chunking(self):
config = self._define_config()
with config.unlocked:
config.env = functools.partial(
tools.MockEnvironment, observ_shape=(2, 3), action_shape=(3,),
min_duration=5, max_duration=25)
config.max_length = 25
config.steps = 100
config.network = networks.recurrent_gaussian
config.chunk_length = 10
config.batch_size = 5
for score in train.train(config, env_processes=False):
float(score)
def _define_config(self):
# Start from the example configuration.
locals().update(configs.default())
# pylint: disable=unused-variable
# General
algorithm = algorithms.PPO
num_agents = 2
update_every = 4
use_gpu = False
# Network
policy_layers = 20, 10
value_layers = 20, 10
# Optimization
update_epochs_policy = 2
update_epochs_value = 2
# pylint: enable=unused-variable
return tools.AttrDict(locals())
if __name__ == '__main__':
tf.test.main()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy networks for agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import gym
import numpy as np
import tensorflow as tf
import agents
tfd = tf.contrib.distributions
# TensorFlow's default implementation of the KL divergence between two
# tf.contrib.distributions.MultivariateNormalDiag instances sometimes results
# in NaN values in the gradients (not in the forward pass). Until the default
# implementation is fixed, we use our own KL implementation.
class CustomKLDiagNormal(tfd.MultivariateNormalDiag):
"""Multivariate Normal with diagonal covariance and our custom KL code."""
pass
@tfd.RegisterKL(CustomKLDiagNormal, CustomKLDiagNormal)
def _custom_diag_normal_kl(lhs, rhs, name=None): # pylint: disable=unused-argument
"""Empirical KL divergence of two normals with diagonal covariance.
Args:
lhs: Diagonal Normal distribution.
rhs: Diagonal Normal distribution.
name: Name scope for the op.
Returns:
KL divergence from lhs to rhs.
"""
with tf.name_scope(name or 'kl_divergence'):
mean0 = lhs.mean()
mean1 = rhs.mean()
logstd0 = tf.log(lhs.stddev())
logstd1 = tf.log(rhs.stddev())
logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1
return 0.5 * (
tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) +
tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1) +
tf.reduce_sum(logstd1_2, -1) - tf.reduce_sum(logstd0_2, -1) -
mean0.shape[-1].value)
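# Hedged reference sketch (not part of the library): the closed form above can
# be cross-checked against a plain NumPy implementation of
# KL(N0 || N1) = 0.5 * sum(var0/var1 + (mean1 - mean0)^2/var1 + log var1 - log var0 - 1).
# The helper name is illustrative, relies on the module's numpy import, and is
# never called by the library code.
def _reference_diag_normal_kl(mean0, std0, mean1, std1):
  """NumPy cross-check for the diagonal Gaussian KL divergence above."""
  var0, var1 = std0 ** 2, std1 ** 2
  return 0.5 * np.sum(
      var0 / var1 + (mean1 - mean0) ** 2 / var1 +
      np.log(var1) - np.log(var0) - 1.0, axis=-1)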
def feed_forward_gaussian(
config, action_space, observations, unused_length, state=None):
"""Independent feed forward networks for policy and value.
The policy network outputs the mean action, and the standard deviation is
learned as an independent parameter vector.
Args:
config: Configuration object.
action_space: Action space of the environment.
observations: Sequences of observations.
unused_length: Batch of sequence lengths.
state: Unused batch of initial states.
Raises:
ValueError: Unexpected action space.
Returns:
Attribute dictionary containing the policy, value, and unused state.
"""
if not isinstance(action_space, gym.spaces.Box):
raise ValueError('Network expects continuous actions.')
if not len(action_space.shape) == 1:
raise ValueError('Network only supports 1D action vectors.')
action_size = action_space.shape[0]
init_output_weights = tf.contrib.layers.variance_scaling_initializer(
factor=config.init_output_factor)
before_softplus_std_initializer = tf.constant_initializer(
np.log(np.exp(config.init_std) - 1))
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope('policy'):
x = flat_observations
for size in config.policy_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
mean = tf.contrib.layers.fully_connected(
x, action_size, tf.tanh,
weights_initializer=init_output_weights)
std = tf.nn.softplus(tf.get_variable(
'before_softplus_std', mean.shape[2:], tf.float32,
before_softplus_std_initializer))
std = tf.tile(
std[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope('value'):
x = flat_observations
for size in config.value_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
mean = tf.check_numerics(mean, 'mean')
std = tf.check_numerics(std, 'std')
value = tf.check_numerics(value, 'value')
policy = CustomKLDiagNormal(mean, std)
return agents.tools.AttrDict(policy=policy, value=value, state=state)
def feed_forward_categorical(
config, action_space, observations, unused_length, state=None):
"""Independent feed forward networks for policy and value.
The policy network outputs the logits of a categorical distribution over
the discrete actions.
Args:
config: Configuration object.
action_space: Action space of the environment.
observations: Sequences of observations.
unused_length: Batch of sequence lengths.
state: Unused batch of initial recurrent states.
Raises:
ValueError: Unexpected action space.
Returns:
Attribute dictionary containing the policy, value, and unused state.
"""
init_output_weights = tf.contrib.layers.variance_scaling_initializer(
factor=config.init_output_factor)
if not isinstance(action_space, gym.spaces.Discrete):
raise ValueError('Network expects discrete actions.')
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope('policy'):
x = flat_observations
for size in config.policy_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
logits = tf.contrib.layers.fully_connected(
x, action_space.n, None, weights_initializer=init_output_weights)
with tf.variable_scope('value'):
x = flat_observations
for size in config.value_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
policy = tfd.Categorical(logits)
return agents.tools.AttrDict(policy=policy, value=value, state=state)
def recurrent_gaussian(
config, action_space, observations, length, state=None):
"""Independent recurrent policy and feed forward value networks.
The policy network outputs the mean action, and the standard deviation is
learned as an independent parameter vector. The last policy layer is recurrent
and uses a GRU cell.
Args:
config: Configuration object.
action_space: Action space of the environment.
observations: Sequences of observations.
length: Batch of sequence lengths.
state: Batch of initial recurrent states.
Raises:
ValueError: Unexpected action space.
Returns:
Attribute dictionary containing the policy, value, and state.
"""
if not isinstance(action_space, gym.spaces.Box):
raise ValueError('Network expects continuous actions.')
if not len(action_space.shape) == 1:
raise ValueError('Network only supports 1D action vectors.')
action_size = action_space.shape[0]
init_output_weights = tf.contrib.layers.variance_scaling_initializer(
factor=config.init_output_factor)
before_softplus_std_initializer = tf.constant_initializer(
np.log(np.exp(config.init_std) - 1))
cell = tf.contrib.rnn.GRUBlockCell(config.policy_layers[-1])
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope('policy'):
x = flat_observations
for size in config.policy_layers[:-1]:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
x, state = tf.nn.dynamic_rnn(cell, x, length, state, tf.float32)
mean = tf.contrib.layers.fully_connected(
x, action_size, tf.tanh,
weights_initializer=init_output_weights)
std = tf.nn.softplus(tf.get_variable(
'before_softplus_std', mean.shape[2:], tf.float32,
before_softplus_std_initializer))
std = tf.tile(
std[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope('value'):
x = flat_observations
for size in config.value_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
mean = tf.check_numerics(mean, 'mean')
std = tf.check_numerics(std, 'std')
value = tf.check_numerics(value, 'value')
policy = CustomKLDiagNormal(mean, std)
return agents.tools.AttrDict(policy=policy, value=value, state=state)
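if __name__ == '__main__':
  # Hedged smoke test (not part of the library): build the feed-forward
  # Gaussian network on placeholder observations to illustrate the call
  # signature shared by the network constructors above. The config values
  # below are illustrative assumptions, not the library defaults.
  demo_config = agents.tools.AttrDict(
      policy_layers=(32, 32), value_layers=(32, 32),
      init_output_factor=0.1, init_std=0.35)
  demo_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,))
  demo_observ = tf.placeholder(tf.float32, [None, None, 5])
  demo_length = tf.placeholder(tf.int32, [None])
  demo = feed_forward_gaussian(
      demo_config, demo_space, demo_observ, demo_length)
  print(demo.policy, demo.value)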
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to train a batch reinforcement learning algorithm.
Command line:
python3 -m agents.scripts.train --logdir=/path/to/logdir --config=pendulum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import gym
import tensorflow as tf
from agents import tools
from agents.scripts import configs
from agents.scripts import utility
def _create_environment(config):
"""Constructor for an instance of the environment.
Args:
config: Object providing configurations via attributes.
Raises:
NotImplementedError: For action spaces other than Box and Discrete.
Returns:
Wrapped OpenAI Gym environment.
"""
if isinstance(config.env, str):
env = gym.make(config.env)
else:
env = config.env()
if config.max_length:
env = tools.wrappers.LimitDuration(env, config.max_length)
if isinstance(env.action_space, gym.spaces.Box):
if config.normalize_ranges:
env = tools.wrappers.RangeNormalize(env)
env = tools.wrappers.ClipAction(env)
elif isinstance(env.action_space, gym.spaces.Discrete):
if config.normalize_ranges:
env = tools.wrappers.RangeNormalize(env, action=False)
else:
message = "Unsupported action space '{}'".format(type(env.action_space))
raise NotImplementedError(message)
env = tools.wrappers.ConvertTo32Bit(env)
env = tools.wrappers.CacheSpaces(env)
return env
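# Hedged illustration (not part of the library): `config.env` may be either a
# Gym environment id such as 'Pendulum-v0' or a zero-argument callable that
# constructs an environment, matching the two branches at the top of
# _create_environment() above. The id shown here is illustrative only.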
def _define_loop(graph, logdir, train_steps, eval_steps):
"""Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoch.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
logdir, graph.step, graph.should_log, graph.do_report,
graph.force_reset)
loop.add_phase(
'train', graph.done, graph.score, graph.summary, train_steps,
report_every=train_steps,
log_every=train_steps // 2,
checkpoint_every=None,
feed={graph.is_training: True})
loop.add_phase(
'eval', graph.done, graph.score, graph.summary, eval_steps,
report_every=eval_steps,
log_every=eval_steps // 2,
checkpoint_every=10 * eval_steps,
feed={graph.is_training: False})
return loop
def train(config, env_processes):
"""Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: Whether to step environments in separate processes.
Yields:
Evaluation scores.
"""
tf.reset_default_graph()
if config.update_every % config.num_agents:
tf.logging.warn('Number of agents should divide episodes per update.')
with tf.device('/cpu:0'):
batch_env = utility.define_batch_env(
lambda: _create_environment(config),
config.num_agents, env_processes)
graph = utility.define_simulation_graph(
batch_env, config.algorithm, config)
loop = _define_loop(
graph, config.logdir,
config.update_every * config.max_length,
config.eval_episodes * config.max_length)
total_steps = int(
config.steps / config.update_every *
(config.update_every + config.eval_episodes))
# Exclude episode related variables since the Python state of environments is
# not checkpointed and thus new episodes start after resuming.
saver = utility.define_saver(exclude=(r'.*_temporary.*',))
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
utility.initialize_variables(sess, saver, config.logdir)
for score in loop.run(sess, saver, total_steps):
yield score
batch_env.close()
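# Hedged worked example of the step bookkeeping in train() above, using
# illustrative numbers: with steps=3000, update_every=30 and eval_episodes=3,
# the loop alternates 30 training episodes with 3 evaluation episodes, so the
# budget is scaled to total_steps = 3000 / 30 * (30 + 3) = 3300 to leave room
# for the evaluation phases.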
def main(_):
"""Create or load configuration and launch the trainer."""
utility.set_up_logging()
if not FLAGS.config:
raise KeyError('You must specify a configuration.')
logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
try:
config = utility.load_config(logdir)
except IOError:
config = tools.AttrDict(getattr(configs, FLAGS.config)())
config = utility.save_config(config, logdir)
for score in train(config, FLAGS.env_processes):
tf.logging.info('Score {}.'.format(score))
if __name__ == '__main__':
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'logdir', None,
'Base directory to store logs.')
tf.app.flags.DEFINE_string(
'timestamp', datetime.datetime.now().strftime('%Y%m%dT%H%M%S'),
'Sub directory to store logs.')
tf.app.flags.DEFINE_string(
'config', None,
'Configuration to execute.')
tf.app.flags.DEFINE_boolean(
'env_processes', True,
'Step environments in separate processes to circumvent the GIL.')
tf.app.run()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Memory that stores episodes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents import tools
class EpisodeMemory(object):
"""Memory that stores episodes."""
def __init__(self, template, capacity, max_length, scope):
"""Create a memory that stores episodes.
Each transition tuple consists of quantities specified by the template.
These quantities would typically be observations, actions, rewards, and
done indicators.
Args:
template: Nested tensors to derive shapes and dtypes of each transition.
capacity: Number of episodes, or rows, held by the memory.
max_length: Allocated sequence length for the episodes.
scope: Variable scope to use for internal variables.
"""
self._capacity = capacity
self._max_length = max_length
with tf.variable_scope(scope) as var_scope:
self._scope = var_scope
self._length = tf.Variable(tf.zeros(capacity, tf.int32), False)
self._buffers = tools.nested.map(
lambda x: tf.Variable(tf.zeros(
[capacity, max_length] + x.shape.as_list(), x.dtype), False),
template)
def length(self, rows=None):
"""Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
return tf.gather(self._length, rows)
def append(self, transitions, rows=None):
"""Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity,
message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less(
tf.gather(self._length, rows), self._max_length,
message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
timestep = tf.gather(self._length, rows)
indices = tf.stack([rows, timestep], 1)
append_ops = tools.nested.map(
lambda var, val: tf.scatter_nd_update(var, indices, val),
self._buffers, transitions, flatten=True)
with tf.control_dependencies(append_ops):
episode_mask = tf.reduce_sum(tf.one_hot(
rows, self._capacity, dtype=tf.int32), 0)
return self._length.assign_add(episode_mask)
def replace(self, episodes, length, rows=None):
"""Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity, message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less_equal(
length, self._max_length, message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
replace_ops = tools.nested.map(
lambda var, val: tf.scatter_update(var, rows, val),
self._buffers, episodes, flatten=True)
with tf.control_dependencies(replace_ops):
return tf.scatter_update(self._length, rows, length)
def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)
length = tf.gather(self._length, rows)
return episode, length
def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
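if __name__ == '__main__':
  # Hedged usage sketch (not part of the library): write one transition into
  # both rows of a tiny memory and read the episode lengths back. Shapes and
  # values are illustrative only; this targets TF1 graph mode like the class.
  template = (tf.zeros([4], tf.float32), tf.zeros([], tf.float32))
  memory = EpisodeMemory(template, capacity=2, max_length=5, scope='memory_demo')
  append_op = memory.append(
      (tf.ones([2, 4], tf.float32), tf.ones([2], tf.float32)))
  episodes, length = memory.data()
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(append_op)
    print(sess.run(length))  # Each of the two rows now holds one transition.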
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalize tensors based on streaming estimates of mean and variance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class StreamingNormalize(object):
"""Normalize tensors based on streaming estimates of mean and variance."""
def __init__(
self, template, center=True, scale=True, clip=10, name='normalize'):
"""Normalize tensors based on streaming estimates of mean and variance.
Centering the value, scaling it by the standard deviation, and clipping
outlier values are optional.
Args:
template: Example tensor providing shape and dtype of the value to track.
center: Python boolean indicating whether to subtract mean from values.
scale: Python boolean indicating whether to scale values by stddev.
clip: If and when to clip normalized values.
name: Parent scope of operations provided by this class.
"""
self._center = center
self._scale = scale
self._clip = clip
self._name = name
with tf.name_scope(name):
self._count = tf.Variable(0, False)
self._mean = tf.Variable(tf.zeros_like(template), False)
self._var_sum = tf.Variable(tf.zeros_like(template), False)
def transform(self, value):
"""Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor.
"""
with tf.name_scope(self._name + '/transform'):
no_batch_dim = value.shape.ndims == self._mean.shape.ndims
if no_batch_dim:
# Add a batch dimension if necessary.
value = value[None, ...]
if self._center:
value -= self._mean[None, ...]
if self._scale:
# We cannot scale before seeing at least two samples.
value /= tf.cond(
self._count > 1, lambda: self._std() + 1e-8,
lambda: tf.ones_like(self._var_sum))[None]
if self._clip:
value = tf.clip_by_value(value, -self._clip, self._clip)
# Remove batch dimension if necessary.
if no_batch_dim:
value = value[0]
return tf.check_numerics(value, 'value')
def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
value = value[None, ...]
count = tf.shape(value)[0]
with tf.control_dependencies([self._count.assign_add(count)]):
step = tf.cast(self._count, tf.float32)
mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
new_mean = self._mean + mean_delta / step
new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
var_delta = (
value - self._mean[None, ...]) * (value - new_mean[None, ...])
new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
with tf.control_dependencies([new_mean, new_var_sum]):
update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
with tf.control_dependencies(update):
if value.shape.ndims == 1:
value = tf.reduce_mean(value)
return self._summary('value', tf.reduce_mean(value))
def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum)))
def summary(self):
"""Summary string of mean and standard deviation.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/summary'):
mean_summary = tf.cond(
self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(
self._count > 1, lambda: self._summary('stddev', self._std()), str)
return tf.summary.merge([mean_summary, std_summary])
def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of the current standard deviation estimate.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
lambda: tf.ones_like(self._var_sum) * float('nan'))
# The epsilon corrects for small negative variance values caused by
# the algorithm. It was empirically chosen to work with all environments
# tested.
return tf.sqrt(variance + 1e-4)
def _summary(self, name, tensor):
"""Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
"""
if tensor.shape.ndims == 0:
return tf.summary.scalar(name, tensor)
else:
return tf.summary.histogram(name, tensor)
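if __name__ == '__main__':
  # Hedged usage sketch (not part of the library): feed one batch into the
  # running statistics, then normalize a fresh value under a control
  # dependency so the update runs first. Numbers are illustrative only.
  normalize = StreamingNormalize(tf.zeros([3], tf.float32), name='normalize_demo')
  batch = tf.constant([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
  update = normalize.update(batch)
  with tf.control_dependencies([update]):
    transformed = normalize.transform(tf.constant([2.0, 2.0, 2.0]))
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(transformed))  # Close to zero once centered by the mean.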
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reusable parts for building agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .normalize import StreamingNormalize
from .memory import EpisodeMemory
from .iterate_sequences import iterate_sequences
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalize tensors based on streaming estimates of mean and variance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents import tools
def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
"""
if not length.shape[0].value:
raise ValueError('Batch size of length tensor must be set.')
num_sequences = length.shape[0].value
sequences = dict(sequence=sequences, length=length)
dataset = tf.data.Dataset.from_tensor_slices(sequences)
dataset = dataset.repeat(num_epochs)
if chunk_length:
dataset = dataset.map(remove_padding).flat_map(
# pylint: disable=g-long-lambda
lambda x: tf.data.Dataset.from_tensor_slices(
chunk_sequence(x, chunk_length, padding_value)))
num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
else:
num_chunks = num_sequences
if batch_size:
dataset = dataset.shuffle(num_sequences // 2)
dataset = dataset.batch(batch_size or num_sequences)
dataset = dataset.prefetch(num_epochs)
iterator = dataset.make_initializable_iterator()
with tf.control_dependencies([iterator.initializer]):
num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
return tf.scan(
# pylint: disable=g-long-lambda
lambda _1, index: consumer_fn(iterator.get_next()),
tf.range(num_batches), output_template, parallel_iterations=1)
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
"""Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with time dimension.
chunk_length: Size of chunks the sequence will be split into.
padding_value: Value used for padding the last chunk after the sequence.
Returns:
Nested dict of sequence tensors with chunk dimension.
"""
if 'length' in sequence:
length = sequence.pop('length')
else:
length = tf.shape(tools.nested.flatten(sequence)[0])[0]
num_chunks = (length - 1) // chunk_length + 1
padding_length = chunk_length * num_chunks - length
padded = tools.nested.map(
# pylint: disable=g-long-lambda
lambda tensor: tf.concat([
tensor, 0 * tensor[:padding_length] + padding_value], 0),
sequence)
chunks = tools.nested.map(
# pylint: disable=g-long-lambda
lambda tensor: tf.reshape(
tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
padded)
chunks['length'] = tf.concat([
chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
[chunk_length - padding_length]], 0)
return chunks
def remove_padding(sequence):
"""Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of tensors with padding elements and `length` key removed.
"""
length = sequence.pop('length')
sequence = tools.nested.map(lambda tensor: tensor[:length], sequence)
return sequence
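if __name__ == '__main__':
  # Hedged sketch (not part of the library): chunking a single sequence of
  # length 7 with chunk_length=3 yields 3 chunks, the last one padded, and
  # per-chunk lengths [3, 3, 1]. Values are illustrative only.
  sequence = dict(observ=tf.reshape(tf.range(14, dtype=tf.float32), [7, 2]))
  chunks = chunk_sequence(sequence, chunk_length=3, padding_value=0)
  with tf.Session() as sess:
    result = sess.run(chunks)
    print(result['observ'].shape)  # (3, 3, 2)
    print(result['length'])        # [3 3 1]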
|