python_code | repo_name | file_path |
---|---|---|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the new arg_scope used for TF-Slim ops.
Allows one to define models much more compactly by eliminating boilerplate
code. This is accomplished through the use of argument scoping (arg_scope).
Example of how to use scopes.arg_scope:
with scopes.arg_scope(ops.conv2d, padding='SAME',
stddev=0.01, weight_decay=0.0005):
net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = ops.conv2d(net, 256, [5, 5], scope='conv2')
The first call to conv2d will use predefined args:
ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
stddev=0.01, weight_decay=0.0005, scope='conv1')
The second call to conv2d will overwrite padding:
ops.conv2d(inputs, 256, [5, 5], padding='SAME',
stddev=0.01, weight_decay=0.0005, scope='conv2')
Example of how to reuse an arg_scope:
with scopes.arg_scope(ops.conv2d, padding='SAME',
stddev=0.01, weight_decay=0.0005) as conv2d_arg_scope:
net = ops.conv2d(net, 256, [5, 5], scope='conv1')
....
with scopes.arg_scope(conv2d_arg_scope):
net = ops.conv2d(net, 256, [5, 5], scope='conv2')
Example of how to use scopes.add_arg_scope:
@scopes.add_arg_scope
def conv2d(*args, **kwargs):
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
from tensorflow.python.framework import ops
_ARGSTACK_KEY = ("__arg_stack",)
_DECORATED_OPS = set()
def _get_arg_stack():
stack = ops.get_collection(_ARGSTACK_KEY)
if stack:
return stack[0]
else:
stack = [{}]
ops.add_to_collection(_ARGSTACK_KEY, stack)
return stack
def _current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def _add_op(op):
key_op = (op.__module__, op.__name__)
if key_op not in _DECORATED_OPS:
_DECORATED_OPS.add(key_op)
@contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Stores the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a dict,
kwargs must be empty. When list_ops_or_scope is a list or tuple, then
every op in it needs to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
ValueError: if any op in list_ops has not been decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
raise ValueError("When attempting to re-use a scope by suppling a"
"dictionary, kwargs must be empty.")
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError("list_ops_or_scope must either be a list/tuple or reused"
"scope (i.e. dict)")
try:
current_scope = _current_arg_scope().copy()
for op in list_ops_or_scope:
key_op = (op.__module__, op.__name__)
if not has_arg_scope(op):
raise ValueError("%s is not decorated with @add_arg_scope", key_op)
if key_op in current_scope:
current_kwargs = current_scope[key_op].copy()
current_kwargs.update(kwargs)
current_scope[key_op] = current_kwargs
else:
current_scope[key_op] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
The decorated function func_with_args().
"""
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = (func.__module__, func.__name__)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
return func_with_args
def has_arg_scope(func):
"""Checks whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
key_op = (func.__module__, func.__name__)
return key_op in _DECORATED_OPS
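# Illustrative sketch (not part of the original module): a toy op decorated
# with @add_arg_scope picks up defaults from an enclosing arg_scope. The
# `scale_op` function below is hypothetical and exists only for this example.
#
# @add_arg_scope
# def scale_op(x, factor=1.0, offset=0.0):
#   return x * factor + offset
#
# with arg_scope([scale_op], factor=2.0):
#   scale_op(3.0)              # called as scale_op(3.0, factor=2.0) -> 6.0
#   scale_op(3.0, offset=1.0)  # explicit kwargs are merged in -> 7.0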
| models-master | inception/inception/slim/scopes.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import slim
def get_variables(scope=None):
return slim.variables.get_variables(scope)
def get_variables_by_name(name):
return slim.variables.get_variables_by_name(name)
class CollectionsTest(tf.test.TestCase):
def testVariables(self):
batch_size = 5
height, width = 299, 299
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.ops.conv2d],
batch_norm_params={'decay': 0.9997}):
slim.inception.inception_v3(inputs)
self.assertEqual(len(get_variables()), 388)
self.assertEqual(len(get_variables_by_name('weights')), 98)
self.assertEqual(len(get_variables_by_name('biases')), 2)
self.assertEqual(len(get_variables_by_name('beta')), 96)
self.assertEqual(len(get_variables_by_name('gamma')), 0)
self.assertEqual(len(get_variables_by_name('moving_mean')), 96)
self.assertEqual(len(get_variables_by_name('moving_variance')), 96)
def testVariablesWithoutBatchNorm(self):
batch_size = 5
height, width = 299, 299
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.ops.conv2d],
batch_norm_params=None):
slim.inception.inception_v3(inputs)
self.assertEqual(len(get_variables()), 196)
self.assertEqual(len(get_variables_by_name('weights')), 98)
self.assertEqual(len(get_variables_by_name('biases')), 98)
self.assertEqual(len(get_variables_by_name('beta')), 0)
self.assertEqual(len(get_variables_by_name('gamma')), 0)
self.assertEqual(len(get_variables_by_name('moving_mean')), 0)
self.assertEqual(len(get_variables_by_name('moving_variance')), 0)
def testVariablesByLayer(self):
batch_size = 5
height, width = 299, 299
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.ops.conv2d],
batch_norm_params={'decay': 0.9997}):
slim.inception.inception_v3(inputs)
self.assertEqual(len(get_variables()), 388)
self.assertEqual(len(get_variables('conv0')), 4)
self.assertEqual(len(get_variables('conv1')), 4)
self.assertEqual(len(get_variables('conv2')), 4)
self.assertEqual(len(get_variables('conv3')), 4)
self.assertEqual(len(get_variables('conv4')), 4)
self.assertEqual(len(get_variables('mixed_35x35x256a')), 28)
self.assertEqual(len(get_variables('mixed_35x35x288a')), 28)
self.assertEqual(len(get_variables('mixed_35x35x288b')), 28)
self.assertEqual(len(get_variables('mixed_17x17x768a')), 16)
self.assertEqual(len(get_variables('mixed_17x17x768b')), 40)
self.assertEqual(len(get_variables('mixed_17x17x768c')), 40)
self.assertEqual(len(get_variables('mixed_17x17x768d')), 40)
self.assertEqual(len(get_variables('mixed_17x17x768e')), 40)
self.assertEqual(len(get_variables('mixed_8x8x2048a')), 36)
self.assertEqual(len(get_variables('mixed_8x8x2048b')), 36)
self.assertEqual(len(get_variables('logits')), 2)
self.assertEqual(len(get_variables('aux_logits')), 10)
def testVariablesToRestore(self):
batch_size = 5
height, width = 299, 299
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.ops.conv2d],
batch_norm_params={'decay': 0.9997}):
slim.inception.inception_v3(inputs)
variables_to_restore = tf.get_collection(
slim.variables.VARIABLES_TO_RESTORE)
self.assertEqual(len(variables_to_restore), 388)
self.assertListEqual(variables_to_restore, get_variables())
def testVariablesToRestoreWithoutLogits(self):
batch_size = 5
height, width = 299, 299
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.ops.conv2d],
batch_norm_params={'decay': 0.9997}):
slim.inception.inception_v3(inputs, restore_logits=False)
variables_to_restore = tf.get_collection(
slim.variables.VARIABLES_TO_RESTORE)
self.assertEqual(len(variables_to_restore), 384)
def testRegularizationLosses(self):
batch_size = 5
height, width = 299, 299
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
slim.inception.inception_v3(inputs)
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(losses), len(get_variables_by_name('weights')))
def testTotalLossWithoutRegularization(self):
batch_size = 5
height, width = 299, 299
num_classes = 1001
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
dense_labels = tf.random_uniform((batch_size, num_classes))
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0):
logits, end_points = slim.inception.inception_v3(
inputs,
num_classes=num_classes)
# Cross entropy loss for the main softmax prediction.
slim.losses.cross_entropy_loss(logits,
dense_labels,
label_smoothing=0.1,
weight=1.0)
# Cross entropy loss for the auxiliary softmax head.
slim.losses.cross_entropy_loss(end_points['aux_logits'],
dense_labels,
label_smoothing=0.1,
weight=0.4,
scope='aux_loss')
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
self.assertEqual(len(losses), 2)
def testTotalLossWithRegularization(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
dense_labels = tf.random_uniform((batch_size, num_classes))
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
logits, end_points = slim.inception.inception_v3(inputs, num_classes)
# Cross entropy loss for the main softmax prediction.
slim.losses.cross_entropy_loss(logits,
dense_labels,
label_smoothing=0.1,
weight=1.0)
# Cross entropy loss for the auxiliary softmax head.
slim.losses.cross_entropy_loss(end_points['aux_logits'],
dense_labels,
label_smoothing=0.1,
weight=0.4,
scope='aux_loss')
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
self.assertEqual(len(losses), 2)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(reg_losses), 98)
if __name__ == '__main__':
tf.test.main()
| models-master | inception/inception/slim/collections_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for creating variables in TF-Slim.
The variables module is typically used for defining model variables from the
ops routines (see slim.ops). Such variables are used for training, evaluation
and inference of models.
All the variables created through this module are added to the
MODEL_VARIABLES collection. If you create a model variable outside slim, it can
be added with slim.variables.add_variable(external_variable, restore).
Usage:
weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
weights = variables.variable('weights',
shape=[100, 100],
initializer=weights_initializer,
regularizer=l2_regularizer,
device='/cpu:0')
biases = variables.variable('biases',
shape=[100],
initializer=tf.zeros_initializer,
device='/cpu:0')
# More complex example.
net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
with slim.arg_scope([variables.variable], restore=False):
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
# Get all model variables from all the layers.
model_variables = slim.variables.get_variables()
# Get all model variables from a specific layer, e.g. 'conv1'.
conv1_variables = slim.variables.get_variables('conv1')
# Get all weights from all the layers.
weights = slim.variables.get_variables_by_name('weights')
# Get all biases from all the layers.
biases = slim.variables.get_variables_by_name('biases')
# Get all variables to restore.
# (i.e. only those created by 'conv1' and 'conv2')
variables_to_restore = slim.variables.get_variables_to_restore()
************************************************
* Initializing model variables from a checkpoint
************************************************
# Create some variables.
v1 = slim.variables.variable(name="v1", ..., restore=False)
v2 = slim.variables.variable(name="v2", ...) # By default restore=True
...
# The list of variables to restore should only contain 'v2'.
variables_to_restore = slim.variables.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Restore variables from disk.
restorer.restore(sess, "/tmp/model.ckpt")
print("Model restored.")
# Do some work with the model
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from inception.slim import scopes
# Collection containing all the variables created using slim.variables
MODEL_VARIABLES = '_model_variables_'
# Collection containing the slim.variables that are created with restore=True.
VARIABLES_TO_RESTORE = '_variables_to_restore_'
def add_variable(var, restore=True):
"""Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
"""
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if var not in tf.get_collection(collection):
tf.add_to_collection(collection, var)
def get_variables(scope=None, suffix=None):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a copied list of variables with scope and suffix.
"""
candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
if suffix is not None:
candidates = [var for var in candidates if var.op.name.endswith(suffix)]
return candidates
def get_variables_to_restore():
"""Gets the list of variables to restore.
Returns:
a copied list of variables.
"""
return tf.get_collection(VARIABLES_TO_RESTORE)[:]
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=given_name)
def get_unique_variable(name):
"""Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = tf.get_collection(tf.GraphKeys.VARIABLES, name)
if not candidates:
raise ValueError('Could not find variable %s' % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' % name)
class VariableDeviceChooser(object):
"""Slim device chooser for variables.
When using parameter servers, it assigns variables to them in a round-robin fashion.
When not using parameter servers, it places variables on the requested device (GPU:0 or CPU:0).
"""
def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
"""Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
placement: string representing the placement of the variable either CPU:0
or GPU:0. When using parameter servers forced to CPU:0.
"""
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0
def __call__(self, op):
device_string = ''
if self._num_ps > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_ps
device_string = '%s/task:%d' % (self._ps_device, task_id)
device_string += '/%s' % self._placement
return device_string
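# Illustrative usage sketch (assumes two parameter-server tasks): when passed
# as the `device` argument of slim.variables.variable, the chooser assigns
# variables round-robin across '/job:ps/task:0/CPU:0', '/job:ps/task:1/CPU:0'.
#
# chooser = VariableDeviceChooser(num_parameter_servers=2)
# with scopes.arg_scope([variable], device=chooser):
#   weights = variable('weights', shape=[10, 10])  # /job:ps/task:0/CPU:0
#   biases = variable('biases', shape=[10])        # /job:ps/task:1/CPU:0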
# TODO(sguada) Remove once get_variable is able to colocate op.devices.
def variable_device(device, name):
"""Fix the variable device to colocate its ops."""
if callable(device):
var_name = tf.get_variable_scope().name + '/' + name
var_def = graph_pb2.NodeDef(name=var_name, op='Variable')
device = device(var_def)
if device is None:
device = ''
return device
@scopes.add_arg_scope
def global_step(device=''):
"""Returns the global step variable.
Args:
device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
"""
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variable.
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer,
trainable=False, collections=collections)
@scopes.add_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None,
regularizer=None, trainable=True, collections=None, device='',
restore=True):
"""Gets an existing variable with these parameters or creates a new one.
It also adds itself to a group with its name.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the tf.GraphKeys.VARIABLES
and MODEL_VARIABLES collections.
device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
# Make sure variables are added to tf.GraphKeys.VARIABLES and MODEL_VARIABLES
collections += [tf.GraphKeys.VARIABLES, MODEL_VARIABLES]
# Add to VARIABLES_TO_RESTORE if necessary
if restore:
collections.append(VARIABLES_TO_RESTORE)
# Remove duplicates
collections = set(collections)
# Get the device for the variable.
with tf.device(variable_device(device, name)):
return tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections)
| models-master | inception/inception/slim/variables.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import inception_model as inception
class InceptionTest(tf.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue('logits' in end_points)
logits = end_points['logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('aux_logits' in end_points)
aux_logits = end_points['aux_logits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['mixed_8x8x2048b']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_v3(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_v3(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['mixed_8x8x2048b']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknowBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.initialize_all_variables())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.initialize_all_variables())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v3(train_inputs, num_classes)
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.initialize_all_variables())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
if __name__ == '__main__':
tf.test.main()
| models-master | inception/inception/slim/inception_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests slim.scopes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import scopes
@scopes.add_arg_scope
def func1(*args, **kwargs):
return (args, kwargs)
@scopes.add_arg_scope
def func2(*args, **kwargs):
return (args, kwargs)
class ArgScopeTest(tf.test.TestCase):
def testEmptyArgScope(self):
with self.test_session():
self.assertEqual(scopes._current_arg_scope(), {})
def testCurrentArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = (func1.__module__, func1.__name__)
current_scope = {key_op: func1_kwargs.copy()}
with self.test_session():
with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope:
self.assertDictEqual(scope, current_scope)
def testCurrentArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = lambda f: (f.__module__, f.__name__)
current_scope = {key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()}
with self.test_session():
with scopes.arg_scope([func1], a=1, b=None, c=[1]):
with scopes.arg_scope([func2], b=2, d=[2]) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScope(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
key_op = (func1.__module__, func1.__name__)
current_scope = {key_op: func1_kwargs.copy()}
with self.test_session():
with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1:
pass
with scopes.arg_scope(scope1) as scope:
self.assertDictEqual(scope, current_scope)
def testReuseArgScopeNested(self):
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_kwargs = {'b': 2, 'd': [2]}
key = lambda f: (f.__module__, f.__name__)
current_scope1 = {key(func1): func1_kwargs.copy()}
current_scope2 = {key(func1): func1_kwargs.copy(),
key(func2): func2_kwargs.copy()}
with self.test_session():
with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1:
with scopes.arg_scope([func2], b=2, d=[2]) as scope2:
pass
with scopes.arg_scope(scope1):
self.assertDictEqual(scopes._current_arg_scope(), current_scope1)
with scopes.arg_scope(scope2):
self.assertDictEqual(scopes._current_arg_scope(), current_scope2)
def testSimpleArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.test_session():
with scopes.arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSimpleArgScopeWithTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with self.test_session():
with scopes.arg_scope((func1,), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testOverwriteArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
with scopes.arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0, b=2)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testNestedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with scopes.arg_scope([func1], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
func1_kwargs['b'] = 2
with scopes.arg_scope([func1], b=2):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with scopes.arg_scope([func1, func2], a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testSharedArgScopeTuple(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
def testPartiallySharedArgScope(self):
func1_args = (0,)
func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
func2_args = (1,)
func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
with scopes.arg_scope([func1, func2], a=1, b=None):
with scopes.arg_scope([func1], c=[1]), scopes.arg_scope([func2], d=[2]):
args, kwargs = func1(0)
self.assertTupleEqual(args, func1_args)
self.assertDictEqual(kwargs, func1_kwargs)
args, kwargs = func2(1)
self.assertTupleEqual(args, func2_args)
self.assertDictEqual(kwargs, func2_kwargs)
if __name__ == '__main__':
tf.test.main()
| models-master | inception/inception/slim/scopes_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-Slim grouped API. Please see README.md for details and usage."""
# pylint: disable=unused-import
# Collapse tf-slim into a single namespace.
from inception.slim import inception_model as inception
from inception.slim import losses
from inception.slim import ops
from inception.slim import scopes
from inception.slim import variables
from inception.slim.scopes import arg_scope
| models-master | inception/inception/slim/slim.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally, Ops that contain variables.variable have a trainable
parameter, which controls whether the op's variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.training import moving_averages
from inception.slim import losses
from inception.slim import scopes
from inception.slim import variables
# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
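# Illustrative sketch (not part of the original module; `total_loss` and
# `optimizer` are assumed to exist): the moving-average updates that
# batch_norm records in UPDATE_OPS_COLLECTION are typically grouped with the
# training step so they run on every iteration.
#
# batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
# train_step = optimizer.minimize(total_loss)
# train_op = tf.group(train_step, *batchnorm_updates)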
@scopes.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
moving_vars='moving_vars',
activation=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a Batch Normalization layer.
Args:
inputs: a tensor of size [batch_size, height, width, channels]
or [batch_size, channels].
decay: decay for the moving average.
center: If True, add the beta offset. If False, beta is not created.
scale: If True, multiply by gamma. If False, gamma is
not used. When the next layer is linear (also e.g. ReLU), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
moving_vars: collection to store the moving_mean and moving_variance.
activation: activation function.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
inputs_shape = inputs.get_shape()
with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse=reuse):
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = variables.variable('beta',
params_shape,
initializer=tf.zeros_initializer,
trainable=trainable,
restore=restore)
if scale:
gamma = variables.variable('gamma',
params_shape,
initializer=tf.ones_initializer,
trainable=trainable,
restore=restore)
# Create moving_mean and moving_variance add them to
# GraphKeys.MOVING_AVERAGE_VARIABLES collections.
moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
moving_mean = variables.variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
moving_variance = variables.variable('moving_variance',
params_shape,
initializer=tf.ones_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(inputs, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
else:
# Just use the moving_mean and moving_variance.
mean = moving_mean
variance = moving_variance
# Normalize the activations.
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
This function normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
ValueError: If `int_or_tuple` is not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
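# Worked examples (for illustration only):
#   _two_element_tuple(3)       -> (3, 3)
#   _two_element_tuple([3, 5])  -> (3, 5)
#   _two_element_tuple((7,))    -> raises ValueError (needs 2 elements)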
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the convolution
operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
kernel_size: a list of length 2: [kernel_height, kernel_width] of
the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
stddev: standard deviation of the truncated Gaussian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If it is None, batch_norm is not used.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
num_filters_in = inputs.get_shape()[-1]
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_filters_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if batch_norm_params is not None:
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(conv, **batch_norm_params)
else:
bias_shape = [num_filters_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.bias_add(conv, biases)
if activation:
outputs = activation(outputs)
return outputs
@scopes.add_arg_scope
def fc(inputs,
num_units_out,
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
weight matrix, that is multiplied by the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the initial
vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If it is None, batch_norm is not used.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
"""
with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
num_units_in = inputs.get_shape()[1]
weights_shape = [num_units_in, num_units_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
if batch_norm_params is not None:
outputs = tf.matmul(inputs, weights)
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(outputs, **batch_norm_params)
else:
bias_shape = [num_units_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
if activation:
outputs = activation(outputs)
return outputs
def one_hot_encoding(labels, num_classes, scope=None):
"""Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with tf.op_scope([labels], scope, 'OneHotEncoding'):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
onehot_labels.set_shape([batch_size, num_classes])
return onehot_labels
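# Worked example (for illustration only): with labels=[0, 2] and
# num_classes=3 the op produces
#   [[1., 0., 0.],
#    [0., 0., 1.]]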
@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with tf.op_scope([inputs], scope, 'MaxPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.op_scope([inputs], scope, 'AvgPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None, seed=1):
"""Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for op_scope.
seed: seed for the random mask generated by tf.nn.dropout.
Returns:
a tensor representing the output of the operation.
"""
if is_training and keep_prob > 0:
with tf.op_scope([inputs], scope, 'Dropout'):
return tf.nn.dropout(inputs, keep_prob, seed=seed)
else:
return inputs
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.op_scope([inputs], scope, 'Flatten'):
return tf.reshape(inputs, [-1, k])
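# Worked example (for illustration only): a [batch_size, 8, 8, 2048] tensor
# is reshaped to [batch_size, 131072], since 8 * 8 * 2048 = 131072.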
def repeat_op(repetitions, inputs, op, *args, **kwargs):
"""Build a sequential Tower starting from inputs by using an op repeatedly.
It creates new scopes for each operation by increasing the counter.
Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
it will repeat the given op under the following variable_scopes:
conv1/Conv
conv1/Conv_1
conv1/Conv_2
Args:
repetitions: number of repetitions.
inputs: a tensor of size [batch_size, height, width, channels].
op: an operation.
*args: args for the op.
**kwargs: kwargs for the op.
Returns:
a tensor: the result of applying the operation op `repetitions` times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
tower = inputs
for _ in range(repetitions):
tower = op(tower, *args, **kwargs)
return tower
| models-master | inception/inception/slim/ops.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception-v3 expressed in TensorFlow-Slim.
Usage:
# Parameters for BatchNorm.
batch_norm_params = {
# Decay for the batch_norm moving averages.
'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
with slim.arg_scope([slim.ops.conv2d],
stddev=0.1,
activation=tf.nn.relu,
batch_norm_params=batch_norm_params):
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
logits, endpoints = slim.inception.inception_v3(
images,
dropout_keep_prob=0.8,
num_classes=num_classes,
is_training=for_training,
restore_logits=restore_logits,
scope=scope)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import ops
from inception.slim import scopes
FLAGS = tf.app.flags.FLAGS
def inception_v3(inputs,
dropout_keep_prob=0.8,
num_classes=1000,
is_training=True,
restore_logits=True,
scope=''):
"""Latest Inception from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna
Args:
inputs: a tensor of size [batch_size, height, width, channels].
dropout_keep_prob: dropout keep_prob.
num_classes: number of predicted classes.
is_training: whether is training or not.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: Optional scope for op_scope.
Returns:
a list containing 'logits', 'aux_logits' Tensors.
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
with tf.op_scope([inputs], scope, 'inception_v3'):
tf.set_random_seed(FLAGS.DANITER_SEED)
with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
is_training=is_training):
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
scope='conv0')
# 149 x 149 x 32
end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
scope='conv1')
# 147 x 147 x 32
end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
padding='SAME', scope='conv2')
# 147 x 147 x 64
end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
stride=2, scope='pool1')
# 73 x 73 x 64
end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
scope='conv3')
# 73 x 73 x 80.
end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
scope='conv4')
# 71 x 71 x 192.
end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
stride=2, scope='pool2')
# 35 x 35 x 192.
net = end_points['pool2']
# Inception blocks
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
with tf.variable_scope('mixed_35x35x256a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x256a'] = net
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288a'] = net
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288b'] = net
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_17x17x768a'] = net
# mixed4: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 128, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 128, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768b'] = net
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768c'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768c'] = net
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768d'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768d'] = net
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768e'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 192, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 192, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768e'] = net
# Auxiliary Head logits
aux_logits = tf.identity(end_points['mixed_17x17x768e'])
with tf.variable_scope('aux_logits'):
aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
padding='VALID')
aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
# Shape of feature map before the final layer.
shape = aux_logits.get_shape()
aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
padding='VALID')
aux_logits = ops.flatten(aux_logits)
aux_logits = ops.fc(aux_logits, num_classes, activation=None,
stddev=0.001, restore=restore_logits)
end_points['aux_logits'] = aux_logits
# mixed_8: 8 x 8 x 1280.
# Note that the scope below is left unchanged so as not to invalidate
# previously trained checkpoints.
# TODO: Fix the scope when appropriate.
with tf.variable_scope('mixed_17x17x1280a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 192, [1, 1])
branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
padding='VALID')
with tf.variable_scope('branch7x7x3'):
branch7x7x3 = ops.conv2d(net, 192, [1, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
end_points['mixed_17x17x1280a'] = net
# mixed_9: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048a'] = net
# mixed_10: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048b'] = net
# Final pooling and prediction
with tf.variable_scope('logits'):
shape = net.get_shape()
net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
# 1 x 1 x 2048
net = ops.dropout(net, dropout_keep_prob, scope='dropout', seed=FLAGS.DANITER_SEED)
net = ops.flatten(net, scope='flatten')
# 2048
logits = ops.fc(net, num_classes, activation=None, scope='logits',
restore=restore_logits)
# 1000
end_points['logits'] = logits
end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
return logits, end_points
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
"""Yields the scope with the default parameters for inception_v3.
Args:
weight_decay: the weight decay for weights variables.
stddev: standard deviation of the truncated Gaussian weight distribution.
batch_norm_decay: decay for the batch_norm moving averages.
batch_norm_epsilon: small float added to the variance to avoid dividing by zero.
Yields:
an arg_scope with the parameters needed for inception_v3.
"""
# Set weight_decay for weights in Conv and FC layers.
with scopes.arg_scope([ops.conv2d, ops.fc],
weight_decay=weight_decay):
# Set stddev, activation and parameters for batch_norm.
with scopes.arg_scope([ops.conv2d],
stddev=stddev,
activation=tf.nn.relu,
seed=FLAGS.DANITER_SEED,
batch_norm_params={
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon}) as arg_scope:
yield arg_scope
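# Usage sketch (illustrative only): inception_v3_parameters is a plain
# generator, so it is assumed here to be driven as a context manager via
# contextlib.contextmanager; `images` and `num_classes` stand in for
# caller-provided values.
#
#   import contextlib
#   inception_v3_arg_scope = contextlib.contextmanager(inception_v3_parameters)
#   with inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1):
#     logits, end_points = inception_v3(images, num_classes=num_classes)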
| models-master | inception/inception/slim/inception_model.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from inception.slim import ops
from inception.slim import scopes
from inception.slim import variables
class ConvTest(tf.test.TestCase):
def testCreateConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateSquareConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, 3)
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, images.get_shape()[1:3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 6, 6
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
output = ops.conv2d(images, 64, images.get_shape()[1:3], padding='VALID')
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
def testCreateVerticalConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 1])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height, width, 32])
def testCreateHorizontalConv(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [1, 3])
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height, width, 32])
def testCreateConvWithStride(self):
height, width = 6, 6
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], stride=2)
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height/2, width/2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
ops.conv2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(variables.get_variables('conv1/weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEquals(output.op.name, 'conv1/Relu')
def testCreateConvWithoutActivation(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], activation=None)
self.assertEquals(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.conv2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateConvWithWD(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], weight_decay=0.01)
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(wd.op.name,
'Conv/weights/Regularizer/L2Regularizer/value')
sess.run(tf.initialize_all_variables())
self.assertTrue(sess.run(wd) <= 0.01)
def testCreateConvWithoutWD(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], weight_decay=0)
self.assertEquals(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEquals(len(variables.get_variables()), 2)
ops.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEquals(len(variables.get_variables()), 2)
def testNonReuseVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3])
self.assertEquals(len(variables.get_variables()), 2)
ops.conv2d(images, 32, [3, 3])
self.assertEquals(len(variables.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1',
reuse=True)
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
net = ops.conv2d(images, 32, [3, 3])
net = ops.conv2d(net, 32, [3, 3])
self.assertEquals(len(variables.get_variables()), 8)
self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
net = ops.conv2d(images, 32, [3, 3], scope='Conv')
net = ops.conv2d(net, 32, [3, 3], scope='Conv', reuse=True)
self.assertEquals(len(variables.get_variables()), 4)
self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 0)
class FCTest(tf.test.TestCase):
def testCreateFC(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = ops.fc(inputs, 32)
self.assertEquals(output.op.name, 'FC/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32])
def testCreateFCWithScope(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = ops.fc(inputs, 32, scope='fc1')
self.assertEquals(output.op.name, 'fc1/Relu')
def testCreateFcCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('fc1/weights'))
self.assertFalse(variables.get_variables('fc1/biases'))
ops.fc(inputs, 32, scope='fc1')
self.assertTrue(variables.get_variables('fc1/weights'))
self.assertTrue(variables.get_variables('fc1/biases'))
def testReuseVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
ops.fc(inputs, 32, scope='fc1')
self.assertEquals(len(variables.get_variables('fc1')), 2)
ops.fc(inputs, 32, scope='fc1', reuse=True)
self.assertEquals(len(variables.get_variables('fc1')), 2)
def testNonReuseVars(self):
height, width = 3, 3
inputs = tf.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
ops.fc(inputs, 32)
self.assertEquals(len(variables.get_variables('FC')), 2)
ops.fc(inputs, 32)
self.assertEquals(len(variables.get_variables('FC')), 4)
def testCreateFCWithoutActivation(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
output = ops.fc(inputs, 32, activation=None)
self.assertEquals(output.op.name, 'FC/xw_plus_b')
def testCreateFCWithWD(self):
height, width = 3, 3
with self.test_session() as sess:
inputs = tf.random_uniform((5, height * width * 3), seed=1)
ops.fc(inputs, 32, weight_decay=0.01)
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEquals(wd.op.name,
'FC/weights/Regularizer/L2Regularizer/value')
sess.run(tf.initialize_all_variables())
self.assertTrue(sess.run(wd) <= 0.01)
def testCreateFCWithoutWD(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
ops.fc(inputs, 32, weight_decay=0)
self.assertEquals(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseFCWithWD(self):
height, width = 3, 3
with self.test_session():
inputs = tf.random_uniform((5, height * width * 3), seed=1)
ops.fc(inputs, 32, weight_decay=0.01, scope='fc')
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
ops.fc(inputs, 32, weight_decay=0.01, scope='fc', reuse=True)
self.assertEquals(len(variables.get_variables()), 2)
self.assertEquals(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1)
with scopes.arg_scope([ops.fc], batch_norm_params={}):
net = ops.fc(images, 27)
net = ops.fc(net, 27)
self.assertEquals(len(variables.get_variables()), 8)
self.assertEquals(len(variables.get_variables('FC/BatchNorm')), 3)
self.assertEquals(len(variables.get_variables('FC_1/BatchNorm')), 3)
def testReuseFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height * width * 3), seed=1)
with scopes.arg_scope([ops.fc], batch_norm_params={'decay': 0.9}):
net = ops.fc(images, 27, scope='fc1')
net = ops.fc(net, 27, scope='fc1', reuse=True)
self.assertEquals(len(variables.get_variables()), 4)
self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3)
class MaxPoolTest(tf.test.TestCase):
def testCreateMaxPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3])
self.assertEquals(output.op.name, 'MaxPool/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateSquareMaxPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, 3)
self.assertEquals(output.op.name, 'MaxPool/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3], scope='pool1')
self.assertEquals(output.op.name, 'pool1/MaxPool')
def testCreateMaxPoolSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
def testCreateMaxPoolStrideSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalMaxPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.max_pool(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class AvgPoolTest(tf.test.TestCase):
def testCreateAvgPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3])
self.assertEquals(output.op.name, 'AvgPool/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateSquareAvgPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, 3)
self.assertEquals(output.op.name, 'AvgPool/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateAvgPoolWithScope(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3], scope='pool1')
self.assertEquals(output.op.name, 'pool1/AvgPool')
def testCreateAvgPoolSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
def testCreateAvgPoolStrideSAME(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.avg_pool(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class OneHotEncodingTest(tf.test.TestCase):
def testOneHotEncodingCreate(self):
with self.test_session():
labels = tf.constant([0, 1, 2])
output = ops.one_hot_encoding(labels, num_classes=3)
self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense')
self.assertListEqual(output.get_shape().as_list(), [3, 3])
def testOneHotEncoding(self):
with self.test_session():
labels = tf.constant([0, 1, 2])
one_hot_labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
output = ops.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
class DropoutTest(tf.test.TestCase):
def testCreateDropout(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.dropout(images)
self.assertEquals(output.op.name, 'Dropout/dropout/mul_1')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutNoTraining(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = ops.dropout(images, is_training=False)
self.assertEquals(output, images)
class FlattenTest(tf.test.TestCase):
def testFlatten4D(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
output = ops.flatten(images)
self.assertEquals(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlatten3D(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width), seed=1, name='images')
output = ops.flatten(images)
self.assertEquals(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlattenBatchSize(self):
height, width = 3, 3
with self.test_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
inputs = tf.placeholder(tf.int32, (None, height, width, 3))
output = ops.flatten(inputs)
self.assertEquals(output.get_shape().as_list(),
[None, height * width * 3])
output = sess.run(output, {inputs: images.eval()})
self.assertEquals(output.size,
images.get_shape().num_elements())
self.assertEqual(output.shape[0], images.get_shape()[0])
class BatchNormTest(tf.test.TestCase):
def testCreateOp(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.batch_norm(images)
self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images)
beta = variables.get_variables_by_name('beta')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(gamma, [])
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithoutCenterWithScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, center=False, scale=True)
beta = variables.get_variables_by_name('beta')
self.assertEquals(beta, [])
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithoutCenterWithoutScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, center=False, scale=False)
beta = variables.get_variables_by_name('beta')
self.assertEquals(beta, [])
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(gamma, [])
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True)
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
update_moving_mean = update_ops[0]
update_moving_variance = update_ops[1]
self.assertEquals(update_moving_mean.op.name,
'BatchNorm/AssignMovingAvg')
self.assertEquals(update_moving_variance.op.name,
'BatchNorm/AssignMovingAvg_1')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True, scope='bn')
ops.batch_norm(images, scale=True, scope='bn', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(len(beta), 1)
self.assertEquals(len(gamma), 1)
moving_vars = tf.get_collection('moving_vars')
self.assertEquals(len(moving_vars), 2)
def testReuseUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scope='bn')
self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
ops.batch_norm(images, scope='bn', reuse=True)
self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4)
def testCreateMovingVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
_ = ops.batch_norm(images, moving_vars='moving_vars')
moving_mean = tf.get_collection('moving_vars',
'BatchNorm/moving_mean')
self.assertEquals(len(moving_mean), 1)
self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
moving_variance = tf.get_collection('moving_vars',
'BatchNorm/moving_variance')
self.assertEquals(len(moving_variance), 1)
self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testComputeMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier')
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
for _ in range(10):
sess.run([output])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEvalMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier')
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assignment from a saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testReuseVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier')
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables
sess.run(tf.initialize_all_variables())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assignment from a saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
if __name__ == '__main__':
tf.test.main()
| models-master | inception/inception/slim/ops_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for various Neural Network TensorFlow losses.
All the losses defined here add themselves to the LOSSES_COLLECTION
collection.
l1_loss: Define an L1 loss, useful for regularization, i.e. lasso.
l2_loss: Define an L2 loss, useful for regularization, i.e. weight decay.
cross_entropy_loss: Define a cross entropy loss using
softmax_cross_entropy_with_logits. Useful for classification.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# In order to gather all losses in a network, the user should use this
# key for get_collection, i.e:
# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
LOSSES_COLLECTION = '_losses'
def l1_regularizer(weight=1.0, scope=None):
"""Define a L1 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for op_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.op_scope([tensor], scope, 'L1Regularizer'):
l1_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
return regularizer
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for op_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.op_scope([tensor], scope, 'L2Regularizer'):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
"""Define a L1L2 regularizer.
Args:
weight_l1: scale the L1 loss by this factor.
weight_l2: scale the L2 loss by this factor.
scope: Optional scope for op_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
weight_l1_t = tf.convert_to_tensor(weight_l1,
dtype=tensor.dtype.base_dtype,
name='weight_l1')
weight_l2_t = tf.convert_to_tensor(weight_l2,
dtype=tensor.dtype.base_dtype,
name='weight_l2')
reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
name='value_l1')
reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
name='value_l2')
return tf.add(reg_l1, reg_l2, name='value')
return regularizer
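# Usage sketch for the regularizer factories above (illustrative only; the
# variable name and shape are hypothetical). A returned regularizer can be
# passed to tf.get_variable, which adds the resulting op to
# tf.GraphKeys.REGULARIZATION_LOSSES:
#
#   weights = tf.get_variable(
#       'weights', shape=[3, 3, 64, 64],
#       initializer=tf.truncated_normal_initializer(stddev=0.01),
#       regularizer=l2_regularizer(0.0004))
#   reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)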
def l1_loss(tensor, weight=1.0, scope=None):
"""Define a L1Loss, useful for regularize, i.e. lasso.
Args:
tensor: tensor to regularize.
weight: scale the loss by this factor.
scope: Optional scope for op_scope.
Returns:
the L1 loss op.
"""
with tf.op_scope([tensor], scope, 'L1Loss'):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
def l2_loss(tensor, weight=1.0, scope=None):
"""Define a L2Loss, useful for regularize, i.e. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for op_scope.
Returns:
the L2 loss op.
"""
with tf.op_scope([tensor], scope, 'L2Loss'):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
weight=1.0, scope=None):
"""Define a Cross Entropy loss using softmax_cross_entropy_with_logits.
It can scale the loss by a weight factor and smooth the labels.
Args:
logits: [batch_size, num_classes] logits output of the network.
one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
label_smoothing: if greater than 0 then smooth the labels.
weight: scale the loss by this factor.
scope: Optional scope for op_scope.
Returns:
A tensor with the softmax_cross_entropy loss.
"""
logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())
with tf.op_scope([logits, one_hot_labels], scope, 'CrossEntropyLoss'):
num_classes = one_hot_labels.get_shape()[-1].value
one_hot_labels = tf.cast(one_hot_labels, logits.dtype)
if label_smoothing > 0:
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
one_hot_labels,
name='xentropy')
weight = tf.convert_to_tensor(weight,
dtype=logits.dtype.base_dtype,
name='loss_weight')
loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
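# Usage sketch (illustrative only; `logits` and `one_hot_labels` stand in for
# caller-provided [batch_size, num_classes] tensors). Because every loss above
# registers itself in LOSSES_COLLECTION, a total training loss can be formed
# by summing that collection:
#
#   loss = cross_entropy_loss(logits, one_hot_labels, label_smoothing=0.1)
#   total_loss = tf.add_n(tf.get_collection(LOSSES_COLLECTION),
#                         name='total_loss')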
| models-master | inception/inception/slim/losses.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import losses
class LossesTest(tf.test.TestCase):
def testL1Loss(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
weights = tf.constant(1.0, shape=shape)
wd = 0.01
loss = losses.l1_loss(weights, wd)
self.assertEquals(loss.op.name, 'L1Loss/value')
self.assertAlmostEqual(loss.eval(), num_elem * wd, 5)
def testL2Loss(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
weights = tf.constant(1.0, shape=shape)
wd = 0.01
loss = losses.l2_loss(weights, wd)
self.assertEquals(loss.op.name, 'L2Loss/value')
self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5)
class RegularizersTest(tf.test.TestCase):
def testL1Regularizer(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = losses.l1_regularizer()(tensor)
self.assertEquals(loss.op.name, 'L1Regularizer/value')
self.assertAlmostEqual(loss.eval(), num_elem, 5)
def testL1RegularizerWithScope(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = losses.l1_regularizer(scope='L1')(tensor)
self.assertEquals(loss.op.name, 'L1/value')
self.assertAlmostEqual(loss.eval(), num_elem, 5)
def testL1RegularizerWithWeight(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
weight = 0.01
loss = losses.l1_regularizer(weight)(tensor)
self.assertEquals(loss.op.name, 'L1Regularizer/value')
self.assertAlmostEqual(loss.eval(), num_elem * weight, 5)
def testL2Regularizer(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = losses.l2_regularizer()(tensor)
self.assertEquals(loss.op.name, 'L2Regularizer/value')
self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
def testL2RegularizerWithScope(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = losses.l2_regularizer(scope='L2')(tensor)
self.assertEquals(loss.op.name, 'L2/value')
self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
def testL2RegularizerWithWeight(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
weight = 0.01
loss = losses.l2_regularizer(weight)(tensor)
self.assertEquals(loss.op.name, 'L2Regularizer/value')
self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5)
def testL1L2Regularizer(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = losses.l1_l2_regularizer()(tensor)
self.assertEquals(loss.op.name, 'L1L2Regularizer/value')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
def testL1L2RegularizerWithScope(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
loss = losses.l1_l2_regularizer(scope='L1L2')(tensor)
self.assertEquals(loss.op.name, 'L1L2/value')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
def testL1L2RegularizerWithWeights(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = tf.constant(1.0, shape=shape)
weight_l1 = 0.01
weight_l2 = 0.05
loss = losses.l1_l2_regularizer(weight_l1, weight_l2)(tensor)
self.assertEquals(loss.op.name, 'L1L2Regularizer/value')
self.assertAlmostEqual(loss.eval(),
num_elem * weight_l1 + num_elem * weight_l2 / 2, 5)
class CrossEntropyLossTest(tf.test.TestCase):
def testCrossEntropyLossAllCorrect(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = losses.cross_entropy_loss(logits, labels)
self.assertEquals(loss.op.name, 'CrossEntropyLoss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testCrossEntropyLossAllWrong(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = losses.cross_entropy_loss(logits, labels)
self.assertEquals(loss.op.name, 'CrossEntropyLoss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testCrossEntropyLossAllWrongWithWeight(self):
with self.test_session():
logits = tf.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = tf.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = losses.cross_entropy_loss(logits, labels, weight=0.5)
self.assertEquals(loss.op.name, 'CrossEntropyLoss/value')
self.assertAlmostEqual(loss.eval(), 5.0, 3)
if __name__ == '__main__':
tf.test.main()
| models-master | inception/inception/slim/losses_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import scopes
from inception.slim import variables
class VariablesTest(tf.test.TestCase):
def testCreateVariable(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
def testGetVariables(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
with tf.variable_scope('B'):
b = variables.variable('a', [5])
self.assertEquals([a, b], variables.get_variables())
self.assertEquals([a], variables.get_variables('A'))
self.assertEquals([b], variables.get_variables('B'))
def testGetVariablesSuffix(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
with tf.variable_scope('A'):
b = variables.variable('b', [5])
self.assertEquals([a], variables.get_variables(suffix='a'))
self.assertEquals([b], variables.get_variables(suffix='b'))
def testGetVariableWithSingleVar(self):
with self.test_session():
with tf.variable_scope('parent'):
a = variables.variable('child', [5])
self.assertEquals(a, variables.get_unique_variable('parent/child'))
def testGetVariableWithDistractors(self):
with self.test_session():
with tf.variable_scope('parent'):
a = variables.variable('child', [5])
with tf.variable_scope('child'):
variables.variable('grandchild1', [7])
variables.variable('grandchild2', [9])
self.assertEquals(a, variables.get_unique_variable('parent/child'))
def testGetVariableThrowsExceptionWithNoMatch(self):
var_name = 'cant_find_me'
with self.test_session():
with self.assertRaises(ValueError):
variables.get_unique_variable(var_name)
def testGetThrowsExceptionWithChildrenButNoMatch(self):
var_name = 'parent/child'
with self.test_session():
with tf.variable_scope(var_name):
variables.variable('grandchild1', [7])
variables.variable('grandchild2', [9])
with self.assertRaises(ValueError):
variables.get_unique_variable(var_name)
def testGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
with tf.variable_scope('B'):
b = variables.variable('a', [5])
self.assertEquals([a, b], variables.get_variables_to_restore())
def testNoneGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5], restore=False)
with tf.variable_scope('B'):
b = variables.variable('a', [5], restore=False)
self.assertEquals([], variables.get_variables_to_restore())
self.assertEquals([a, b], variables.get_variables())
def testGetMixedVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
b = variables.variable('b', [5], restore=False)
with tf.variable_scope('B'):
c = variables.variable('c', [5])
d = variables.variable('d', [5], restore=False)
self.assertEquals([a, b, c, d], variables.get_variables())
self.assertEquals([a, c], variables.get_variables_to_restore())
def testReuseVariable(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [])
with tf.variable_scope('A', reuse=True):
b = variables.variable('a', [])
self.assertEquals(a, b)
self.assertListEqual([a], variables.get_variables())
def testVariableWithDevice(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [], device='cpu:0')
b = variables.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFromScope(self):
with self.test_session():
with tf.device('/cpu:0'):
a = variables.variable('a', [])
b = variables.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFunction(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return 'cpu:%d' % self.counter
with self.test_session():
with scopes.arg_scope([variables.variable], device=DevFn()):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(a.initial_value.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
self.assertDeviceEqual(b.initial_value.device, 'cpu:1')
self.assertDeviceEqual(c.device, 'cpu:12')
self.assertDeviceEqual(c.initial_value.device, 'cpu:12')
self.assertDeviceEqual(d.device, 'cpu:2')
self.assertDeviceEqual(d.initial_value.device, 'cpu:2')
self.assertDeviceEqual(e.device, 'cpu:3')
self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
def testVariableWithReplicaDeviceSetter(self):
with self.test_session():
with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
# The values below highlight how the replica_device_setter puts initial
# values on the worker job, and how it merges explicit devices.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(a.initial_value.device, '/job:worker/cpu:0')
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(b.initial_value.device, '/job:worker/cpu:0')
self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
self.assertDeviceEqual(c.initial_value.device, '/job:worker/cpu:12')
self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(d.initial_value.device, '/job:worker/cpu:0')
self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
def testVariableWithVariableDeviceChooser(self):
with tf.Graph().as_default():
device_fn = variables.VariableDeviceChooser(num_parameter_servers=2)
with scopes.arg_scope([variables.variable], device=device_fn):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(a.initial_value.device, a.device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(b.initial_value.device, b.device)
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertDeviceEqual(c.initial_value.device, c.device)
self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(d.initial_value.device, d.device)
self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableGPUPlacement(self):
with tf.Graph().as_default():
device_fn = variables.VariableDeviceChooser(placement='gpu:0')
with scopes.arg_scope([variables.variable], device=device_fn):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/gpu:0')
self.assertDeviceEqual(a.initial_value.device, a.device)
self.assertDeviceEqual(b.device, '/gpu:0')
self.assertDeviceEqual(b.initial_value.device, b.device)
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertDeviceEqual(c.initial_value.device, c.device)
self.assertDeviceEqual(d.device, '/gpu:0')
self.assertDeviceEqual(d.initial_value.device, d.device)
self.assertDeviceEqual(e.device, '/gpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableCollection(self):
with self.test_session():
a = variables.variable('a', [], collections='A')
b = variables.variable('b', [], collections='B')
self.assertEquals(a, tf.get_collection('A')[0])
self.assertEquals(b, tf.get_collection('B')[0])
def testVariableCollections(self):
with self.test_session():
a = variables.variable('a', [], collections=['A', 'C'])
b = variables.variable('b', [], collections=['B', 'C'])
self.assertEquals(a, tf.get_collection('A')[0])
self.assertEquals(b, tf.get_collection('B')[0])
def testVariableCollectionsWithArgScope(self):
with self.test_session():
with scopes.arg_scope([variables.variable], collections='A'):
a = variables.variable('a', [])
b = variables.variable('b', [])
self.assertListEqual([a, b], tf.get_collection('A'))
def testVariableCollectionsWithArgScopeNested(self):
with self.test_session():
with scopes.arg_scope([variables.variable], collections='A'):
a = variables.variable('a', [])
with scopes.arg_scope([variables.variable], collections='B'):
b = variables.variable('b', [])
self.assertEquals(a, tf.get_collection('A')[0])
self.assertEquals(b, tf.get_collection('B')[0])
def testVariableCollectionsWithArgScopeNonNested(self):
with self.test_session():
with scopes.arg_scope([variables.variable], collections='A'):
a = variables.variable('a', [])
with scopes.arg_scope([variables.variable], collections='B'):
b = variables.variable('b', [])
variables.variable('c', [])
self.assertListEqual([a], tf.get_collection('A'))
self.assertListEqual([b], tf.get_collection('B'))
def testVariableRestoreWithArgScopeNested(self):
with self.test_session():
with scopes.arg_scope([variables.variable], restore=True):
a = variables.variable('a', [])
with scopes.arg_scope([variables.variable],
trainable=False,
collections=['A', 'B']):
b = variables.variable('b', [])
c = variables.variable('c', [])
self.assertListEqual([a, b, c], variables.get_variables_to_restore())
self.assertListEqual([a, c], tf.trainable_variables())
self.assertListEqual([b], tf.get_collection('A'))
self.assertListEqual([b], tf.get_collection('B'))
class GetVariablesByNameTest(tf.test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
b = variables.variable('b', [5])
self.assertEquals([a], variables.get_variables_by_name('a'))
self.assertEquals([b], variables.get_variables_by_name('b'))
def testGetVariablesByNameReturnsByValueWithScope(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
matched_variables = variables.get_variables_by_name('a')
# If variables.get_variables_by_name returns the list by reference, the
# following append should persist, and be returned, in subsequent calls
# to variables.get_variables_by_name('a').
matched_variables.append(4)
matched_variables = variables.get_variables_by_name('a')
self.assertEquals([a], matched_variables)
def testGetVariablesByNameReturnsByValueWithoutScope(self):
with self.test_session():
a = variables.variable('a', [5])
matched_variables = variables.get_variables_by_name('a')
# If variables.get_variables_by_name returns the list by reference, the
# following append should persist, and be returned, in subsequent calls
# to variables.get_variables_by_name('a').
matched_variables.append(4)
matched_variables = variables.get_variables_by_name('a')
self.assertEquals([a], matched_variables)
class GlobalStepTest(tf.test.TestCase):
def testStable(self):
with tf.Graph().as_default():
gs = variables.global_step()
gs2 = variables.global_step()
self.assertTrue(gs is gs2)
def testDevice(self):
with tf.Graph().as_default():
with scopes.arg_scope([variables.global_step], device='/gpu:0'):
gs = variables.global_step()
self.assertDeviceEqual(gs.device, '/gpu:0')
def testDeviceFn(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return '/cpu:%d' % self.counter
with tf.Graph().as_default():
with scopes.arg_scope([variables.global_step], device=DevFn()):
gs = variables.global_step()
gs2 = variables.global_step()
self.assertDeviceEqual(gs.device, '/cpu:0')
self.assertEquals(gs, gs2)
self.assertDeviceEqual(gs2.device, '/cpu:0')
def testReplicaDeviceSetter(self):
device_fn = tf.train.replica_device_setter(2)
with tf.Graph().as_default():
with scopes.arg_scope([variables.global_step], device=device_fn):
gs = variables.global_step()
gs2 = variables.global_step()
self.assertEquals(gs, gs2)
self.assertDeviceEqual(gs.device, '/job:ps/task:0')
self.assertDeviceEqual(gs.initial_value.device, '/job:ps/task:0')
self.assertDeviceEqual(gs2.device, '/job:ps/task:0')
self.assertDeviceEqual(gs2.initial_value.device, '/job:ps/task:0')
def testVariableWithVariableDeviceChooser(self):
with tf.Graph().as_default():
device_fn = variables.VariableDeviceChooser()
with scopes.arg_scope([variables.global_step], device=device_fn):
gs = variables.global_step()
gs2 = variables.global_step()
self.assertEquals(gs, gs2)
self.assertDeviceEqual(gs.device, 'cpu:0')
self.assertDeviceEqual(gs.initial_value.device, gs.device)
self.assertDeviceEqual(gs2.device, 'cpu:0')
self.assertDeviceEqual(gs2.initial_value.device, gs2.device)
if __name__ == '__main__':
tf.test.main()
| models-master | inception/inception/slim/variables_test.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
Associate the ImageNet 2012 Challenge validation data set with labels.
The raw ImageNet validation data set is expected to reside in JPEG files
located in the following directory structure.
data_dir/ILSVRC2012_val_00000001.JPEG
data_dir/ILSVRC2012_val_00000002.JPEG
...
data_dir/ILSVRC2012_val_00050000.JPEG
This script moves the files into a directory structure like such:
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
This directory reorganization requires a mapping from validation image
number (i.e. suffix of the original file) to the associated label. This
is provided in the ImageNet development kit via a Matlab file.
In order to make life easier and divorce ourselves from Matlab, we instead
supply a custom text file that provides this mapping for us.
Sample usage:
./preprocess_imagenet_validation_data.py ILSVRC2012_img_val \
imagenet_2012_validation_synset_labels.txt
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
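# Illustrative example (the synsets below are hypothetical placeholders): if
# the first two lines of the labels file were
#   n01751748
#   n09193705
# then this script would move
#   data_dir/ILSVRC2012_val_00000001.JPEG -> data_dir/n01751748/...
#   data_dir/ILSVRC2012_val_00000002.JPEG -> data_dir/n09193705/...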
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Invalid usage\n'
'usage: preprocess_imagenet_validation_data.py '
'<validation data dir> <validation labels file>')
sys.exit(-1)
data_dir = sys.argv[1]
validation_labels_file = sys.argv[2]
# Read in the 50000 synsets associated with the validation data set.
labels = [l.strip() for l in open(validation_labels_file).readlines()]
unique_labels = set(labels)
# Make all sub-directories in the validation data dir.
for label in unique_labels:
labeled_data_dir = os.path.join(data_dir, label)
os.makedirs(labeled_data_dir)
  # Move all of the images to the appropriate sub-directories.
for i in xrange(len(labels)):
basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
original_filename = os.path.join(data_dir, basename)
if not os.path.exists(original_filename):
      print('Failed to find: %s' % original_filename)
      sys.exit(-1)
new_filename = os.path.join(data_dir, labels[i], basename)
os.rename(original_filename, new_filename)
| models-master | inception/inception/data/preprocess_imagenet_validation_data.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
  train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
    bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one synset per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
    bbox: list of bounding boxes; each box is a list of floats
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
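# NOTE: The helper below is an illustrative sketch, not part of the original
# pipeline. It shows one way the serialized Examples produced by
# _convert_to_example above could be parsed back when reading the TFRecords,
# assuming a TF 1.x-style tf.parse_single_example API. The function name and
# the subset of feature keys chosen here are this sketch's own choices.
def _parse_example_proto_sketch(serialized_example):
  """Parses the image buffer, label and bounding boxes from one Example."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                          default_value=''),
      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                              default_value=-1),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
  }
  features = tf.parse_single_example(serialized_example, feature_map)
  # The encoded JPEG can be decoded with tf.image.decode_jpeg; the bbox
  # features come back as SparseTensors because each image has 0+ boxes.
  return features['image/encoded'], features['image/class/label']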
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique thread index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of files each
      batch analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of
      bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
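  # Illustrative example with hypothetical numbers: if this thread is
  # responsible for files [0, 640) and num_shards_per_batch is 64, then
  # shard_ranges is [0, 10, 20, ..., 640], i.e. each of this thread's shards
  # covers 10 consecutive files.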
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of
      bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches with a range of [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
    List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries corresponding to the number of
    bounding box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
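# Illustrative example: the sample line shown in the docstring above would
# produce images_to_bboxes['n00007846_64193.JPEG'] ==
# [[0.006, 0.262, 0.7545, 0.994]], with further boxes appended to that list
# if the same file name appears on later lines.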
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
| models-master | inception/inception/data/build_imagenet_data.py |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation useable,
we convert the bounding box to floating point numbers relative to the
displayed height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped at a range of [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets are outputted. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
return int(GetItem(name, root, index))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in xrange(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
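# Illustrative sketch of the XML layout this parser expects (tag names only;
# real ImageNet annotation files contain additional fields):
#
#   <annotation>
#     <filename>n02084071_1234</filename>
#     <size><width>500</width><height>375</height></size>
#     <object>
#       <name>n02084071</name>
#       <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>300</xmax><ymax>350</ymax></bndbox>
#     </object>
#   </annotation>
#
# Because GetItem/GetInt use root.iter(name), tags are matched by name anywhere
# in the tree, so the exact nesting is not load-bearing.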
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
# *which are synset ID's* that do not match original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
| models-master | inception/inception/data/process_bounding_boxes.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
  train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one label per line.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer starting from 1 (index 0 is reserved as an unused
# background class, matching _find_image_files below).
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(text),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
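# NOTE: Illustrative sketch, not part of the original script. One way to
# spot-check a shard written by this converter is to iterate over it with
# tf.python_io.tf_record_iterator and re-parse each serialized Example with
# the tf.train.Example proto. The default path below is a hypothetical
# placeholder matching the default flags (2 train shards written to /tmp).
def _inspect_tfrecord_sketch(tfrecord_path='/tmp/train-00000-of-00002'):
  """Prints the filename and label stored in each Example of one shard."""
  for serialized in tf.python_io.tf_record_iterator(tfrecord_path):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    feature = example.features.feature
    filename = feature['image/filename'].bytes_list.value[0]
    label = feature['image/class/label'].int64_list.value[0]
    print('%s: label=%d' % (filename, label))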
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique thread index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of files each
      batch analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
  # Break all images into batches with a range of [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
      where each line corresponds to a label. We map each label contained in
      the file to an integer starting with the integer 1 corresponding to the
      label contained in the first line (index 0 is reserved as an unused
      background class).
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
          label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
| models-master | inception/inception/data/build_image_data.py |
#!/bin/python
from subprocess import call, Popen
import time
import signal
import sys, os
def kill_exp():
call(["pkill", "-u", "daniter", "-f","imagenet_distributed_train"])
call(["ssh", "raiders3","pkill -u daniter -f imagenet_distributed_train"])
def signal_handler(signal, frame):
kill_exp()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
momentum = [0.3, 0.0, -0.3]
lr = [0.001, 0.005, 0.0005]
SYNC = "True"
ASYNC = "False"
exe = "./local_runner.sh"
prefix = "LongRun-16-2hr"
runs = [(SYNC, 0.001, 0.6)]
def run(m,l, sync):
print("Testing (%s) momentum: %f, learning rate: %f" % (sync,m,l))
cmd = [exe, str(l), str(m), sync, prefix]
print " ".join(cmd)
call(cmd)
time.sleep(120*60)
kill_exp()
if __name__=='__main__':
print("Starting experiments:")
for r in runs:
sync = r[0]
l = r[1]
m = r[2]
async_dir_name = "%s-%s-%s-%s" % (prefix, str(l), str(m), ASYNC)
sync_dir_name = "%s-%s-%s-%s" % (prefix, str(l), str(m), SYNC)
if sync == ASYNC:
if os.path.exists(async_dir_name):
print("Skipping " + async_dir_name)
else:
run(m,l, ASYNC)
else:
if os.path.exists(sync_dir_name):
print("Skipping " + sync_dir_name)
else:
run(m,l, SYNC)
| models-master | inception/utils/experiments-v2.py |
#!/bin/python
from subprocess import call, Popen
import time
import signal
import sys, os
def kill_exp():
call(["pkill", "-u", "daniter", "-f","imagenet_distributed_train"])
call(["ssh", "raiders3","pkill -u daniter -f imagenet_distributed_train"])
def signal_handler(signal, frame):
kill_exp()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
momentum = [0.3, 0.0, -0.3]
lr = [0.001, 0.005, 0.0005]
SYNC = "True"
ASYNC = "False"
exe = "./local_runner.sh"
prefix = "LongRun-16-seed-32345"
def isFailed(folder):
for log in os.listdir(folder):
with open(folder+"/"+log) as f:
if log == "ps.out":
for line in f.readlines():
if "assertion failed" in line:
return True
else:
for line in f.readlines():
if "failed to connect to" in line:
return True
return False
def run(m,l, sync):
print("Testing (%s) momentum: %f, learning rate: %f" % (sync,m,l))
if SYNC == sync:
m += 0.6
cmd = [exe, str(l), str(m), sync, prefix]
print " ".join(cmd)
call(cmd)
if sync == ASYNC:
dir_name = "%s-%s-%s-%s" % (prefix, str(l), str(m), ASYNC)
else:
dir_name = "%s-%s-%s-%s" % (prefix, str(l), str(m), SYNC)
for i in range(3): # try retry twice
time.sleep(2*60) # give 2 minutes to set up run
if isFailed(dir_name):
print("trying again")
kill_exp()
if i == 2:
return
call(cmd)
else:
break
time.sleep(28*60) # 28 + 2 = 30
kill_exp()
if __name__=='__main__':
print("Starting experiments:")
for m in momentum:
for l in lr:
async_dir_name = "%s-%s-%s-%s" % (prefix, str(l), str(m), ASYNC)
sync_dir_name = "%s-%s-%s-%s" % (prefix, str(l), str(m+0.6), SYNC)
if os.path.exists(async_dir_name):
print("Skipping " + async_dir_name)
else:
run(m,l, ASYNC)
if os.path.exists(sync_dir_name):
print("Skipping " + sync_dir_name)
else:
run(m,l, SYNC)
| models-master | inception/utils/experiments.py |
#!/bin/python
import sys
from os import listdir
import os
from shutil import copyfile
if __name__=='__main__':
if len(sys.argv) != 2:
print("Usage: %s <data-folder>" % __file__)
exit(1)
folder = sys.argv[1]
for i in range(16):
os.mkdir(folder+"-"+str(i))
#print folder+"-"+str(i)
j = 0
for f in listdir(folder):
src = folder+"/"+f
dst = folder+"-"+str(j%16)+"/"+f
#print src
#print dst
copyfile(src, dst)
j += 1
| models-master | inception/utils/make-data-directories.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("file_name", "", "Checkpoint filename")
tf.app.flags.DEFINE_string("file_two", "", "other checkpoint")
tf.app.flags.DEFINE_string("tensor_name", "", "Name of the tensor to inspect")
def print_tensors_in_checkpoint_file(file_name, file_two, tensor_name):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
  Args:
    file_name: Name of the first checkpoint file.
    file_two: Name of the second checkpoint file to compare against.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
try:
reader1 = tf.train.NewCheckpointReader(file_name)
reader2 = tf.train.NewCheckpointReader(file_two)
keys2 = reader2.get_variable_to_shape_map().keys()
for k,v in reader1.get_variable_to_shape_map().items():
if k in keys2:
if not (reader1.get_tensor(k) == reader2.get_tensor(k)).all():
print(k)
if "Momentum" in k:
print(reader1.get_tensor(k).shape)
else:
print("missing key: %s" % k)
exit()
    if not tensor_name:
      print(reader1.debug_string().decode("utf-8"))
    else:
      print("tensor_name: ", tensor_name)
      print(reader1.get_tensor(tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=checkpoint_file_name "
"[--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.file_two, FLAGS.tensor_name)
if __name__ == "__main__":
tf.app.run()
| models-master | inception/utils/checkpoints/compare_checkpoints.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("file_name", "", "Checkpoint filename")
tf.app.flags.DEFINE_string("tensor_name", "", "Name of the tensor to inspect")
def print_tensors_in_checkpoint_file(file_name, tensor_name):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
"""
try:
reader = tf.train.NewCheckpointReader(file_name)
for k,v in reader.get_variable_to_shape_map().items():
if "Queue" in k:
print(k, v)
exit()
if not tensor_name:
print(reader.debug_string().decode("utf-8"))
else:
print("tensor_name: ", tensor_name)
print(reader.get_tensor(tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=checkpoint_file_name "
"[--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
if __name__ == "__main__":
tf.app.run()
| models-master | inception/utils/checkpoints/inspect_checkpoint.py |
#!/bin/python
import sys
from tensorflow.core.framework import graph_pb2
from google.protobuf import text_format
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.core.framework.variable_pb2 import SaveSliceInfoDef
import tensorflow as tf
ckpt = SaveSliceInfoDef()
print(dir(ckpt))
with open(sys.argv[1], "rb") as f:
  text_format.Merge(f.read(), ckpt)
exit()
graph_def = graph_pb2.GraphDef()
with open(sys.argv[1], "rb") as f:
text_format.Merge(f.read(), graph_def)
for node in graph_def.node:
if "local_steps" in node.name:
print(node.input)
| models-master | inception/utils/checkpoints/fix_models.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import transformer
import numpy as np
import matplotlib.pyplot as plt
# %% Create a batch of three images (1600 x 1200)
# %% Image retrieved from:
# %% https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('cat.jpg')
im = im / 255.
im = im.reshape(1, 1200, 1600, 3)
im = im.astype('float32')
# %% Let the output size of the transformer be half the image size.
out_size = (600, 800)
# %% Simulate batch
batch = np.append(im, im, axis=0)
batch = np.append(batch, im, axis=0)
num_batch = 3
x = tf.placeholder(tf.float32, [None, 1200, 1600, 3])
# %% Create localisation network and convolutional layer
with tf.variable_scope('spatial_transformer_0'):
# %% Create a fully-connected layer with 6 output nodes
n_fc = 6
W_fc1 = tf.Variable(tf.zeros([1200 * 1600 * 3, n_fc]), name='W_fc1')
# %% Zoom into the image
initial = np.array([[0.5, 0, 0], [0, 0.5, 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')
h_fc1 = tf.matmul(tf.zeros([num_batch, 1200 * 1600 * 3]), W_fc1) + b_fc1
h_trans = transformer(x, h_fc1, out_size)
# %% Run session
sess = tf.Session()
sess.run(tf.initialize_all_variables())
y = sess.run(h_trans, feed_dict={x: batch})
# plt.imshow(y[0])
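# Sanity check (illustrative expectation, not part of the original script):
# with three input images and out_size=(600, 800), the transformed batch
# should come back with shape (3, 600, 800, 3).
# assert y.shape == (3, 600, 800, 3)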
| models-master | transformer/example.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow as tf
from spatial_transformer import transformer
import numpy as np
from tf_utils import weight_variable, bias_variable, dense_to_one_hot
# %% Load data
mnist_cluttered = np.load('./data/mnist_sequence1_sample_5distortions5x5.npz')
X_train = mnist_cluttered['X_train']
y_train = mnist_cluttered['y_train']
X_valid = mnist_cluttered['X_valid']
y_valid = mnist_cluttered['y_valid']
X_test = mnist_cluttered['X_test']
y_test = mnist_cluttered['y_test']
# %% Turn from dense to one-hot representation
Y_train = dense_to_one_hot(y_train, n_classes=10)
Y_valid = dense_to_one_hot(y_valid, n_classes=10)
Y_test = dense_to_one_hot(y_test, n_classes=10)
# %% Graph representation of our network
# %% Placeholders for 40x40 resolution
x = tf.placeholder(tf.float32, [None, 1600])
y = tf.placeholder(tf.float32, [None, 10])
# %% Since x is currently [batch, height*width], we need to reshape to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 40, 40, 1])
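# For example (illustrative): a feed of shape [64, 1600] becomes a
# [64, 40, 40, 1] tensor after this reshape, since 40 * 40 * 1 == 1600.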
# %% We'll setup the two-layer localisation network to figure out the
# %% parameters for an affine transformation of the input
# %% Create variables for fully connected layer
W_fc_loc1 = weight_variable([1600, 20])
b_fc_loc1 = bias_variable([20])
W_fc_loc2 = weight_variable([20, 6])
# Use identity transformation as starting point
initial = np.array([[1., 0, 0], [0, 1., 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
# %% Define the two layer localisation network
h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
keep_prob = tf.placeholder(tf.float32)
h_fc_loc1_drop = tf.nn.dropout(h_fc_loc1, keep_prob)
# %% Second layer
h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1_drop, W_fc_loc2) + b_fc_loc2)
# %% We'll create a spatial transformer module to identify discriminative
# %% patches
out_size = (40, 40)
h_trans = transformer(x_tensor, h_fc_loc2, out_size)
# %% We'll setup the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 3
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])
# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])
# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels
# instead of pooling, we use strides of 2 and more layers
# with smaller filters.
h_conv1 = tf.nn.relu(
tf.nn.conv2d(input=h_trans,
filter=W_conv1,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv1)
# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
tf.nn.conv2d(input=h_conv1,
filter=W_conv2,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv2)
# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 10 * 10 * n_filters_2])
# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([10 * 10 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_pred = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
opt = tf.train.AdamOptimizer()
optimizer = opt.minimize(cross_entropy)
grads = opt.compute_gradients(cross_entropy, [b_fc_loc2])
# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# %% We now create a new session to actually perform the initialization of
# the variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% We'll now train in minibatches and report accuracy, loss:
iter_per_epoch = 100
n_epochs = 500
train_size = 10000
indices = np.linspace(0, 10000 - 1, iter_per_epoch)
indices = indices.astype('int')
for epoch_i in range(n_epochs):
for iter_i in range(iter_per_epoch - 1):
batch_xs = X_train[indices[iter_i]:indices[iter_i+1]]
batch_ys = Y_train[indices[iter_i]:indices[iter_i+1]]
if iter_i % 10 == 0:
loss = sess.run(cross_entropy,
feed_dict={
x: batch_xs,
y: batch_ys,
keep_prob: 1.0
})
print('Iteration: ' + str(iter_i) + ' Loss: ' + str(loss))
sess.run(optimizer, feed_dict={
x: batch_xs, y: batch_ys, keep_prob: 0.8})
print('Accuracy (%d): ' % epoch_i + str(sess.run(accuracy,
feed_dict={
x: X_valid,
y: Y_valid,
keep_prob: 1.0
})))
# theta = sess.run(h_fc_loc2, feed_dict={
# x: batch_xs, keep_prob: 1.0})
# print(theta[0])
| models-master | transformer/cluttered_mnist.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# %% Borrowed utils from here: https://github.com/pkmital/tensorflow_tutorials/
import tensorflow as tf
import numpy as np
def conv2d(x, n_filters,
k_h=5, k_w=5,
stride_h=2, stride_w=2,
stddev=0.02,
activation=lambda x: x,
bias=True,
padding='SAME',
name="Conv2D"):
"""2D Convolution with options for kernel size, stride, and init deviation.
Parameters
----------
x : Tensor
Input tensor to convolve.
n_filters : int
Number of filters to apply.
k_h : int, optional
Kernel height.
k_w : int, optional
Kernel width.
stride_h : int, optional
Stride in rows.
stride_w : int, optional
Stride in cols.
stddev : float, optional
Initialization's standard deviation.
activation : arguments, optional
Function which applies a nonlinearity
padding : str, optional
'SAME' or 'VALID'
name : str, optional
Variable scope to use.
Returns
-------
x : Tensor
Convolved input.
"""
with tf.variable_scope(name):
w = tf.get_variable(
'w', [k_h, k_w, x.get_shape()[-1], n_filters],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(
x, w, strides=[1, stride_h, stride_w, 1], padding=padding)
if bias:
b = tf.get_variable(
'b', [n_filters],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = conv + b
        # Apply the requested nonlinearity (the default is the identity).
        return activation(conv)
def linear(x, n_units, scope=None, stddev=0.02,
activation=lambda x: x):
"""Fully-connected network.
Parameters
----------
x : Tensor
Input tensor to the network.
n_units : int
Number of units to connect to.
scope : str, optional
Variable scope to use.
stddev : float, optional
Initialization's standard deviation.
activation : arguments, optional
Function which applies a nonlinearity
Returns
-------
x : Tensor
Fully-connected output.
"""
shape = x.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], n_units], tf.float32,
tf.random_normal_initializer(stddev=stddev))
return activation(tf.matmul(x, matrix))
# %%
def weight_variable(shape):
  '''Helper function to create a weight variable, initialized here to zeros
  (a random-normal initializer is left commented out below).
Parameters
----------
shape : list
Size of weight variable
'''
#initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
initial = tf.zeros(shape)
return tf.Variable(initial)
# %%
def bias_variable(shape):
'''Helper function to create a bias variable initialized with
a constant value.
Parameters
----------
shape : list
    Size of bias variable
'''
initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(initial)
# %%
def dense_to_one_hot(labels, n_classes=2):
"""Convert class labels from scalars to one-hot vectors."""
labels = np.array(labels)
n_labels = labels.shape[0]
index_offset = np.arange(n_labels) * n_classes
labels_one_hot = np.zeros((n_labels, n_classes), dtype=np.float32)
labels_one_hot.flat[index_offset + labels.ravel()] = 1
return labels_one_hot
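# Example (illustrative, not part of the original module):
#   dense_to_one_hot(np.array([0, 2, 3]), n_classes=4)
# returns
#   [[1., 0., 0., 0.],
#    [0., 0., 1., 0.],
#    [0., 0., 0., 1.]]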
| models-master | transformer/tf_utils.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs):
"""Spatial Transformer Layer
Implements a spatial transformer layer as described in [1]_.
Based on [2]_ and edited by David Dao for Tensorflow.
Parameters
----------
U : float
The output of a convolutional net should have the
shape [num_batch, height, width, num_channels].
theta: float
The output of the
localisation network should be [num_batch, 6].
out_size: tuple of two ints
The size of the output of the network (height, width)
References
----------
.. [1] Spatial Transformer Networks
Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
Submitted on 5 Jun 2015
.. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
Notes
-----
To initialize the network to the identity transform init
``theta`` to :
identity = np.array([[1., 0., 0.],
[0., 1., 0.]])
identity = identity.flatten()
theta = tf.Variable(initial_value=identity)
"""
def _repeat(x, n_repeats):
with tf.variable_scope('_repeat'):
rep = tf.transpose(
tf.expand_dims(tf.ones(shape=tf.pack([n_repeats, ])), 1), [1, 0])
rep = tf.cast(rep, 'int32')
x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
return tf.reshape(x, [-1])
def _interpolate(im, x, y, out_size):
with tf.variable_scope('_interpolate'):
# constants
num_batch = tf.shape(im)[0]
height = tf.shape(im)[1]
width = tf.shape(im)[2]
channels = tf.shape(im)[3]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
height_f = tf.cast(height, 'float32')
width_f = tf.cast(width, 'float32')
out_height = out_size[0]
out_width = out_size[1]
zero = tf.zeros([], dtype='int32')
max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
# scale indices from [-1, 1] to [0, width/height]
x = (x + 1.0)*(width_f) / 2.0
y = (y + 1.0)*(height_f) / 2.0
# do sampling
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
dim2 = width
dim1 = width*height
base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
base_y0 = base + y0*dim2
base_y1 = base + y1*dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# use indices to lookup pixels in the flat image and restore
# channels dim
im_flat = tf.reshape(im, tf.pack([-1, channels]))
im_flat = tf.cast(im_flat, 'float32')
Ia = tf.gather(im_flat, idx_a)
Ib = tf.gather(im_flat, idx_b)
Ic = tf.gather(im_flat, idx_c)
Id = tf.gather(im_flat, idx_d)
# and finally calculate interpolated values
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
return output
def _meshgrid(height, width):
with tf.variable_scope('_meshgrid'):
# This should be equivalent to:
# x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
# np.linspace(-1, 1, height))
# ones = np.ones(np.prod(x_t.shape))
# grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
tf.ones(shape=tf.pack([1, width])))
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
return grid
def _transform(theta, input_dim, out_size):
with tf.variable_scope('_transform'):
num_batch = tf.shape(input_dim)[0]
height = tf.shape(input_dim)[1]
width = tf.shape(input_dim)[2]
num_channels = tf.shape(input_dim)[3]
theta = tf.reshape(theta, (-1, 2, 3))
theta = tf.cast(theta, 'float32')
# grid of (x_t, y_t, 1), eq (1) in ref [1]
height_f = tf.cast(height, 'float32')
width_f = tf.cast(width, 'float32')
out_height = out_size[0]
out_width = out_size[1]
grid = _meshgrid(out_height, out_width)
grid = tf.expand_dims(grid, 0)
grid = tf.reshape(grid, [-1])
grid = tf.tile(grid, tf.pack([num_batch]))
grid = tf.reshape(grid, tf.pack([num_batch, 3, -1]))
# Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
T_g = tf.batch_matmul(theta, grid)
x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
x_s_flat = tf.reshape(x_s, [-1])
y_s_flat = tf.reshape(y_s, [-1])
input_transformed = _interpolate(
input_dim, x_s_flat, y_s_flat,
out_size)
output = tf.reshape(
input_transformed, tf.pack([num_batch, out_height, out_width, num_channels]))
return output
with tf.variable_scope(name):
output = _transform(theta, U, out_size)
return output
def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'):
"""Batch Spatial Transformer Layer
Parameters
----------
U : float
tensor of inputs [num_batch,height,width,num_channels]
thetas : float
a set of transformations for each input [num_batch,num_transforms,6]
out_size : int
the size of the output [out_height,out_width]
Returns: float
Tensor of size [num_batch*num_transforms,out_height,out_width,num_channels]
"""
with tf.variable_scope(name):
num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2])
indices = [[i]*num_transforms for i in xrange(num_batch)]
input_repeated = tf.gather(U, tf.reshape(indices, [-1]))
return transformer(input_repeated, thetas, out_size)
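# Shape sketch (illustrative): with U of shape [2, 40, 40, 1], thetas of
# shape [2, 3, 6] (three affine transforms per input) and out_size=(40, 40),
# batch_transformer returns a tensor of shape [6, 40, 40, 1] -- each input is
# repeated once per transform before being passed to transformer().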
| models-master | transformer/spatial_transformer.py |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Neural GPU Model."""
import time
import tensorflow as tf
import data_utils
def conv_linear(args, kw, kh, nin, nout, do_bias, bias_start, prefix):
"""Convolutional linear map."""
assert args is not None
if not isinstance(args, (list, tuple)):
args = [args]
with tf.variable_scope(prefix):
k = tf.get_variable("CvK", [kw, kh, nin, nout])
if len(args) == 1:
res = tf.nn.conv2d(args[0], k, [1, 1, 1, 1], "SAME")
else:
res = tf.nn.conv2d(tf.concat(3, args), k, [1, 1, 1, 1], "SAME")
if not do_bias: return res
bias_term = tf.get_variable("CvB", [nout],
initializer=tf.constant_initializer(0.0))
return res + bias_term + bias_start
def sigmoid_cutoff(x, cutoff):
"""Sigmoid with cutoff, e.g., 1.2sigmoid(x) - 0.1."""
y = tf.sigmoid(x)
if cutoff < 1.01: return y
d = (cutoff - 1.0) / 2.0
return tf.minimum(1.0, tf.maximum(0.0, cutoff * y - d))
def tanh_cutoff(x, cutoff):
"""Tanh with cutoff, e.g., 1.1tanh(x) cut to [-1. 1]."""
y = tf.tanh(x)
if cutoff < 1.01: return y
d = (cutoff - 1.0) / 2.0
return tf.minimum(1.0, tf.maximum(-1.0, (1.0 + d) * y))
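# Numeric sketch (illustrative): with cutoff=1.2, sigmoid_cutoff computes
# min(1, max(0, 1.2 * sigmoid(x) - 0.1)), so large positive x saturates at
# exactly 1.0 and large negative x at exactly 0.0 instead of only
# approaching those values asymptotically.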
def conv_gru(inpts, mem, kw, kh, nmaps, cutoff, prefix):
"""Convolutional GRU."""
def conv_lin(args, suffix, bias_start):
return conv_linear(args, kw, kh, len(args) * nmaps, nmaps, True, bias_start,
prefix + "/" + suffix)
reset = sigmoid_cutoff(conv_lin(inpts + [mem], "r", 1.0), cutoff)
# candidate = tanh_cutoff(conv_lin(inpts + [reset * mem], "c", 0.0), cutoff)
candidate = tf.tanh(conv_lin(inpts + [reset * mem], "c", 0.0))
gate = sigmoid_cutoff(conv_lin(inpts + [mem], "g", 1.0), cutoff)
return gate * mem + (1 - gate) * candidate
@tf.RegisterGradient("CustomIdG")
def _custom_id_grad(_, grads):
return grads
def quantize(t, quant_scale, max_value=1.0):
"""Quantize a tensor t with each element in [-max_value, max_value]."""
t = tf.minimum(max_value, tf.maximum(t, -max_value))
big = quant_scale * (t + max_value) + 0.5
with tf.get_default_graph().gradient_override_map({"Floor": "CustomIdG"}):
res = (tf.floor(big) / quant_scale) - max_value
return res
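# Numeric sketch (illustrative): quantize(t, quant_scale=2, max_value=1.0)
# snaps each element to the nearest multiple of 1/2 inside [-1, 1]; e.g. 0.3
# maps to 0.5 (floor(2 * 1.3 + 0.5) / 2 - 1 = 0.5), while gradients pass
# through unchanged thanks to the "CustomIdG" override on Floor.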
def quantize_weights_op(quant_scale, max_value):
ops = [v.assign(quantize(v, quant_scale, float(max_value)))
for v in tf.trainable_variables()]
return tf.group(*ops)
def relaxed_average(var_name_suffix, rx_step):
"""Calculate the average of relaxed variables having var_name_suffix."""
relaxed_vars = []
for l in xrange(rx_step):
with tf.variable_scope("RX%d" % l, reuse=True):
try:
relaxed_vars.append(tf.get_variable(var_name_suffix))
except ValueError:
pass
dsum = tf.add_n(relaxed_vars)
avg = dsum / len(relaxed_vars)
diff = [v - avg for v in relaxed_vars]
davg = tf.add_n([d*d for d in diff])
return avg, tf.reduce_sum(davg)
def relaxed_distance(rx_step):
"""Distance between relaxed variables and their average."""
res, ops, rx_done = [], [], {}
for v in tf.trainable_variables():
if v.name[0:2] == "RX":
rx_name = v.op.name[v.name.find("/") + 1:]
if rx_name not in rx_done:
avg, dist_loss = relaxed_average(rx_name, rx_step)
res.append(dist_loss)
rx_done[rx_name] = avg
ops.append(v.assign(rx_done[rx_name]))
return tf.add_n(res), tf.group(*ops)
def make_dense(targets, noclass):
"""Move a batch of targets to a dense 1-hot representation."""
with tf.device("/cpu:0"):
shape = tf.shape(targets)
batch_size = shape[0]
indices = targets + noclass * tf.range(0, batch_size)
length = tf.expand_dims(batch_size * noclass, 0)
dense = tf.sparse_to_dense(indices, length, 1.0, 0.0)
return tf.reshape(dense, [-1, noclass])
def check_for_zero(sparse):
"""In a sparse batch of ints, make 1.0 if it's 0 and 0.0 else."""
with tf.device("/cpu:0"):
shape = tf.shape(sparse)
batch_size = shape[0]
sparse = tf.minimum(sparse, 1)
indices = sparse + 2 * tf.range(0, batch_size)
dense = tf.sparse_to_dense(indices, tf.expand_dims(2 * batch_size, 0),
1.0, 0.0)
reshaped = tf.reshape(dense, [-1, 2])
return tf.reshape(tf.slice(reshaped, [0, 0], [-1, 1]), [-1])
class NeuralGPU(object):
"""Neural GPU Model."""
def __init__(self, nmaps, vec_size, niclass, noclass, dropout, rx_step,
max_grad_norm, cutoff, nconvs, kw, kh, height, mode,
learning_rate, pull, pull_incr, min_length, act_noise=0.0):
# Feeds for parameters and ops to update them.
self.global_step = tf.Variable(0, trainable=False)
self.cur_length = tf.Variable(min_length, trainable=False)
self.cur_length_incr_op = self.cur_length.assign_add(1)
self.lr = tf.Variable(float(learning_rate), trainable=False)
self.lr_decay_op = self.lr.assign(self.lr * 0.98)
self.pull = tf.Variable(float(pull), trainable=False)
self.pull_incr_op = self.pull.assign(self.pull * pull_incr)
self.do_training = tf.placeholder(tf.float32, name="do_training")
self.noise_param = tf.placeholder(tf.float32, name="noise_param")
# Feeds for inputs, targets, outputs, losses, etc.
self.input = []
self.target = []
for l in xrange(data_utils.forward_max + 1):
self.input.append(tf.placeholder(tf.int32, name="inp{0}".format(l)))
self.target.append(tf.placeholder(tf.int32, name="tgt{0}".format(l)))
self.outputs = []
self.losses = []
self.grad_norms = []
self.updates = []
# Computation.
inp0_shape = tf.shape(self.input[0])
batch_size = inp0_shape[0]
with tf.device("/cpu:0"):
emb_weights = tf.get_variable(
"embedding", [niclass, vec_size],
initializer=tf.random_uniform_initializer(-1.7, 1.7))
e0 = tf.scatter_update(emb_weights,
tf.constant(0, dtype=tf.int32, shape=[1]),
tf.zeros([1, vec_size]))
adam = tf.train.AdamOptimizer(self.lr, epsilon=1e-4)
# Main graph creation loop, for every bin in data_utils.
self.steps = []
for length in sorted(list(set(data_utils.bins + [data_utils.forward_max]))):
data_utils.print_out("Creating model for bin of length %d." % length)
start_time = time.time()
if length > data_utils.bins[0]:
tf.get_variable_scope().reuse_variables()
# Embed inputs and calculate mask.
with tf.device("/cpu:0"):
with tf.control_dependencies([e0]):
embedded = [tf.nn.embedding_lookup(emb_weights, self.input[l])
for l in xrange(length)]
# Mask to 0-out padding space in each step.
imask = [check_for_zero(self.input[l]) for l in xrange(length)]
omask = [check_for_zero(self.target[l]) for l in xrange(length)]
mask = [1.0 - (imask[i] * omask[i]) for i in xrange(length)]
mask = [tf.reshape(m, [-1, 1]) for m in mask]
# Use a shifted mask for step scaling and concatenated for weights.
shifted_mask = mask + [tf.zeros_like(mask[0])]
scales = [shifted_mask[i] * (1.0 - shifted_mask[i+1])
for i in xrange(length)]
scales = [tf.reshape(s, [-1, 1, 1, 1]) for s in scales]
mask = tf.concat(1, mask[0:length]) # batch x length
weights = mask
# Add a height dimension to mask to use later for masking.
mask = tf.reshape(mask, [-1, length, 1, 1])
mask = tf.concat(2, [mask for _ in xrange(height)]) + tf.zeros(
tf.pack([batch_size, length, height, nmaps]), dtype=tf.float32)
# Start is a length-list of batch-by-nmaps tensors, reshape and concat.
start = [tf.tanh(embedded[l]) for l in xrange(length)]
start = [tf.reshape(start[l], [-1, 1, nmaps]) for l in xrange(length)]
start = tf.reshape(tf.concat(1, start), [-1, length, 1, nmaps])
# First image comes from start by applying one convolution and adding 0s.
first = conv_linear(start, 1, 1, vec_size, nmaps, True, 0.0, "input")
first = [first] + [tf.zeros(tf.pack([batch_size, length, 1, nmaps]),
dtype=tf.float32) for _ in xrange(height - 1)]
first = tf.concat(2, first)
# Computation steps.
keep_prob = 1.0 - self.do_training * (dropout * 8.0 / float(length))
step = [tf.nn.dropout(first, keep_prob) * mask]
act_noise_scale = act_noise * self.do_training * self.pull
outputs = []
for it in xrange(length):
with tf.variable_scope("RX%d" % (it % rx_step)) as vs:
if it >= rx_step:
vs.reuse_variables()
cur = step[it]
# Do nconvs-many CGRU steps.
for layer in xrange(nconvs):
cur = conv_gru([], cur, kw, kh, nmaps, cutoff, "cgru_%d" % layer)
cur *= mask
outputs.append(tf.slice(cur, [0, 0, 0, 0], [-1, -1, 1, -1]))
cur = tf.nn.dropout(cur, keep_prob)
if act_noise > 0.00001:
cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale
step.append(cur * mask)
self.steps.append([tf.reshape(s, [-1, length, height * nmaps])
for s in step])
# Output is the n-th step output; n = current length, as in scales.
output = tf.add_n([outputs[i] * scales[i] for i in xrange(length)])
# Final convolution to get logits, list outputs.
output = conv_linear(output, 1, 1, nmaps, noclass, True, 0.0, "output")
output = tf.reshape(output, [-1, length, noclass])
external_output = [tf.reshape(o, [-1, noclass])
for o in list(tf.split(1, length, output))]
external_output = [tf.nn.softmax(o) for o in external_output]
self.outputs.append(external_output)
# Calculate cross-entropy loss and normalize it.
targets = tf.concat(1, [make_dense(self.target[l], noclass)
for l in xrange(length)])
targets = tf.reshape(targets, [-1, noclass])
xent = tf.reshape(tf.nn.softmax_cross_entropy_with_logits(
tf.reshape(output, [-1, noclass]), targets), [-1, length])
perp_loss = tf.reduce_sum(xent * weights)
perp_loss /= tf.cast(batch_size, dtype=tf.float32)
perp_loss /= length
# Final loss: cross-entropy + shared parameter relaxation part.
relax_dist, self.avg_op = relaxed_distance(rx_step)
total_loss = perp_loss + relax_dist * self.pull
self.losses.append(perp_loss)
# Gradients and Adam update operation.
if length == data_utils.bins[0] or (mode == 0 and
length < data_utils.bins[-1] + 1):
data_utils.print_out("Creating backward for bin of length %d." % length)
params = tf.trainable_variables()
grads = tf.gradients(total_loss, params)
grads, norm = tf.clip_by_global_norm(grads, max_grad_norm)
self.grad_norms.append(norm)
for grad in grads:
if isinstance(grad, tf.Tensor):
grad += tf.truncated_normal(tf.shape(grad)) * self.noise_param
update = adam.apply_gradients(zip(grads, params),
global_step=self.global_step)
self.updates.append(update)
data_utils.print_out("Created model for bin of length %d in"
" %.2f s." % (length, time.time() - start_time))
self.saver = tf.train.Saver(tf.all_variables())
def step(self, sess, inp, target, do_backward, noise_param=None,
get_steps=False):
"""Run a step of the network."""
assert len(inp) == len(target)
length = len(target)
feed_in = {}
feed_in[self.noise_param.name] = noise_param if noise_param else 0.0
feed_in[self.do_training.name] = 1.0 if do_backward else 0.0
feed_out = []
index = len(data_utils.bins)
if length < data_utils.bins[-1] + 1:
index = data_utils.bins.index(length)
if do_backward:
feed_out.append(self.updates[index])
feed_out.append(self.grad_norms[index])
feed_out.append(self.losses[index])
for l in xrange(length):
feed_in[self.input[l].name] = inp[l]
for l in xrange(length):
feed_in[self.target[l].name] = target[l]
feed_out.append(self.outputs[index][l])
if get_steps:
for l in xrange(length+1):
feed_out.append(self.steps[index][l])
res = sess.run(feed_out, feed_in)
offset = 0
norm = None
if do_backward:
offset = 2
norm = res[1]
outputs = res[offset + 1:offset + 1 + length]
steps = res[offset + 1 + length:] if get_steps else None
return res[offset], outputs, norm, steps
| models-master | neural_gpu/neural_gpu.py |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional Gated Recurrent Networks for Algorithm Learning."""
import math
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
bins = [8, 12, 16, 20, 24, 28, 32, 36, 40, 48, 64, 128]
all_tasks = ["sort", "kvsort", "id", "rev", "rev2", "incr", "add", "left",
"right", "left-shift", "right-shift", "bmul", "mul", "dup",
"badd", "qadd", "search"]
forward_max = 128
log_filename = ""
def pad(l):
for b in bins:
if b >= l: return b
return forward_max
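# Example (illustrative): with the bins above, pad(10) returns 12, and
# pad(200) falls through to forward_max (128).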
train_set = {}
test_set = {}
for some_task in all_tasks:
train_set[some_task] = []
test_set[some_task] = []
for all_max_len in xrange(10000):
train_set[some_task].append([])
test_set[some_task].append([])
def add(n1, n2, base=10):
"""Add two numbers represented as lower-endian digit lists."""
k = max(len(n1), len(n2)) + 1
d1 = n1 + [0 for _ in xrange(k - len(n1))]
d2 = n2 + [0 for _ in xrange(k - len(n2))]
res = []
carry = 0
for i in xrange(k):
if d1[i] + d2[i] + carry < base:
res.append(d1[i] + d2[i] + carry)
carry = 0
else:
res.append(d1[i] + d2[i] + carry - base)
carry = 1
while res and res[-1] == 0:
res = res[:-1]
if res: return res
return [0]
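# Example (illustrative): numbers are lower-endian digit lists, so
# add([9, 9], [1]) computes 99 + 1 and returns [0, 0, 1], i.e. 100.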
def init_data(task, length, nbr_cases, nclass):
"""Data initialization."""
def rand_pair(l, task):
"""Random data pair for a task. Total length should be <= l."""
k = (l-1)/2
base = 10
if task[0] == "b": base = 2
if task[0] == "q": base = 4
d1 = [np.random.randint(base) for _ in xrange(k)]
d2 = [np.random.randint(base) for _ in xrange(k)]
if task in ["add", "badd", "qadd"]:
res = add(d1, d2, base)
elif task in ["mul", "bmul"]:
d1n = sum([d * (base ** i) for i, d in enumerate(d1)])
d2n = sum([d * (base ** i) for i, d in enumerate(d2)])
if task == "bmul":
res = [int(x) for x in list(reversed(str(bin(d1n * d2n))))[:-2]]
else:
res = [int(x) for x in list(reversed(str(d1n * d2n)))]
else:
sys.exit()
sep = [12]
if task in ["add", "badd", "qadd"]: sep = [11]
inp = [d + 1 for d in d1] + sep + [d + 1 for d in d2]
return inp, [r + 1 for r in res]
def rand_dup_pair(l):
"""Random data pair for duplication task. Total length should be <= l."""
k = l/2
x = [np.random.randint(nclass - 1) + 1 for _ in xrange(k)]
inp = x + [0 for _ in xrange(l - k)]
res = x + x + [0 for _ in xrange(l - 2*k)]
return inp, res
def rand_rev2_pair(l):
"""Random data pair for reverse2 task. Total length should be <= l."""
inp = [(np.random.randint(nclass - 1) + 1,
np.random.randint(nclass - 1) + 1) for _ in xrange(l/2)]
res = [i for i in reversed(inp)]
return [x for p in inp for x in p], [x for p in res for x in p]
def rand_search_pair(l):
"""Random data pair for search task. Total length should be <= l."""
inp = [(np.random.randint(nclass - 1) + 1,
            np.random.randint(nclass - 1) + 1) for _ in xrange((l - 1) / 2)]
q = np.random.randint(nclass - 1) + 1
res = 0
for (k, v) in reversed(inp):
if k == q:
res = v
return [x for p in inp for x in p] + [q], [res]
def rand_kvsort_pair(l):
"""Random data pair for key-value sort. Total length should be <= l."""
keys = [(np.random.randint(nclass - 1) + 1, i) for i in xrange(l/2)]
vals = [np.random.randint(nclass - 1) + 1 for _ in xrange(l/2)]
kv = [(k, vals[i]) for (k, i) in keys]
sorted_kv = [(k, vals[i]) for (k, i) in sorted(keys)]
return [x for p in kv for x in p], [x for p in sorted_kv for x in p]
def spec(inp):
"""Return the target given the input for some tasks."""
if task == "sort":
return sorted(inp)
elif task == "id":
return inp
elif task == "rev":
return [i for i in reversed(inp)]
elif task == "incr":
carry = 1
res = []
for i in xrange(len(inp)):
if inp[i] + carry < nclass:
res.append(inp[i] + carry)
carry = 0
else:
res.append(1)
carry = 1
return res
elif task == "left":
return [inp[0]]
elif task == "right":
return [inp[-1]]
elif task == "left-shift":
return [inp[l-1] for l in xrange(len(inp))]
elif task == "right-shift":
return [inp[l+1] for l in xrange(len(inp))]
else:
print_out("Unknown spec for task " + str(task))
sys.exit()
l = length
cur_time = time.time()
total_time = 0.0
for case in xrange(nbr_cases):
total_time += time.time() - cur_time
cur_time = time.time()
if l > 10000 and case % 100 == 1:
print_out(" avg gen time %.4f s" % (total_time / float(case)))
if task in ["add", "badd", "qadd", "bmul", "mul"]:
i, t = rand_pair(l, task)
train_set[task][len(i)].append([i, t])
i, t = rand_pair(l, task)
test_set[task][len(i)].append([i, t])
elif task == "dup":
i, t = rand_dup_pair(l)
train_set[task][len(i)].append([i, t])
i, t = rand_dup_pair(l)
test_set[task][len(i)].append([i, t])
elif task == "rev2":
i, t = rand_rev2_pair(l)
train_set[task][len(i)].append([i, t])
i, t = rand_rev2_pair(l)
test_set[task][len(i)].append([i, t])
elif task == "search":
i, t = rand_search_pair(l)
train_set[task][len(i)].append([i, t])
i, t = rand_search_pair(l)
test_set[task][len(i)].append([i, t])
elif task == "kvsort":
i, t = rand_kvsort_pair(l)
train_set[task][len(i)].append([i, t])
i, t = rand_kvsort_pair(l)
test_set[task][len(i)].append([i, t])
else:
inp = [np.random.randint(nclass - 1) + 1 for i in xrange(l)]
target = spec(inp)
train_set[task][l].append([inp, target])
inp = [np.random.randint(nclass - 1) + 1 for i in xrange(l)]
target = spec(inp)
test_set[task][l].append([inp, target])
def to_symbol(i):
"""Covert ids to text."""
if i == 0: return ""
if i == 11: return "+"
if i == 12: return "*"
return str(i-1)
def to_id(s):
"""Covert text to ids."""
if s == "+": return 11
if s == "*": return 12
return int(s) + 1
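# Example (illustrative): to_id("7") == 8 and to_symbol(8) == "7"; the +1/-1
# shift keeps id 0 free to act as padding.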
def get_batch(max_length, batch_size, do_train, task, offset=None, preset=None):
"""Get a batch of data, training or testing."""
inputs = []
targets = []
length = max_length
if preset is None:
cur_set = test_set[task]
if do_train: cur_set = train_set[task]
while not cur_set[length]:
length -= 1
pad_length = pad(length)
for b in xrange(batch_size):
if preset is None:
elem = random.choice(cur_set[length])
if offset is not None and offset + b < len(cur_set[length]):
elem = cur_set[length][offset + b]
else:
elem = preset
inp, target = elem[0], elem[1]
assert len(inp) == length
inputs.append(inp + [0 for l in xrange(pad_length - len(inp))])
targets.append(target + [0 for l in xrange(pad_length - len(target))])
res_input = []
res_target = []
for l in xrange(pad_length):
new_input = np.array([inputs[b][l] for b in xrange(batch_size)],
dtype=np.int32)
new_target = np.array([targets[b][l] for b in xrange(batch_size)],
dtype=np.int32)
res_input.append(new_input)
res_target.append(new_target)
return res_input, res_target
def print_out(s, newline=True):
"""Print a message out and log it to file."""
if log_filename:
try:
with gfile.GFile(log_filename, mode="a") as f:
f.write(s + ("\n" if newline else ""))
# pylint: disable=bare-except
except:
sys.stdout.write("Error appending to %s\n" % log_filename)
sys.stdout.write(s + ("\n" if newline else ""))
sys.stdout.flush()
def decode(output):
return [np.argmax(o, axis=1) for o in output]
def accuracy(inpt, output, target, batch_size, nprint):
"""Calculate output accuracy given target."""
assert nprint < batch_size + 1
def task_print(inp, output, target):
stop_bound = 0
print_len = 0
while print_len < len(target) and target[print_len] > stop_bound:
print_len += 1
print_out(" i: " + " ".join([str(i - 1) for i in inp if i > 0]))
print_out(" o: " +
" ".join([str(output[l] - 1) for l in xrange(print_len)]))
print_out(" t: " +
" ".join([str(target[l] - 1) for l in xrange(print_len)]))
decoded_target = target
decoded_output = decode(output)
total = 0
errors = 0
seq = [0 for b in xrange(batch_size)]
for l in xrange(len(decoded_output)):
for b in xrange(batch_size):
if decoded_target[l][b] > 0:
total += 1
if decoded_output[l][b] != decoded_target[l][b]:
seq[b] = 1
errors += 1
e = 0 # Previous error index
for _ in xrange(min(nprint, sum(seq))):
while seq[e] == 0:
e += 1
task_print([inpt[l][e] for l in xrange(len(inpt))],
[decoded_output[l][e] for l in xrange(len(decoded_target))],
[decoded_target[l][e] for l in xrange(len(decoded_target))])
e += 1
for b in xrange(nprint - errors):
task_print([inpt[l][b] for l in xrange(len(inpt))],
[decoded_output[l][b] for l in xrange(len(decoded_target))],
[decoded_target[l][b] for l in xrange(len(decoded_target))])
return errors, total, sum(seq)
def safe_exp(x):
perp = 10000
if x < 100: perp = math.exp(x)
if perp > 10000: return 10000
return perp
| models-master | neural_gpu/data_utils.py |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural GPU for Learning Algorithms."""
import math
import os
import random
import sys
import time
import matplotlib.animation as anim
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import data_utils as data
import neural_gpu
tf.app.flags.DEFINE_float("lr", 0.001, "Learning rate.")
tf.app.flags.DEFINE_float("init_weight", 1.0, "Initial weights deviation.")
tf.app.flags.DEFINE_float("max_grad_norm", 1.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("cutoff", 1.2, "Cutoff at the gates.")
tf.app.flags.DEFINE_float("pull", 0.0005, "Starting pull of the relaxations.")
tf.app.flags.DEFINE_float("pull_incr", 1.2, "Increase pull by that much.")
tf.app.flags.DEFINE_float("curriculum_bound", 0.15, "Move curriculum < this.")
tf.app.flags.DEFINE_float("dropout", 0.15, "Dropout that much.")
tf.app.flags.DEFINE_float("grad_noise_scale", 0.0, "Gradient noise scale.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size.")
tf.app.flags.DEFINE_integer("low_batch_size", 16, "Low batch size.")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200, "Steps per epoch.")
tf.app.flags.DEFINE_integer("nmaps", 128, "Number of floats in each cell.")
tf.app.flags.DEFINE_integer("niclass", 33, "Number of classes (0 is padding).")
tf.app.flags.DEFINE_integer("noclass", 33, "Number of classes (0 is padding).")
tf.app.flags.DEFINE_integer("train_data_size", 5000, "Training examples/len.")
tf.app.flags.DEFINE_integer("max_length", 41, "Maximum length.")
tf.app.flags.DEFINE_integer("rx_step", 6, "Relax that many recursive steps.")
tf.app.flags.DEFINE_integer("random_seed", 125459, "Random seed.")
tf.app.flags.DEFINE_integer("nconvs", 2, "How many convolutions / 1 step.")
tf.app.flags.DEFINE_integer("kw", 3, "Kernel width.")
tf.app.flags.DEFINE_integer("kh", 3, "Kernel height.")
tf.app.flags.DEFINE_integer("height", 4, "Height.")
tf.app.flags.DEFINE_integer("forward_max", 401, "Maximum forward length.")
tf.app.flags.DEFINE_integer("jobid", -1, "Task id when running on borg.")
tf.app.flags.DEFINE_integer("nprint", 0, "How many test examples to print out.")
tf.app.flags.DEFINE_integer("mode", 0, "Mode: 0-train other-decode.")
tf.app.flags.DEFINE_bool("animate", False, "Whether to produce an animation.")
tf.app.flags.DEFINE_bool("quantize", False, "Whether to quantize variables.")
tf.app.flags.DEFINE_string("task", "rev", "Which task are we learning?")
tf.app.flags.DEFINE_string("train_dir", "/tmp/", "Directory to store models.")
tf.app.flags.DEFINE_string("ensemble", "", "Model paths for ensemble.")
FLAGS = tf.app.flags.FLAGS
EXTRA_EVAL = 12
def initialize(sess):
"""Initialize data and model."""
if FLAGS.jobid >= 0:
data.log_filename = os.path.join(FLAGS.train_dir, "log%d" % FLAGS.jobid)
data.print_out("NN ", newline=False)
# Set random seed.
seed = FLAGS.random_seed + max(0, FLAGS.jobid)
tf.set_random_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Check data sizes.
assert data.bins
min_length = 3
max_length = min(FLAGS.max_length, data.bins[-1])
assert max_length + 1 > min_length
while len(data.bins) > 1 and data.bins[-2] > max_length + EXTRA_EVAL:
data.bins = data.bins[:-1]
assert data.bins[0] > FLAGS.rx_step
data.forward_max = max(FLAGS.forward_max, data.bins[-1])
nclass = min(FLAGS.niclass, FLAGS.noclass)
data_size = FLAGS.train_data_size if FLAGS.mode == 0 else 1000
# Initialize data for each task.
tasks = FLAGS.task.split("-")
for t in tasks:
for l in xrange(max_length + EXTRA_EVAL - 1):
data.init_data(t, l, data_size, nclass)
data.init_data(t, data.bins[-2], data_size, nclass)
data.init_data(t, data.bins[-1], data_size, nclass)
end_size = 4 * 1024 if FLAGS.mode > 0 else 1024
data.init_data(t, data.forward_max, end_size, nclass)
# Print out parameters.
curriculum = FLAGS.curriculum_bound
msg1 = ("layers %d kw %d h %d kh %d relax %d batch %d noise %.2f task %s"
% (FLAGS.nconvs, FLAGS.kw, FLAGS.height, FLAGS.kh, FLAGS.rx_step,
FLAGS.batch_size, FLAGS.grad_noise_scale, FLAGS.task))
msg2 = "data %d %s" % (FLAGS.train_data_size, msg1)
msg3 = ("cut %.2f pull %.3f lr %.2f iw %.2f cr %.2f nm %d d%.4f gn %.2f %s" %
(FLAGS.cutoff, FLAGS.pull_incr, FLAGS.lr, FLAGS.init_weight,
curriculum, FLAGS.nmaps, FLAGS.dropout, FLAGS.max_grad_norm, msg2))
data.print_out(msg3)
# Create checkpoint directory if it does not exist.
checkpoint_dir = os.path.join(FLAGS.train_dir, "neural_gpu%s"
% ("" if FLAGS.jobid < 0 else str(FLAGS.jobid)))
if not gfile.IsDirectory(checkpoint_dir):
data.print_out("Creating checkpoint directory %s." % checkpoint_dir)
gfile.MkDir(checkpoint_dir)
# Create model and initialize it.
tf.get_variable_scope().set_initializer(
tf.uniform_unit_scaling_initializer(factor=1.8 * FLAGS.init_weight))
model = neural_gpu.NeuralGPU(
FLAGS.nmaps, FLAGS.nmaps, FLAGS.niclass, FLAGS.noclass, FLAGS.dropout,
FLAGS.rx_step, FLAGS.max_grad_norm, FLAGS.cutoff, FLAGS.nconvs,
FLAGS.kw, FLAGS.kh, FLAGS.height, FLAGS.mode, FLAGS.lr,
FLAGS.pull, FLAGS.pull_incr, min_length + 3)
data.print_out("Created model.")
sess.run(tf.initialize_all_variables())
data.print_out("Initialized variables.")
# Load model from parameters if a checkpoint exists.
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
data.print_out("Reading model parameters from %s"
% ckpt.model_checkpoint_path)
model.saver.restore(sess, ckpt.model_checkpoint_path)
# Check if there are ensemble models and get their checkpoints.
ensemble = []
ensemble_dir_list = [d for d in FLAGS.ensemble.split(",") if d]
for ensemble_dir in ensemble_dir_list:
ckpt = tf.train.get_checkpoint_state(ensemble_dir)
if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
data.print_out("Found ensemble model %s" % ckpt.model_checkpoint_path)
ensemble.append(ckpt.model_checkpoint_path)
# Return the model and needed variables.
return (model, min_length, max_length, checkpoint_dir, curriculum, ensemble)
def single_test(l, model, sess, task, nprint, batch_size, print_out=True,
offset=None, ensemble=None, get_steps=False):
"""Test model on test data of length l using the given session."""
inpt, target = data.get_batch(l, batch_size, False, task, offset)
_, res, _, steps = model.step(sess, inpt, target, False, get_steps=get_steps)
errors, total, seq_err = data.accuracy(inpt, res, target, batch_size, nprint)
seq_err = float(seq_err) / batch_size
if total > 0:
errors = float(errors) / total
if print_out:
data.print_out(" %s len %d errors %.2f sequence-errors %.2f"
% (task, l, 100*errors, 100*seq_err))
# Ensemble eval.
if ensemble:
results = []
for m in ensemble:
model.saver.restore(sess, m)
_, result, _, _ = model.step(sess, inpt, target, False)
m_errors, m_total, m_seq_err = data.accuracy(inpt, result, target,
batch_size, nprint)
m_seq_err = float(m_seq_err) / batch_size
if total > 0:
m_errors = float(m_errors) / m_total
data.print_out(" %s len %d m-errors %.2f m-sequence-errors %.2f"
% (task, l, 100*m_errors, 100*m_seq_err))
results.append(result)
ens = [sum(o) for o in zip(*results)]
errors, total, seq_err = data.accuracy(inpt, ens, target,
batch_size, nprint)
seq_err = float(seq_err) / batch_size
if total > 0:
errors = float(errors) / total
if print_out:
data.print_out(" %s len %d ens-errors %.2f ens-sequence-errors %.2f"
% (task, l, 100*errors, 100*seq_err))
return errors, seq_err, (steps, inpt, [np.argmax(o, axis=1) for o in res])
def multi_test(l, model, sess, task, nprint, batch_size, offset=None,
ensemble=None):
"""Run multiple tests at lower batch size to save memory."""
errors, seq_err = 0.0, 0.0
to_print = nprint
low_batch = FLAGS.low_batch_size
low_batch = min(low_batch, batch_size)
for mstep in xrange(batch_size / low_batch):
cur_offset = None if offset is None else offset + mstep * low_batch
err, sq_err, _ = single_test(l, model, sess, task, to_print, low_batch,
False, cur_offset, ensemble=ensemble)
to_print = max(0, to_print - low_batch)
errors += err
seq_err += sq_err
if FLAGS.mode > 0:
cur_errors = float(low_batch * errors) / ((mstep+1) * low_batch)
cur_seq_err = float(low_batch * seq_err) / ((mstep+1) * low_batch)
data.print_out(" %s multitest current errors %.2f sequence-errors %.2f"
% (task, 100*cur_errors, 100*cur_seq_err))
errors = float(low_batch) * float(errors) / batch_size
seq_err = float(low_batch) * float(seq_err) / batch_size
data.print_out(" %s len %d errors %.2f sequence-errors %.2f"
% (task, l, 100*errors, 100*seq_err))
return errors, seq_err
def train():
"""Train the model."""
batch_size = FLAGS.batch_size
tasks = FLAGS.task.split("-")
with tf.Session() as sess:
(model, min_length, max_length, checkpoint_dir,
curriculum, _) = initialize(sess)
quant_op = neural_gpu.quantize_weights_op(512, 8)
max_cur_length = min(min_length + 3, max_length)
prev_acc_perp = [1000000 for _ in xrange(3)]
prev_seq_err = 1.0
    # Main training loop.
while True:
global_step, pull, max_cur_length, learning_rate = sess.run(
[model.global_step, model.pull, model.cur_length, model.lr])
acc_loss, acc_total, acc_errors, acc_seq_err = 0.0, 0, 0, 0
acc_grad_norm, step_count, step_time = 0.0, 0, 0.0
for _ in xrange(FLAGS.steps_per_checkpoint):
global_step += 1
task = random.choice(tasks)
# Select the length for curriculum learning.
l = np.random.randint(max_cur_length - min_length + 1) + min_length
# Prefer longer stuff 60% of time.
if np.random.randint(100) < 60:
l1 = np.random.randint(max_cur_length - min_length+1) + min_length
l = max(l, l1)
# Mixed curriculum learning: in 25% of cases go to any larger length.
if np.random.randint(100) < 25:
l1 = np.random.randint(max_length - min_length + 1) + min_length
l = max(l, l1)
# Run a step and time it.
start_time = time.time()
inp, target = data.get_batch(l, batch_size, True, task)
noise_param = math.sqrt(math.pow(global_step, -0.55) *
prev_seq_err) * FLAGS.grad_noise_scale
loss, res, gnorm, _ = model.step(sess, inp, target, True, noise_param)
step_time += time.time() - start_time
acc_grad_norm += float(gnorm)
# Accumulate statistics only if we did not exceed curriculum length.
if l < max_cur_length + 1:
step_count += 1
acc_loss += loss
errors, total, seq_err = data.accuracy(inp, res, target,
batch_size, 0)
acc_total += total
acc_errors += errors
acc_seq_err += seq_err
# Normalize and print out accumulated statistics.
acc_loss /= step_count
step_time /= FLAGS.steps_per_checkpoint
acc_seq_err = float(acc_seq_err) / (step_count * batch_size)
prev_seq_err = max(0.0, acc_seq_err - 0.02) # No noise at error < 2%.
acc_errors = float(acc_errors) / acc_total if acc_total > 0 else 1.0
msg1 = "step %d step-time %.2f" % (global_step, step_time)
msg2 = "lr %.8f pull %.3f" % (learning_rate, pull)
msg3 = ("%s %s grad-norm %.8f"
% (msg1, msg2, acc_grad_norm / FLAGS.steps_per_checkpoint))
data.print_out("%s len %d ppx %.8f errors %.2f sequence-errors %.2f" %
(msg3, max_cur_length, data.safe_exp(acc_loss),
100*acc_errors, 100*acc_seq_err))
# If errors are below the curriculum threshold, move curriculum forward.
if curriculum > acc_seq_err:
if FLAGS.quantize:
# Quantize weights.
data.print_out(" Quantizing parameters.")
sess.run([quant_op])
# Increase current length (until the next with training data).
do_incr = True
while do_incr and max_cur_length < max_length:
sess.run(model.cur_length_incr_op)
for t in tasks:
if data.train_set[t]: do_incr = False
# Forget last perplexities if we're not yet at the end.
if max_cur_length < max_length:
prev_acc_perp.append(1000000)
# Either increase pull or, if it's large, average parameters.
if pull < 0.1:
sess.run(model.pull_incr_op)
else:
data.print_out(" Averaging parameters.")
sess.run(model.avg_op)
if acc_seq_err < (curriculum / 3.0):
sess.run(model.lr_decay_op)
# Lower learning rate if we're worse than the last 3 checkpoints.
acc_perp = data.safe_exp(acc_loss)
if acc_perp > max(prev_acc_perp[-3:]):
sess.run(model.lr_decay_op)
prev_acc_perp.append(acc_perp)
# Save checkpoint.
checkpoint_path = os.path.join(checkpoint_dir, "neural_gpu.ckpt")
model.saver.save(sess, checkpoint_path,
global_step=model.global_step)
# Run evaluation.
bound = data.bins[-1] + 1
for t in tasks:
l = min_length
while l < max_length + EXTRA_EVAL and l < bound:
_, seq_err, _ = single_test(l, model, sess, t,
FLAGS.nprint, batch_size)
l += 1
while l < bound + 1 and not data.test_set[t][l]:
l += 1
if seq_err < 0.05: # Run larger test if we're good enough.
_, seq_err = multi_test(data.forward_max, model, sess, t,
FLAGS.nprint, batch_size * 4)
if seq_err < 0.01: # Super-large test on 1-task large-forward models.
if data.forward_max > 4000 and len(tasks) == 1:
multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
batch_size * 16, 0)
def animate(l, test_data, anim_size):
"""Create animation for the given data (hacky matplotlib use)."""
xf = 12 # Extra frames to slow down at start and end.
fps = 2 # Frames per step.
# Make the figure.
fig = plt.figure(figsize=(16, 9), facecolor="white")
ax = fig.add_axes([0, 0, 1, 1], frameon=False, zorder=2)
ax.set_xticks([i * 24-0.5 for i in xrange(4)])
ax.set_xticklabels([])
ax.set_yticks([i - 0.5 for i in xrange(l+1)])
ax.grid(which="major", axis="both", linestyle="-", color="black")
# We need text fields.
text_fields = []
text_size = 24*32/l
for y in xrange(l):
text_fields.append(ax.text(
11.25, y + 0.15, "", color="g", ha="center", va="center",
bbox={"facecolor": "b", "alpha": 0.01, "pad": 24 * text_size},
size=text_size - (4 * 32 / l), animated=True))
im = ax.imshow(np.zeros_like(test_data[0][0][0]), vmin=-1.0,
vmax=1.0, cmap="gray", aspect="auto", origin="upper",
interpolation="none", animated=True)
im.set_zorder(1)
# Main animation step.
def animation_update(frame_no, test_data, xf, im, text_fields):
"""Update an animation frame."""
steps, inpt, out_raw = test_data
length = len(steps)
batch = frame_no / (fps * (l+4*xf))
index = int((frame_no % (fps * (l+4*xf))) / fps)
# Cut output after first padding.
out = [out_raw[i][batch] for i in xrange(len(text_fields))]
if 0 in out:
i = out.index(0)
out = out[0:i] + [0 for _ in xrange(len(out) - i)]
# Show the state after the first frames.
if index >= 2*xf:
im.set_array(steps[min(length - 1, index - 2*xf)][batch])
for i, t in enumerate(text_fields):
if index - 2*xf < length:
t.set_text("")
else:
t.set_text(data.to_symbol(out[i]))
else:
for i, t in enumerate(text_fields):
t.set_text(data.to_symbol(inpt[i][batch]) if index < xf else "")
if index < xf:
im.set_array(np.zeros_like(steps[0][0]))
else:
im.set_array(steps[0][batch])
return im,
# Create the animation and save to mp4.
animation = anim.FuncAnimation(
fig, animation_update, blit=True, frames=(l+4*xf)*anim_size*fps,
interval=500/fps, fargs=(test_data, xf, im, text_fields))
animation.save("/tmp/neural_gpu.mp4", writer="mencoder", fps=4*fps, dpi=3*80)
def evaluate():
"""Evaluate an existing model."""
batch_size = FLAGS.batch_size
tasks = FLAGS.task.split("-")
with tf.Session() as sess:
model, min_length, max_length, _, _, ensemble = initialize(sess)
bound = data.bins[-1] + 1
for t in tasks:
l = min_length
while l < max_length + EXTRA_EVAL and l < bound:
_, seq_err, _ = single_test(l, model, sess, t, FLAGS.nprint,
batch_size, ensemble=ensemble)
l += 1
while l < bound + 1 and not data.test_set[t][l]:
l += 1
# Animate.
if FLAGS.animate:
anim_size = 2
_, _, test_data = single_test(l, model, sess, t, 0, anim_size,
get_steps=True)
animate(l, test_data, anim_size)
# More tests.
_, seq_err = multi_test(data.forward_max, model, sess, t, FLAGS.nprint,
batch_size * 4, ensemble=ensemble)
if seq_err < 0.01: # Super-test if we're very good and in large-test mode.
if data.forward_max > 4000 and len(tasks) == 1:
multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
batch_size * 64, 0, ensemble=ensemble)
def interactive():
"""Interactively probe an existing model."""
with tf.Session() as sess:
model, _, _, _, _, _ = initialize(sess)
sys.stdout.write("Input to Neural GPU, e.g., 0 1. Use -1 for PAD.\n")
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline()
while inpt:
ids = [data.to_id(s) for s in inpt.strip().split()]
inpt, target = data.get_batch(len(ids), 1, False, "",
preset=(ids, [0 for _ in ids]))
_, res, _, _ = model.step(sess, inpt, target, False)
res = [np.argmax(o, axis=1) for o in res]
res = [o for o in res[:len(ids)] if o > 0]
print " " + " ".join([data.to_symbol(output[0]) for output in res])
sys.stdout.write("> ")
sys.stdout.flush()
inpt = sys.stdin.readline()
def main(_):
if FLAGS.mode == 0:
train()
elif FLAGS.mode == 1:
evaluate()
else:
interactive()
if __name__ == "__main__":
tf.app.run()
| models-master | neural_gpu/neural_gpu_trainer.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: datum.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='datum.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\x0b\x64\x61tum.proto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DATUM = _descriptor.Descriptor(
name='Datum',
full_name='Datum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channels', full_name='Datum.channels', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='Datum.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='Datum.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='Datum.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='Datum.label', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='float_data', full_name='Datum.float_data', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoded', full_name='Datum.encoded', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=145,
)
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict(
DESCRIPTOR = _DATUM,
__module__ = 'datum_pb2'
# @@protoc_insertion_point(class_scope:Datum)
))
_sym_db.RegisterMessage(Datum)
# @@protoc_insertion_point(module_scope)
| models-master | tfrecord-utils/datum_pb2.py |
#!/usr/bin/env python
import tensorflow as tf
import os
import sys
import numpy as np
import scipy.io
import multiprocessing
import datum_pb2
import lmdb
import timeit
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# images and labels array as input
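# Note: convert_to below is not called in this script; it also expects a FLAGS
# object (e.g. from tf.app.flags) providing FLAGS.directory, which is not
# defined here.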
def convert_to(images, labels, name):
num_examples = labels.shape[0]
if images.shape[0] != num_examples:
raise ValueError("Images size %d does not match label size %d." %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
'image_raw': _bytes_feature(image_raw)}))
    writer.write(example.SerializeToString())
  writer.close()
def convert_image(image, label, name):
rows = image.shape[0]
cols = image.shape[1]
depth = image.shape[2]
name = name.replace("/", "-")
name = "train-"+name
filename = os.path.join(sys.argv[2], name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
image_raw = image.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(label)),
'image_raw': _bytes_feature(image_raw)}))
  writer.write(example.SerializeToString())
  writer.close()
input_lmdb_env = lmdb.open(sys.argv[1], readonly=True)
input_lmdb_txn = input_lmdb_env.begin()
input_lmdb_cursor = input_lmdb_txn.cursor()
datum = datum_pb2.Datum() # Can create this before and pass in
for key, value in input_lmdb_cursor:
datum.ParseFromString(value)
label = datum.label
img_data = np.array(bytearray(datum.data)).reshape(datum.channels, datum.height, datum.width)
convert_image(img_data, label, key)
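# Illustrative sketch (not called above): reading back one of the .tfrecords
# files written by convert_image. The feature names mirror the Example written
# above; decoding 'image_raw' is left out because its dtype depends on the
# array that was serialized.
def iter_examples(filename):
  """Yields parsed tf.train.Example protos from a TFRecord file."""
  for serialized in tf.python_io.tf_record_iterator(filename):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    # e.g. example.features.feature['label'].int64_list.value[0]
    #      example.features.feature['image_raw'].bytes_list.value[0]
    yield example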
| models-master | tfrecord-utils/make_tfrecord.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library showing off sequence recognition and generation with the simple
example of names.
We use recurrent neural nets to learn complex functions able to recognize and
generate sequences of a given form. This can be used for natural language
syntax recognition, dynamically generating maps or puzzles and of course
baby name generation.
Before using this module, it is recommended to read the TensorFlow tutorial on
recurrent neural nets, as it explains the basic concepts of this model and
introduces the PTB module, on which this model is based.
Here is an overview of the functions available in this module:
* RNN Module for sequence functions based on PTB
* Name recognition specifically for recognizing names, but can be adapted to
recognizing sequence patterns
* Name generation specifically for generating names, but can be adapted to
generating arbitrary sequence patterns
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import numpy as np
from model import NamignizerModel
import data_utils
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 27
epoch_size = 100
class LargeConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 27
epoch_size = 100
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 27
epoch_size = 100
def run_epoch(session, m, names, counts, epoch_size, eval_op, verbose=False):
"""Runs the model on the given data for one epoch
Args:
session: the tf session holding the model graph
m: an instance of the NamignizerModel
        names: a set of lowercase names composed of the 26 letters a-z
counts: a list of the frequency of the above names
epoch_size: the number of batches to run
eval_op: whether to change the params or not, and how to do it
Kwargs:
verbose: whether to print out state of training during the epoch
Returns:
cost: the average cost during the last stage of the epoch
"""
start_time = time.time()
costs = 0.0
iters = 0
for step, (x, y) in enumerate(data_utils.namignizer_iterator(names, counts,
m.batch_size, m.num_steps, epoch_size)):
cost, _ = session.run([m.cost, eval_op],
{m.input_data: x,
m.targets: y,
m.initial_state: m.initial_state.eval(),
m.weights: np.ones(m.batch_size * m.num_steps)})
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 9:
print("%.3f perplexity: %.3f speed: %.0f lps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
if step >= epoch_size:
break
return np.exp(costs / iters)
def train(data_dir, checkpoint_path, config):
"""Trains the model with the given data
Args:
data_dir: path to the data for the model (see data_utils for data
format)
checkpoint_path: the path to save the trained model checkpoints
config: one of the above configs that specify the model and how it
should be run and trained
Returns:
None
"""
# Prepare Name data.
print("Reading Name data in %s" % data_dir)
names, counts = data_utils.read_names(data_dir)
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = NamignizerModel(is_training=True, config=config)
tf.initialize_all_variables().run()
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, names, counts, config.epoch_size, m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" %
(i + 1, train_perplexity))
m.saver.save(session, checkpoint_path, global_step=i)
def namignize(names, checkpoint_path, config):
"""Recognizes names and prints the Perplexity of the model for each names
in the list
Args:
names: a list of names in the model format
checkpoint_path: the path to restore the trained model from, should not
include the model name, just the path to
config: one of the above configs that specify the model and how it
should be run and trained
Returns:
None
"""
with tf.Graph().as_default(), tf.Session() as session:
with tf.variable_scope("model"):
m = NamignizerModel(is_training=False, config=config)
m.saver.restore(session, checkpoint_path)
for name in names:
x, y = data_utils.name_to_batch(name, m.batch_size, m.num_steps)
cost, loss, _ = session.run([m.cost, m.loss, tf.no_op()],
{m.input_data: x,
m.targets: y,
m.initial_state: m.initial_state.eval(),
m.weights: np.concatenate((
np.ones(len(name)), np.zeros(m.batch_size * m.num_steps - len(name))))})
print("Name {} gives us a perplexity of {}".format(
name, np.exp(cost)))
def namignator(checkpoint_path, config):
"""Generates names randomly according to a given model
Args:
checkpoint_path: the path to restore the trained model from, should not
include the model name, just the path to
config: one of the above configs that specify the model and how it
should be run and trained
Returns:
None
"""
# mutate the config to become a name generator config
config.num_steps = 1
config.batch_size = 1
with tf.Graph().as_default(), tf.Session() as session:
with tf.variable_scope("model"):
m = NamignizerModel(is_training=False, config=config)
m.saver.restore(session, checkpoint_path)
activations, final_state, _ = session.run([m.activations, m.final_state, tf.no_op()],
{m.input_data: np.zeros((1, 1)),
m.targets: np.zeros((1, 1)),
m.initial_state: m.initial_state.eval(),
m.weights: np.ones(1)})
# sample from our softmax activations
next_letter = np.random.choice(27, p=activations[0])
name = [next_letter]
while next_letter != 0:
activations, final_state, _ = session.run([m.activations, m.final_state, tf.no_op()],
{m.input_data: [[next_letter]],
m.targets: np.zeros((1, 1)),
m.initial_state: final_state,
m.weights: np.ones(1)})
next_letter = np.random.choice(27, p=activations[0])
name += [next_letter]
print(map(lambda x: chr(x + 96), name))
if __name__ == "__main__":
# train("data/SmallNames.txt", "model/namignizer", SmallConfig)
# namignize(["mary", "ida", "gazorbazorb", "mmmhmm", "bob"],
# tf.train.latest_checkpoint("model"), SmallConfig)
# namignator(tf.train.latest_checkpoint("model"), SmallConfig)
| models-master | namignizer/names.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RNN model with embeddings"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class NamignizerModel(object):
"""The Namignizer model ~ strongly based on PTB"""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
# will always be 27
vocab_size = config.vocab_size
# placeholders for inputs
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# weights for the loss function
self._weights = tf.placeholder(tf.float32, [batch_size * num_steps])
# lstm for our RNN cell (GRU supported too)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
if is_training and config.keep_prob < 1:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size])
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(1, outputs), [-1, size])
softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
softmax_b = tf.get_variable("softmax_b", [vocab_size])
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.nn.seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(self._targets, [-1])],
[self._weights])
self._loss = loss
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
# probabilities of each letter
self._activations = tf.nn.softmax(logits)
# ability to save the model
self.saver = tf.train.Saver(tf.all_variables())
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
def assign_lr(self, session, lr_value):
session.run(tf.assign(self.lr, lr_value))
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def activations(self):
return self._activations
@property
def weights(self):
return self._weights
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def loss(self):
return self._loss
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
| models-master | namignizer/model.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing Kaggle baby names files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import tensorflow as tf
import pandas as pd
# the default end of name rep will be zero
_EON = 0
def read_names(names_path):
"""read data from downloaded file. See SmallNames.txt for example format
or go to https://www.kaggle.com/kaggle/us-baby-names for full lists
Args:
names_path: path to the csv file similar to the example type
Returns:
Dataset: a namedtuple of two elements: deduped names and their associated
counts. The names contain only 26 chars and are all lower case
"""
names_data = pd.read_csv(names_path)
names_data.Name = names_data.Name.str.lower()
name_data = names_data.groupby(by=["Name"])["Count"].sum()
name_counts = np.array(name_data.tolist())
names_deduped = np.array(name_data.index.tolist())
Dataset = collections.namedtuple('Dataset', ['Name', 'Count'])
return Dataset(names_deduped, name_counts)
def _letter_to_number(letter):
"""converts letters to numbers between 1 and 27"""
# ord of lower case 'a' is 97
return ord(letter) - 96
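# For example, _letter_to_number('a') == 1 and _letter_to_number('z') == 26;
# 0 is reserved for the end-of-name token (_EON), giving 27 ids in total.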
def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size):
"""Takes a list of names and counts like those output from read_names, and
makes an iterator yielding a batch_size by num_steps array of random names
separated by an end of name token. The names are choosen randomly according
to their counts. The batch may end mid-name
Args:
names: a set of lowercase names composed of 26 characters
counts: a list of the frequency of those names
batch_size: int
num_steps: int
epoch_size: number of batches to yield
Yields:
(x, y): a batch_size by num_steps array of ints representing letters, where
x will be the input and y will be the target
"""
name_distribution = counts / counts.sum()
for i in range(epoch_size):
data = np.zeros(batch_size * num_steps + 1)
samples = np.random.choice(names, size=batch_size * num_steps // 2,
replace=True, p=name_distribution)
data_index = 0
for sample in samples:
if data_index >= batch_size * num_steps:
break
for letter in map(_letter_to_number, sample) + [_EON]:
if data_index >= batch_size * num_steps:
break
data[data_index] = letter
data_index += 1
x = data[:batch_size * num_steps].reshape((batch_size, num_steps))
y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps))
yield (x, y)
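# Illustrative usage (the data path is a placeholder; see the commented
# examples in names.py):
#   names, counts = read_names("data/SmallNames.txt")
#   for x, y in namignizer_iterator(names, counts, batch_size=20,
#                                   num_steps=20, epoch_size=100):
#     ...  # x[i, t] is a letter id, y[i, t] is the next letter to predict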
def name_to_batch(name, batch_size, num_steps):
""" Takes a single name and fills a batch with it
Args:
name: lowercase composed of 26 characters
batch_size: int
num_steps: int
Returns:
x, y: a batch_size by num_steps array of ints representing letters, where
x will be the input and y will be the target. The array is filled up
to the length of the string, the rest is filled with zeros
"""
data = np.zeros(batch_size * num_steps + 1)
data_index = 0
for letter in map(_letter_to_number, name) + [_EON]:
data[data_index] = letter
data_index += 1
x = data[:batch_size * num_steps].reshape((batch_size, num_steps))
y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps))
return x, y
| models-master | namignizer/data_utils.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_builder."""
# pylint: disable=no-name-in-module,unused-import,g-bad-import-order,maybe-no-member
import os.path
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from syntaxnet import graph_builder
from syntaxnet import sparse_pb2
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
class GraphBuilderTest(test_util.TensorFlowTestCase):
def setUp(self):
# Creates a task context with the correct testing paths.
initial_task_context = os.path.join(
FLAGS.test_srcdir,
'syntaxnet/'
'testdata/context.pbtxt')
self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
with open(initial_task_context, 'r') as fin:
with open(self._task_context, 'w') as fout:
fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
.replace('OUTPATH', FLAGS.test_tmpdir))
# Creates necessary term maps.
with self.test_session() as sess:
gen_parser_ops.lexicon_builder(task_context=self._task_context,
corpus_name='training-corpus').run()
self._num_features, self._num_feature_ids, _, self._num_actions = (
sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
arg_prefix='brain_parser')))
def MakeBuilder(self, use_averaging=True, **kw_args):
# Set the seed and gate_gradients to ensure reproducibility.
return graph_builder.GreedyParser(
self._num_actions, self._num_features, self._num_feature_ids,
embedding_sizes=[8, 8, 8], hidden_layer_sizes=[32, 32], seed=42,
gate_gradients=True, use_averaging=use_averaging, **kw_args)
def FindNode(self, name):
for node in tf.get_default_graph().as_graph_def().node:
if node.name == name:
return node
return None
def NodeFound(self, name):
return self.FindNode(name) is not None
def testScope(self):
# Set up the network topology
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder()
parser.AddTraining(self._task_context,
batch_size=10,
corpus_name='training-corpus')
parser.AddEvaluation(self._task_context,
batch_size=2,
corpus_name='tuning-corpus')
parser.AddSaver()
# Check that the node ids we may rely on are there with the expected
# names.
self.assertEqual(parser.training['logits'].name, 'training/logits:0')
self.assertTrue(self.NodeFound('training/logits'))
self.assertTrue(self.NodeFound('training/feature_0'))
self.assertTrue(self.NodeFound('training/feature_1'))
self.assertTrue(self.NodeFound('training/feature_2'))
self.assertFalse(self.NodeFound('training/feature_3'))
self.assertEqual(parser.evaluation['logits'].name, 'evaluation/logits:0')
self.assertTrue(self.NodeFound('evaluation/logits'))
# The saver node is expected to be in the root scope.
self.assertTrue(self.NodeFound('save/restore_all'))
# Also check that the parameters have the scope we expect.
self.assertTrue(self.NodeFound('embedding_matrix_0'))
self.assertTrue(self.NodeFound('embedding_matrix_1'))
self.assertTrue(self.NodeFound('embedding_matrix_2'))
self.assertFalse(self.NodeFound('embedding_matrix_3'))
def testNestedScope(self):
# It's OK to put the whole graph in a scope of its own.
graph = tf.Graph()
with graph.as_default():
with graph.name_scope('top'):
parser = self.MakeBuilder()
parser.AddTraining(self._task_context,
batch_size=10,
corpus_name='training-corpus')
parser.AddSaver()
self.assertTrue(self.NodeFound('top/training/logits'))
self.assertTrue(self.NodeFound('top/training/feature_0'))
# The saver node is expected to be in the root scope no matter what.
self.assertFalse(self.NodeFound('top/save/restore_all'))
self.assertTrue(self.NodeFound('save/restore_all'))
def testUseCustomGraphs(self):
batch_size = 10
# Use separate custom graphs.
custom_train_graph = tf.Graph()
with custom_train_graph.as_default():
train_parser = self.MakeBuilder()
train_parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
custom_eval_graph = tf.Graph()
with custom_eval_graph.as_default():
eval_parser = self.MakeBuilder()
eval_parser.AddEvaluation(self._task_context,
batch_size,
corpus_name='tuning-corpus')
# The following session runs should not fail.
with self.test_session(graph=custom_train_graph) as sess:
self.assertTrue(self.NodeFound('training/logits'))
sess.run(train_parser.inits.values())
sess.run(['training/logits:0'])
with self.test_session(graph=custom_eval_graph) as sess:
self.assertFalse(self.NodeFound('training/logits'))
self.assertTrue(self.NodeFound('evaluation/logits'))
sess.run(eval_parser.inits.values())
sess.run(['evaluation/logits:0'])
def testTrainingAndEvalAreIndependent(self):
batch_size = 10
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
parser.AddEvaluation(self._task_context,
batch_size,
corpus_name='tuning-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
# Before any training updates are performed, both training and eval nets
# should return the same computations.
eval_logits, = sess.run([parser.evaluation['logits']])
training_logits, = sess.run([parser.training['logits']])
self.assertNear(abs((eval_logits - training_logits).sum()), 0, 1e-6)
# After training, activations should differ.
for _ in range(5):
eval_logits = parser.evaluation['logits'].eval()
for _ in range(5):
training_logits, _ = sess.run([parser.training['logits'],
parser.training['train_op']])
self.assertGreater(abs((eval_logits - training_logits).sum()), 0, 1e-3)
def testReproducibility(self):
batch_size = 10
def ComputeACost(graph):
with graph.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
parser.AddEvaluation(self._task_context,
batch_size,
corpus_name='tuning-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
for _ in range(5):
cost, _ = sess.run([parser.training['cost'],
parser.training['train_op']])
return cost
cost1 = ComputeACost(tf.Graph())
cost2 = ComputeACost(tf.Graph())
self.assertNear(cost1, cost2, 1e-8)
def testAddTrainingAndEvalOrderIndependent(self):
batch_size = 10
graph1 = tf.Graph()
with graph1.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
parser.AddEvaluation(self._task_context,
batch_size,
corpus_name='tuning-corpus')
with self.test_session(graph=graph1) as sess:
sess.run(parser.inits.values())
metrics1 = None
for _ in range(500):
cost1, _ = sess.run([parser.training['cost'],
parser.training['train_op']])
em1 = parser.evaluation['eval_metrics'].eval()
metrics1 = metrics1 + em1 if metrics1 is not None else em1
# Reverse the order in which Training and Eval stacks are added.
graph2 = tf.Graph()
with graph2.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddEvaluation(self._task_context,
batch_size,
corpus_name='tuning-corpus')
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
with self.test_session(graph=graph2) as sess:
sess.run(parser.inits.values())
metrics2 = None
for _ in range(500):
cost2, _ = sess.run([parser.training['cost'],
parser.training['train_op']])
em2 = parser.evaluation['eval_metrics'].eval()
metrics2 = metrics2 + em2 if metrics2 is not None else em2
self.assertNear(cost1, cost2, 1e-8)
self.assertEqual(abs(metrics1 - metrics2).sum(), 0)
def testEvalMetrics(self):
batch_size = 10
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder()
parser.AddEvaluation(self._task_context,
batch_size,
corpus_name='tuning-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
tokens = 0
correct_heads = 0
for _ in range(100):
eval_metrics = sess.run(parser.evaluation['eval_metrics'])
tokens += eval_metrics[0]
correct_heads += eval_metrics[1]
self.assertGreater(tokens, 0)
self.assertGreaterEqual(tokens, correct_heads)
self.assertGreaterEqual(correct_heads, 0)
def MakeSparseFeatures(self, ids, weights):
f = sparse_pb2.SparseFeatures()
for i, w in zip(ids, weights):
f.id.append(i)
f.weight.append(w)
return f.SerializeToString()
def testEmbeddingOp(self):
graph = tf.Graph()
with self.test_session(graph=graph):
params = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
tf.float32)
var = variables.Variable([self.MakeSparseFeatures([1, 2], [1.0, 1.0]),
self.MakeSparseFeatures([], [])])
var.initializer.run()
embeddings = graph_builder.EmbeddingLookupFeatures(params, var,
True).eval()
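      # With ids [1, 2] and unit weights this sums rows 1 and 2 of params:
      # [3 + 5, 4 + 6] = [8, 10]; the empty feature row contributes zeros.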
self.assertAllClose([[8.0, 10.0], [0.0, 0.0]], embeddings)
var = variables.Variable([self.MakeSparseFeatures([], []),
self.MakeSparseFeatures([0, 2],
[0.5, 2.0])])
var.initializer.run()
embeddings = graph_builder.EmbeddingLookupFeatures(params, var,
True).eval()
self.assertAllClose([[0.0, 0.0], [10.5, 13.0]], embeddings)
def testOnlyTrainSomeParameters(self):
batch_size = 10
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder(use_averaging=False, only_train='softmax_bias')
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
# Before training, save the state of two of the parameters.
bias0, weight0 = sess.run([parser.params['softmax_bias'],
parser.params['softmax_weight']])
for _ in range(5):
bias, weight, _ = sess.run([parser.params['softmax_bias'],
parser.params['softmax_weight'],
parser.training['train_op']])
# After training, only one of the parameters should have changed.
self.assertAllEqual(weight, weight0)
self.assertGreater(abs(bias - bias0).sum(), 0, 1e-5)
if __name__ == '__main__':
googletest.main()
| models-master | syntaxnet/syntaxnet/graph_builder_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build structured parser models."""
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from syntaxnet import graph_builder
from syntaxnet.ops import gen_parser_ops
tf.NoGradient('BeamParseReader')
tf.NoGradient('BeamParser')
tf.NoGradient('BeamParserOutput')
def AddCrossEntropy(batch_size, n):
"""Adds a cross entropy cost function."""
cross_entropies = []
def _Pass():
return tf.constant(0, dtype=tf.float32, shape=[1])
for beam_id in range(batch_size):
beam_gold_slot = tf.reshape(tf.slice(n['gold_slot'], [beam_id], [1]), [1])
def _ComputeCrossEntropy():
"""Adds ops to compute cross entropy of the gold path in a beam."""
# Requires a cast so that UnsortedSegmentSum, in the gradient,
# is happy with the type of its input 'segment_ids', which
# must be int32.
idx = tf.cast(
tf.reshape(
tf.where(tf.equal(n['beam_ids'], beam_id)), [-1]), tf.int32)
beam_scores = tf.reshape(tf.gather(n['all_path_scores'], idx), [1, -1])
num = tf.shape(idx)
return tf.nn.softmax_cross_entropy_with_logits(
beam_scores, tf.expand_dims(
tf.sparse_to_dense(beam_gold_slot, num, [1.], 0.), 0))
# The conditional here is needed to deal with the last few batches of the
# corpus which can contain -1 in beam_gold_slot for empty batch slots.
cross_entropies.append(cf.cond(
beam_gold_slot[0] >= 0, _ComputeCrossEntropy, _Pass))
return {'cross_entropy': tf.div(tf.add_n(cross_entropies), batch_size)}
class StructuredGraphBuilder(graph_builder.GreedyParser):
"""Extends the standard GreedyParser with a CRF objective using a beam.
The constructor takes two additional keyword arguments.
beam_size: the maximum size the beam can grow to.
max_steps: the maximum number of steps in any particular beam.
The model supports batch training with the batch_size argument to the
AddTraining method.
"""
def __init__(self, *args, **kwargs):
self._beam_size = kwargs.pop('beam_size', 10)
self._max_steps = kwargs.pop('max_steps', 25)
super(StructuredGraphBuilder, self).__init__(*args, **kwargs)
def _AddBeamReader(self,
task_context,
batch_size,
corpus_name,
until_all_final=False,
always_start_new_sentences=False):
"""Adds an op capable of reading sentences and parsing them with a beam."""
features, state, epochs = gen_parser_ops.beam_parse_reader(
task_context=task_context,
feature_size=self._feature_size,
beam_size=self._beam_size,
batch_size=batch_size,
corpus_name=corpus_name,
allow_feature_weights=self._allow_feature_weights,
arg_prefix=self._arg_prefix,
continue_until_all_final=until_all_final,
always_start_new_sentences=always_start_new_sentences)
return {'state': state, 'features': features, 'epochs': epochs}
def _BuildSequence(self,
batch_size,
max_steps,
features,
state,
use_average=False):
"""Adds a sequence of beam parsing steps."""
def Advance(state, step, scores_array, alive, alive_steps, *features):
scores = self._BuildNetwork(features,
return_average=use_average)['logits']
scores_array = scores_array.write(step, scores)
features, state, alive = (
gen_parser_ops.beam_parser(state, scores, self._feature_size))
return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
alive, tf.int32)] + list(features)
# args: (state, step, scores_array, alive, alive_steps, *features)
def KeepGoing(*args):
return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))
step = tf.constant(0, tf.int32, [])
scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True)
alive = tf.constant(True, tf.bool, [batch_size])
alive_steps = tf.constant(0, tf.int32, [batch_size])
t = tf.while_loop(
KeepGoing,
Advance,
[state, step, scores_array, alive, alive_steps] + list(features),
parallel_iterations=100)
# Link to the final nodes/values of ops that have passed through While:
return {'state': t[0],
'concat_scores': t[2].concat(),
'alive': t[3],
'alive_steps': t[4]}
def AddTraining(self,
task_context,
batch_size,
learning_rate=0.1,
decay_steps=4000,
momentum=None,
corpus_name='documents'):
with tf.name_scope('training'):
n = self.training
n['accumulated_alive_steps'] = self._AddVariable(
[batch_size], tf.int32, 'accumulated_alive_steps',
tf.zeros_initializer)
n.update(self._AddBeamReader(task_context, batch_size, corpus_name))
# This adds a required 'step' node too:
learning_rate = tf.constant(learning_rate, dtype=tf.float32)
n['learning_rate'] = self._AddLearningRate(learning_rate, decay_steps)
# Call BuildNetwork *only* to set up the params outside of the main loop.
self._BuildNetwork(list(n['features']))
n.update(self._BuildSequence(batch_size, self._max_steps, n['features'],
n['state']))
flat_concat_scores = tf.reshape(n['concat_scores'], [-1])
(indices_and_paths, beams_and_slots, n['gold_slot'], n[
'beam_path_scores']) = gen_parser_ops.beam_parser_output(n[
'state'])
n['indices'] = tf.reshape(tf.gather(indices_and_paths, [0]), [-1])
n['path_ids'] = tf.reshape(tf.gather(indices_and_paths, [1]), [-1])
n['all_path_scores'] = tf.sparse_segment_sum(
flat_concat_scores, n['indices'], n['path_ids'])
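      # all_path_scores now holds the total score of each complete path
      # explored by the beam: per-step scores selected by 'indices', summed
      # per 'path_ids' segment.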
n['beam_ids'] = tf.reshape(tf.gather(beams_and_slots, [0]), [-1])
n.update(AddCrossEntropy(batch_size, n))
if self._only_train:
trainable_params = {k: v for k, v in self.params.iteritems()
if k in self._only_train}
else:
trainable_params = self.params
for p in trainable_params:
tf.logging.info('trainable_param: %s', p)
regularized_params = [
tf.nn.l2_loss(p) for k, p in trainable_params.iteritems()
if k.startswith('weights') or k.startswith('bias')]
l2_loss = 1e-4 * tf.add_n(regularized_params) if regularized_params else 0
n['cost'] = tf.add(n['cross_entropy'], l2_loss, name='cost')
n['gradients'] = tf.gradients(n['cost'], trainable_params.values())
with tf.control_dependencies([n['alive_steps']]):
update_accumulators = tf.group(
tf.assign_add(n['accumulated_alive_steps'], n['alive_steps']))
def ResetAccumulators():
return tf.assign(
n['accumulated_alive_steps'], tf.zeros([batch_size], tf.int32))
n['reset_accumulators_func'] = ResetAccumulators
optimizer = tf.train.MomentumOptimizer(n['learning_rate'],
momentum,
use_locking=self._use_locking)
train_op = optimizer.minimize(n['cost'],
var_list=trainable_params.values())
for param in trainable_params.values():
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer)
self.variables[slot.name] = slot
def NumericalChecks():
return tf.group(*[
tf.check_numerics(param, message='Parameter is not finite.')
for param in trainable_params.values()
if param.dtype.base_dtype in [tf.float32, tf.float64]])
check_op = cf.cond(tf.equal(tf.mod(self.GetStep(), self._check_every), 0),
NumericalChecks, tf.no_op)
avg_update_op = tf.group(*self._averaging.values())
train_ops = [train_op]
if self._check_parameters:
train_ops.append(check_op)
if self._use_averaging:
train_ops.append(avg_update_op)
with tf.control_dependencies([update_accumulators]):
n['train_op'] = tf.group(*train_ops, name='train_op')
n['alive_steps'] = tf.identity(n['alive_steps'], name='alive_steps')
return n
def AddEvaluation(self,
task_context,
batch_size,
evaluation_max_steps=300,
corpus_name=None):
with tf.name_scope('evaluation'):
n = self.evaluation
n.update(self._AddBeamReader(task_context,
batch_size,
corpus_name,
until_all_final=True,
always_start_new_sentences=True))
self._BuildNetwork(
list(n['features']),
return_average=self._use_averaging)
n.update(self._BuildSequence(batch_size, evaluation_max_steps, n[
'features'], n['state'], use_average=self._use_averaging))
n['eval_metrics'], n['documents'] = (
gen_parser_ops.beam_eval_output(n['state']))
return n
| models-master | syntaxnet/syntaxnet/structured_graph_builder.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loads parser_ops shared library."""
import os.path
import tensorflow as tf
tf.load_op_library(
os.path.join(tf.resource_loader.get_data_files_path(),
'parser_ops.so'))
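# Importing this module has the side effect of loading parser_ops.so, which
# registers the custom SyntaxNet ops with the TensorFlow runtime.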
| models-master | syntaxnet/syntaxnet/load_parser_ops.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for beam_reader_ops."""
import os.path
import time
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import structured_graph_builder
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
# Creates a task context with the correct testing paths.
initial_task_context = os.path.join(
FLAGS.test_srcdir,
'syntaxnet/'
'testdata/context.pbtxt')
self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
with open(initial_task_context, 'r') as fin:
with open(self._task_context, 'w') as fout:
fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
.replace('OUTPATH', FLAGS.test_tmpdir))
# Creates necessary term maps.
with self.test_session() as sess:
gen_parser_ops.lexicon_builder(task_context=self._task_context,
corpus_name='training-corpus').run()
self._num_features, self._num_feature_ids, _, self._num_actions = (
sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
arg_prefix='brain_parser')))
def MakeGraph(self,
max_steps=10,
beam_size=2,
batch_size=1,
**kwargs):
"""Constructs a structured learning graph."""
assert max_steps > 0, 'Empty network not supported.'
logging.info('MakeGraph + %s', kwargs)
with self.test_session(graph=tf.Graph()) as sess:
feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
gen_parser_ops.feature_size(task_context=self._task_context))
embedding_dims = [8, 8, 8]
hidden_layer_sizes = []
learning_rate = 0.01
builder = structured_graph_builder.StructuredGraphBuilder(
num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
seed=1,
max_steps=max_steps,
beam_size=beam_size,
gate_gradients=True,
use_locking=True,
use_averaging=False,
check_parameters=False,
**kwargs)
builder.AddTraining(self._task_context,
batch_size,
learning_rate=learning_rate,
decay_steps=1000,
momentum=0.9,
corpus_name='training-corpus')
builder.AddEvaluation(self._task_context,
batch_size,
evaluation_max_steps=25,
corpus_name=None)
builder.training['inits'] = tf.group(*builder.inits.values(), name='inits')
return builder
def Train(self, **kwargs):
with self.test_session(graph=tf.Graph()) as sess:
max_steps = 3
batch_size = 3
beam_size = 3
builder = (
self.MakeGraph(
max_steps=max_steps, beam_size=beam_size,
batch_size=batch_size, **kwargs))
logging.info('params: %s', builder.params.keys())
logging.info('variables: %s', builder.variables.keys())
t = builder.training
sess.run(t['inits'])
costs = []
gold_slots = []
alive_steps_vector = []
every_n = 5
walltime = time.time()
for step in range(10):
if step > 0 and step % every_n == 0:
new_walltime = time.time()
logging.info(
'Step: %d <cost>: %f <gold_slot>: %f <alive_steps>: %f <iter '
'time>: %f ms',
step, sum(costs[-every_n:]) / float(every_n),
sum(gold_slots[-every_n:]) / float(every_n),
sum(alive_steps_vector[-every_n:]) / float(every_n),
1000 * (new_walltime - walltime) / float(every_n))
walltime = new_walltime
cost, gold_slot, alive_steps, _ = sess.run(
[t['cost'], t['gold_slot'], t['alive_steps'], t['train_op']])
costs.append(cost)
gold_slots.append(gold_slot.mean())
alive_steps_vector.append(alive_steps.mean())
if builder._only_train:
trainable_param_names = [
k for k in builder.params if k in builder._only_train]
else:
trainable_param_names = builder.params.keys()
if builder._use_averaging:
for v in trainable_param_names:
avg = builder.variables['%s_avg_var' % v].eval()
tf.assign(builder.params[v], avg).eval()
# Reset for pseudo eval.
costs = []
gold_slots = []
alive_stepss = []
for step in range(10):
cost, gold_slot, alive_steps = sess.run(
[t['cost'], t['gold_slot'], t['alive_steps']])
costs.append(cost)
gold_slots.append(gold_slot.mean())
alive_stepss.append(alive_steps.mean())
logging.info(
'Pseudo eval: <cost>: %f <gold_slot>: %f <alive_steps>: %f',
sum(costs[-every_n:]) / float(every_n),
sum(gold_slots[-every_n:]) / float(every_n),
sum(alive_stepss[-every_n:]) / float(every_n))
def PathScores(self, iterations, beam_size, max_steps, batch_size):
with self.test_session(graph=tf.Graph()) as sess:
t = self.MakeGraph(beam_size=beam_size, max_steps=max_steps,
batch_size=batch_size).training
sess.run(t['inits'])
all_path_scores = []
beam_path_scores = []
for i in range(iterations):
logging.info('run %d', i)
tensors = (
sess.run(
[t['alive_steps'], t['concat_scores'],
t['all_path_scores'], t['beam_path_scores'],
t['indices'], t['path_ids']]))
logging.info('alive for %s, all_path_scores and beam_path_scores, '
'indices and path_ids:'
'\n%s\n%s\n%s\n%s',
tensors[0], tensors[2], tensors[3], tensors[4], tensors[5])
logging.info('diff:\n%s', tensors[2] - tensors[3])
all_path_scores.append(tensors[2])
beam_path_scores.append(tensors[3])
return all_path_scores, beam_path_scores
def testParseUntilNotAlive(self):
"""Ensures that the 'alive' condition works in the Cond ops."""
with self.test_session(graph=tf.Graph()) as sess:
t = self.MakeGraph(batch_size=3, beam_size=2, max_steps=5).training
sess.run(t['inits'])
for i in range(5):
logging.info('run %d', i)
tf_alive = t['alive'].eval()
self.assertFalse(any(tf_alive))
def testParseMomentum(self):
"""Ensures that Momentum training can be done using the gradients."""
self.Train()
self.Train(model_cost='perceptron_loss')
self.Train(model_cost='perceptron_loss',
only_train='softmax_weight,softmax_bias', softmax_init=0)
self.Train(only_train='softmax_weight,softmax_bias', softmax_init=0)
def testPathScoresAgree(self):
"""Ensures that path scores computed in the beam are same in the net."""
all_path_scores, beam_path_scores = self.PathScores(
iterations=1, beam_size=130, max_steps=5, batch_size=1)
self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testBatchPathScoresAgree(self):
"""Ensures that path scores computed in the beam are same in the net."""
all_path_scores, beam_path_scores = self.PathScores(
iterations=1, beam_size=130, max_steps=5, batch_size=22)
self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testBatchOneStepPathScoresAgree(self):
"""Ensures that path scores computed in the beam are same in the net."""
all_path_scores, beam_path_scores = self.PathScores(
iterations=1, beam_size=130, max_steps=1, batch_size=22)
self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
if __name__ == '__main__':
googletest.main()
| models-master | syntaxnet/syntaxnet/beam_reader_ops_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A program to train a tensorflow neural net parser from a a conll file."""
import os
import os.path
import time
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from google.protobuf import text_format
from syntaxnet import graph_builder
from syntaxnet import structured_graph_builder
from syntaxnet.ops import gen_parser_ops
from syntaxnet import task_spec_pb2
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('tf_master', '',
'TensorFlow execution engine to connect to.')
flags.DEFINE_string('output_path', '', 'Top level for output.')
flags.DEFINE_string('task_context', '',
'Path to a task context with resource locations and '
'parameters.')
flags.DEFINE_string('arg_prefix', None, 'Prefix for context parameters.')
flags.DEFINE_string('params', '0', 'Unique identifier of parameter grid point.')
flags.DEFINE_string('training_corpus', 'training-corpus',
'Name of the context input to read training data from.')
flags.DEFINE_string('tuning_corpus', 'tuning-corpus',
'Name of the context input to read tuning data from.')
flags.DEFINE_string('word_embeddings', None,
'Recordio containing pretrained word embeddings, will be '
'loaded as the first embedding matrix.')
flags.DEFINE_bool('compute_lexicon', False,
                  'Whether to build the lexicon term maps from the training '
                  'corpus before training.')
flags.DEFINE_bool('projectivize_training_set', False,
                  'Whether to filter malformed trees and projectivize the '
                  'training corpus before training.')
flags.DEFINE_string('hidden_layer_sizes', '200,200',
'Comma separated list of hidden layer sizes.')
flags.DEFINE_string('graph_builder', 'greedy',
'Graph builder to use, either "greedy" or "structured".')
flags.DEFINE_integer('batch_size', 32,
'Number of sentences to process in parallel.')
flags.DEFINE_integer('beam_size', 10, 'Number of slots for beam parsing.')
flags.DEFINE_integer('num_epochs', 10, 'Number of epochs to train for.')
flags.DEFINE_integer('max_steps', 50,
'Max number of parser steps during a training step.')
flags.DEFINE_integer('report_every', 100,
'Report cost and training accuracy every this many steps.')
flags.DEFINE_integer('checkpoint_every', 5000,
'Measure tuning UAS and checkpoint every this many steps.')
flags.DEFINE_bool('slim_model', False,
'Whether to remove non-averaged variables, for compactness.')
flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate parameter.')
flags.DEFINE_integer('decay_steps', 4000,
'Decay learning rate by 0.96 every this many steps.')
flags.DEFINE_float('momentum', 0.9,
'Momentum parameter for momentum optimizer.')
flags.DEFINE_string('seed', '0', 'Initialization seed for TF variables.')
flags.DEFINE_string('pretrained_params', None,
'Path to model from which to load params.')
flags.DEFINE_string('pretrained_params_names', None,
'List of names of tensors to load from pretrained model.')
flags.DEFINE_float('averaging_decay', 0.9999,
                   'Decay for exponential moving average when computing '
                   'averaged parameters; set to 1 to do vanilla averaging.')
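# Example invocation (illustrative; all paths are placeholders):
#   python parser_trainer.py \
#     --task_context=/path/to/context.pbtxt \
#     --arg_prefix=brain_parser \
#     --compute_lexicon \
#     --graph_builder=greedy \
#     --output_path=/tmp/parser_output \
#     --batch_size=32 --num_epochs=10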
def StageName():
return os.path.join(FLAGS.arg_prefix, FLAGS.graph_builder)
def OutputPath(path):
return os.path.join(FLAGS.output_path, StageName(), FLAGS.params, path)
def RewriteContext():
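  """Rewrites the task context so resources created by this stage are
  written under OutputPath, then saves the new context there."""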
context = task_spec_pb2.TaskSpec()
with gfile.FastGFile(FLAGS.task_context) as fin:
text_format.Merge(fin.read(), context)
for resource in context.input:
if resource.creator == StageName():
del resource.part[:]
part = resource.part.add()
part.file_pattern = os.path.join(OutputPath(resource.name))
with gfile.FastGFile(OutputPath('context'), 'w') as fout:
fout.write(str(context))
def WriteStatus(num_steps, eval_metric, best_eval_metric):
status = os.path.join(os.getenv('GOOGLE_STATUS_DIR') or '/tmp', 'STATUS')
message = ('Parameters: %s | Steps: %d | Tuning score: %.2f%% | '
'Best tuning score: %.2f%%' % (FLAGS.params, num_steps,
eval_metric, best_eval_metric))
with gfile.FastGFile(status, 'w') as fout:
fout.write(message)
with gfile.FastGFile(OutputPath('status'), 'a') as fout:
fout.write(message + '\n')
def Eval(sess, parser, num_steps, best_eval_metric):
"""Evaluates a network and checkpoints it to disk.
Args:
sess: tensorflow session to use
parser: graph builder containing all ops references
num_steps: number of training steps taken, for logging
best_eval_metric: current best eval metric, to decide whether this model is
the best so far
Returns:
new best eval metric
"""
logging.info('Evaluating training network.')
t = time.time()
num_epochs = None
num_tokens = 0
num_correct = 0
while True:
tf_eval_epochs, tf_eval_metrics = sess.run([
parser.evaluation['epochs'], parser.evaluation['eval_metrics']
])
num_tokens += tf_eval_metrics[0]
num_correct += tf_eval_metrics[1]
if num_epochs is None:
num_epochs = tf_eval_epochs
elif num_epochs < tf_eval_epochs:
break
eval_metric = 0 if num_tokens == 0 else (100.0 * num_correct / num_tokens)
logging.info('Seconds elapsed in evaluation: %.2f, '
'eval metric: %.2f%%', time.time() - t, eval_metric)
WriteStatus(num_steps, eval_metric, max(eval_metric, best_eval_metric))
# Save parameters.
if FLAGS.output_path:
logging.info('Writing out trained parameters.')
parser.saver.save(sess, OutputPath('latest-model'))
if eval_metric > best_eval_metric:
parser.saver.save(sess, OutputPath('model'))
return max(eval_metric, best_eval_metric)
def Train(sess, num_actions, feature_sizes, domain_sizes, embedding_dims):
"""Builds and trains the network.
Args:
sess: tensorflow session to use.
num_actions: number of possible golden actions.
feature_sizes: size of each feature vector.
domain_sizes: number of possible feature ids in each feature vector.
embedding_dims: embedding dimension to use for each feature group.
"""
t = time.time()
hidden_layer_sizes = map(int, FLAGS.hidden_layer_sizes.split(','))
logging.info('Building training network with parameters: feature_sizes: %s '
'domain_sizes: %s', feature_sizes, domain_sizes)
if FLAGS.graph_builder == 'greedy':
parser = graph_builder.GreedyParser(num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
seed=int(FLAGS.seed),
gate_gradients=True,
averaging_decay=FLAGS.averaging_decay,
arg_prefix=FLAGS.arg_prefix)
else:
parser = structured_graph_builder.StructuredGraphBuilder(
num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
seed=int(FLAGS.seed),
gate_gradients=True,
averaging_decay=FLAGS.averaging_decay,
arg_prefix=FLAGS.arg_prefix,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps)
task_context = OutputPath('context')
if FLAGS.word_embeddings is not None:
parser.AddPretrainedEmbeddings(0, FLAGS.word_embeddings, task_context)
corpus_name = ('projectivized-training-corpus' if
FLAGS.projectivize_training_set else FLAGS.training_corpus)
parser.AddTraining(task_context,
FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
momentum=FLAGS.momentum,
decay_steps=FLAGS.decay_steps,
corpus_name=corpus_name)
parser.AddEvaluation(task_context,
FLAGS.batch_size,
corpus_name=FLAGS.tuning_corpus)
parser.AddSaver(FLAGS.slim_model)
# Save graph.
if FLAGS.output_path:
with gfile.FastGFile(OutputPath('graph'), 'w') as f:
f.write(sess.graph_def.SerializeToString())
logging.info('Initializing...')
num_epochs = 0
cost_sum = 0.0
num_steps = 0
best_eval_metric = 0.0
sess.run(parser.inits.values())
if FLAGS.pretrained_params is not None:
logging.info('Loading pretrained params from %s', FLAGS.pretrained_params)
feed_dict = {'save/Const:0': FLAGS.pretrained_params}
targets = []
for node in sess.graph_def.node:
if (node.name.startswith('save/Assign') and
node.input[0] in FLAGS.pretrained_params_names.split(',')):
logging.info('Loading %s with op %s', node.input[0], node.name)
targets.append(node.name)
sess.run(targets, feed_dict=feed_dict)
logging.info('Training...')
while num_epochs < FLAGS.num_epochs:
tf_epochs, tf_cost, _ = sess.run([parser.training[
'epochs'], parser.training['cost'], parser.training['train_op']])
num_epochs = tf_epochs
num_steps += 1
cost_sum += tf_cost
if num_steps % FLAGS.report_every == 0:
logging.info('Epochs: %d, num steps: %d, '
'seconds elapsed: %.2f, avg cost: %.2f, ', num_epochs,
num_steps, time.time() - t, cost_sum / FLAGS.report_every)
cost_sum = 0.0
if num_steps % FLAGS.checkpoint_every == 0:
best_eval_metric = Eval(sess, parser, num_steps, best_eval_metric)
def main(unused_argv):
logging.set_verbosity(logging.INFO)
if not gfile.IsDirectory(OutputPath('')):
gfile.MakeDirs(OutputPath(''))
# Rewrite context.
RewriteContext()
# Creates necessary term maps.
if FLAGS.compute_lexicon:
logging.info('Computing lexicon...')
with tf.Session(FLAGS.tf_master) as sess:
gen_parser_ops.lexicon_builder(task_context=OutputPath('context'),
corpus_name=FLAGS.training_corpus).run()
with tf.Session(FLAGS.tf_master) as sess:
feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
gen_parser_ops.feature_size(task_context=OutputPath('context'),
arg_prefix=FLAGS.arg_prefix))
# Well formed and projectivize.
if FLAGS.projectivize_training_set:
logging.info('Preprocessing...')
with tf.Session(FLAGS.tf_master) as sess:
source, last = gen_parser_ops.document_source(
task_context=OutputPath('context'),
batch_size=FLAGS.batch_size,
corpus_name=FLAGS.training_corpus)
sink = gen_parser_ops.document_sink(
task_context=OutputPath('context'),
corpus_name='projectivized-training-corpus',
documents=gen_parser_ops.projectivize_filter(
gen_parser_ops.well_formed_filter(source,
task_context=OutputPath(
'context')),
task_context=OutputPath('context')))
while True:
tf_last, _ = sess.run([last, sink])
if tf_last:
break
logging.info('Training...')
with tf.Session(FLAGS.tf_master) as sess:
Train(sess, num_actions, feature_sizes, domain_sizes, embedding_dims)
if __name__ == '__main__':
tf.app.run()
| models-master | syntaxnet/syntaxnet/parser_trainer.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A program to annotate a conll file with a tensorflow neural net parser."""
import os
import os.path
import time
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import sentence_pb2
from syntaxnet import graph_builder
from syntaxnet import structured_graph_builder
from syntaxnet.ops import gen_parser_ops
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('task_context', '',
'Path to a task context with inputs and parameters for '
'feature extractors.')
flags.DEFINE_string('model_path', '', 'Path to model parameters.')
flags.DEFINE_string('arg_prefix', None, 'Prefix for context parameters.')
flags.DEFINE_string('graph_builder', 'greedy',
'Which graph builder to use, either greedy or structured.')
flags.DEFINE_string('input', 'stdin',
'Name of the context input to read data from.')
flags.DEFINE_string('output', 'stdout',
'Name of the context input to write data to.')
flags.DEFINE_string('hidden_layer_sizes', '200,200',
'Comma separated list of hidden layer sizes.')
flags.DEFINE_integer('batch_size', 32,
'Number of sentences to process in parallel.')
flags.DEFINE_integer('beam_size', 8, 'Number of slots for beam parsing.')
flags.DEFINE_integer('max_steps', 1000, 'Max number of steps to take.')
flags.DEFINE_bool('slim_model', False,
'Whether to expect only averaged variables.')
def Eval(sess, num_actions, feature_sizes, domain_sizes, embedding_dims):
"""Builds and evaluates a network.
Args:
sess: tensorflow session to use
num_actions: number of possible golden actions
feature_sizes: size of each feature vector
domain_sizes: number of possible feature ids in each feature vector
embedding_dims: embedding dimension for each feature group
"""
t = time.time()
hidden_layer_sizes = map(int, FLAGS.hidden_layer_sizes.split(','))
logging.info('Building training network with parameters: feature_sizes: %s '
'domain_sizes: %s', feature_sizes, domain_sizes)
if FLAGS.graph_builder == 'greedy':
parser = graph_builder.GreedyParser(num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
gate_gradients=True,
arg_prefix=FLAGS.arg_prefix)
else:
parser = structured_graph_builder.StructuredGraphBuilder(
num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
gate_gradients=True,
arg_prefix=FLAGS.arg_prefix,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps)
task_context = FLAGS.task_context
parser.AddEvaluation(task_context,
FLAGS.batch_size,
corpus_name=FLAGS.input,
evaluation_max_steps=FLAGS.max_steps)
parser.AddSaver(FLAGS.slim_model)
sess.run(parser.inits.values())
parser.saver.restore(sess, FLAGS.model_path)
sink_documents = tf.placeholder(tf.string)
sink = gen_parser_ops.document_sink(sink_documents,
task_context=FLAGS.task_context,
corpus_name=FLAGS.output)
t = time.time()
num_epochs = None
num_tokens = 0
num_correct = 0
num_documents = 0
while True:
tf_eval_epochs, tf_eval_metrics, tf_documents = sess.run([
parser.evaluation['epochs'],
parser.evaluation['eval_metrics'],
parser.evaluation['documents'],
])
if len(tf_documents):
logging.info('Processed %d documents', len(tf_documents))
num_documents += len(tf_documents)
sess.run(sink, feed_dict={sink_documents: tf_documents})
num_tokens += tf_eval_metrics[0]
num_correct += tf_eval_metrics[1]
if num_epochs is None:
num_epochs = tf_eval_epochs
elif num_epochs < tf_eval_epochs:
break
logging.info('Total processed documents: %d', num_documents)
if num_tokens > 0:
eval_metric = 100.0 * num_correct / num_tokens
logging.info('num correct tokens: %d', num_correct)
logging.info('total tokens: %d', num_tokens)
logging.info('Seconds elapsed in evaluation: %.2f, '
'eval metric: %.2f%%', time.time() - t, eval_metric)
def main(unused_argv):
logging.set_verbosity(logging.INFO)
with tf.Session() as sess:
feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
gen_parser_ops.feature_size(task_context=FLAGS.task_context,
arg_prefix=FLAGS.arg_prefix))
with tf.Session() as sess:
Eval(sess, num_actions, feature_sizes, domain_sizes, embedding_dims)
if __name__ == '__main__':
tf.app.run()
| models-master | syntaxnet/syntaxnet/parser_eval.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds parser models."""
import tensorflow as tf
import syntaxnet.load_parser_ops
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from syntaxnet.ops import gen_parser_ops
def BatchedSparseToDense(sparse_indices, output_size):
"""Batch compatible sparse to dense conversion.
This is useful for one-hot coded target labels.
Args:
sparse_indices: [batch_size] tensor containing one index per batch
output_size: needed in order to generate the correct dense output
Returns:
A [batch_size, output_size] dense tensor.
"""
eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
return tf.nn.embedding_lookup(eye, sparse_indices)
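# Illustrative sketch (not part of the original file): BatchedSparseToDense
# builds one-hot rows by indexing into an identity matrix. The helper below is
# an assumed demonstration only and is not called anywhere in this module.
def _batched_sparse_to_dense_example():
  # For sparse_indices [2, 0] and output_size 3 this evaluates to
  # [[0., 0., 1.],
  #  [1., 0., 0.]].
  return BatchedSparseToDense(tf.constant([2, 0]), output_size=3)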
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
"""Computes embeddings for each entry of sparse features sparse_features.
Args:
params: list of 2D tensors containing vector embeddings
sparse_features: 1D tensor of strings. Each entry is a string encoding of
dist_belief.SparseFeatures, and represents a variable length list of
feature ids, and optionally, corresponding weights values.
allow_weights: boolean to control whether the weights returned from the
SparseFeatures are used to multiply the embeddings.
Returns:
A tensor representing the combined embeddings for the sparse features.
For each entry s in sparse_features, the function looks up the embeddings
for each id and sums them into a single tensor weighing them by the
weight of each id. It returns a tensor with each entry of sparse_features
replaced by this combined embedding.
"""
if not isinstance(params, list):
params = [params]
# Lookup embeddings.
sparse_features = tf.convert_to_tensor(sparse_features)
indices, ids, weights = gen_parser_ops.unpack_sparse_features(sparse_features)
embeddings = tf.nn.embedding_lookup(params, ids)
if allow_weights:
# Multiply by weights, reshaping to allow broadcast.
broadcast_weights_shape = tf.concat(0, [tf.shape(weights), [1]])
embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index.
return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
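# Note (illustrative, not part of the original file): for an input entry that
# encodes ids {i_1, ..., i_k} with weights {w_1, ..., w_k}, the op above returns
# sum_j w_j * params[i_j] (an unweighted sum when allow_weights is False);
# tf.unsorted_segment_sum groups the per-id embeddings back into one combined
# row per entry of sparse_features.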
class GreedyParser(object):
"""Builds a Chen & Manning style greedy neural net parser.
Builds a graph with an optional reader op connected at one end and
operations needed to train the network on the other. Supports multiple
network instantiations sharing the same parameters and network topology.
The following named nodes are added to the training and eval networks:
epochs: a tensor containing the current epoch number
cost: a tensor containing the current training step cost
gold_actions: a tensor containing actions from gold decoding
feature_endpoints: a list of sparse feature vectors
logits: output of the final layer before computing softmax
The training network also contains:
train_op: an op that executes a single training step
Typical usage:
parser = graph_builder.GreedyParser(num_actions, num_features,
num_feature_ids, embedding_sizes,
hidden_layer_sizes)
parser.AddTraining(task_context, batch_size=5)
with tf.Session('local') as sess:
# This works because the session uses the same default graph as the
# GraphBuilder did.
sess.run(parser.inits.values())
while True:
tf_epoch, _ = sess.run([parser.training['epoch'],
parser.training['train_op']])
if tf_epoch[0] > 0:
break
"""
def __init__(self,
num_actions,
num_features,
num_feature_ids,
embedding_sizes,
hidden_layer_sizes,
seed=None,
gate_gradients=False,
use_locking=False,
embedding_init=1.0,
relu_init=1e-4,
bias_init=0.2,
softmax_init=1e-4,
averaging_decay=0.9999,
use_averaging=True,
check_parameters=True,
check_every=1,
allow_feature_weights=False,
only_train='',
arg_prefix=None,
**unused_kwargs):
"""Initialize the graph builder with parameters defining the network.
Args:
num_actions: int size of the set of parser actions
num_features: int list of dimensions of the feature vectors
num_feature_ids: int list of same length as num_features corresponding to
the sizes of the input feature spaces
embedding_sizes: int list of same length as num_features of the desired
embedding layer sizes
hidden_layer_sizes: int list of desired relu layer sizes; may be empty
seed: optional random initializer seed to enable reproducibility
gate_gradients: if True, gradient updates are computed synchronously,
ensuring consistency and reproducibility
use_locking: if True, use locking to avoid read-write contention when
updating Variables
embedding_init: sets the std dev of normal initializer of embeddings to
embedding_init / embedding_size ** .5
relu_init: sets the std dev of normal initializer of relu weights
to relu_init
bias_init: sets constant initializer of relu bias to bias_init
softmax_init: sets the std dev of normal initializer of softmax init
to softmax_init
averaging_decay: decay for exponential moving average when computing
averaged parameters, set to 1 to do vanilla averaging
use_averaging: whether to use moving averages of parameters during evals
check_parameters: whether to check for NaN/Inf parameters during
training
check_every: checks numerics every check_every steps.
allow_feature_weights: whether feature weights are allowed.
only_train: the comma separated set of parameter names to train. If empty,
all model parameters will be trained.
arg_prefix: prefix for context parameters.
"""
self._num_actions = num_actions
self._num_features = num_features
self._num_feature_ids = num_feature_ids
self._embedding_sizes = embedding_sizes
self._hidden_layer_sizes = hidden_layer_sizes
self._seed = seed
self._gate_gradients = gate_gradients
self._use_locking = use_locking
self._use_averaging = use_averaging
self._check_parameters = check_parameters
self._check_every = check_every
self._allow_feature_weights = allow_feature_weights
self._only_train = set(only_train.split(',')) if only_train else None
self._feature_size = len(embedding_sizes)
self._embedding_init = embedding_init
self._relu_init = relu_init
self._softmax_init = softmax_init
self._arg_prefix = arg_prefix
# Parameters of the network with respect to which training is done.
self.params = {}
# Other variables, with respect to which no training is done, but which we
# nonetheless need to save in order to capture the state of the graph.
self.variables = {}
# Operations to initialize any nodes that require initialization.
self.inits = {}
# Training- and eval-related nodes.
self.training = {}
self.evaluation = {}
self.saver = None
# Nodes to compute moving averages of parameters, called every train step.
self._averaging = {}
self._averaging_decay = averaging_decay
# Pretrained embeddings that can be used instead of constant initializers.
self._pretrained_embeddings = {}
# After the following 'with' statement, we'll be able to re-enter the
# 'params' scope by re-using the self._param_scope member variable. See for
# instance _AddParam.
with tf.name_scope('params') as self._param_scope:
self._relu_bias_init = tf.constant_initializer(bias_init)
@property
def embedding_size(self):
size = 0
for i in range(self._feature_size):
size += self._num_features[i] * self._embedding_sizes[i]
return size
def _AddParam(self,
shape,
dtype,
name,
initializer=None,
return_average=False):
"""Add a model parameter w.r.t. we expect to compute gradients.
_AddParam creates both regular parameters (usually for training) and
averaged nodes (usually for inference). It returns one or the other based
on the 'return_average' arg.
Args:
shape: int list, tensor shape of the parameter to create
dtype: tf.DataType, data type of the parameter
name: string, name of the parameter in the TF graph
      initializer: optional initializer for the parameter
return_average: if False, return parameter otherwise return moving average
Returns:
parameter or averaged parameter
"""
if name not in self.params:
step = tf.cast(self.GetStep(), tf.float32)
# Put all parameters and their initializing ops in their own scope
# irrespective of the current scope (training or eval).
with tf.name_scope(self._param_scope):
self.params[name] = tf.get_variable(name, shape, dtype, initializer)
param = self.params[name]
if initializer is not None:
self.inits[name] = state_ops.init_variable(param, initializer)
if self._averaging_decay == 1:
logging.info('Using vanilla averaging of parameters.')
ema = tf.train.ExponentialMovingAverage(decay=(step / (step + 1.0)),
num_updates=None)
else:
ema = tf.train.ExponentialMovingAverage(decay=self._averaging_decay,
num_updates=step)
self._averaging[name + '_avg_update'] = ema.apply([param])
self.variables[name + '_avg_var'] = ema.average(param)
self.inits[name + '_avg_init'] = state_ops.init_variable(
ema.average(param), tf.zeros_initializer)
return (self.variables[name + '_avg_var'] if return_average else
self.params[name])
def GetStep(self):
def OnesInitializer(shape, dtype=tf.float32):
return tf.ones(shape, dtype)
return self._AddVariable([], tf.int32, 'step', OnesInitializer)
def _AddVariable(self, shape, dtype, name, initializer=None):
if name in self.variables:
return self.variables[name]
self.variables[name] = tf.get_variable(name, shape, dtype, initializer)
if initializer is not None:
self.inits[name] = state_ops.init_variable(self.variables[name],
initializer)
return self.variables[name]
def _ReluWeightInitializer(self):
with tf.name_scope(self._param_scope):
return tf.random_normal_initializer(stddev=self._relu_init,
seed=self._seed)
def _EmbeddingMatrixInitializer(self, index, embedding_size):
if index in self._pretrained_embeddings:
return self._pretrained_embeddings[index]
else:
return tf.random_normal_initializer(
stddev=self._embedding_init / embedding_size**.5,
seed=self._seed)
def _AddEmbedding(self,
features,
num_features,
num_ids,
embedding_size,
index,
return_average=False):
"""Adds an embedding matrix and passes the `features` vector through it."""
embedding_matrix = self._AddParam(
[num_ids, embedding_size],
tf.float32,
'embedding_matrix_%d' % index,
self._EmbeddingMatrixInitializer(index, embedding_size),
return_average=return_average)
embedding = EmbeddingLookupFeatures(embedding_matrix,
tf.reshape(features,
[-1],
name='feature_%d' % index),
self._allow_feature_weights)
return tf.reshape(embedding, [-1, num_features * embedding_size])
def _BuildNetwork(self, feature_endpoints, return_average=False):
"""Builds a feed-forward part of the net given features as input.
The network topology is already defined in the constructor, so multiple
    calls to _BuildNetwork build multiple networks whose parameters are all
shared. It is the source of the input features and the use of the output
that distinguishes each network.
Args:
feature_endpoints: tensors with input features to the network
return_average: whether to use moving averages as model parameters
Returns:
logits: output of the final layer before computing softmax
"""
assert len(feature_endpoints) == self._feature_size
# Create embedding layer.
embeddings = []
for i in range(self._feature_size):
embeddings.append(self._AddEmbedding(feature_endpoints[i],
self._num_features[i],
self._num_feature_ids[i],
self._embedding_sizes[i],
i,
return_average=return_average))
last_layer = tf.concat(1, embeddings)
last_layer_size = self.embedding_size
# Create ReLU layers.
for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
weights = self._AddParam(
[last_layer_size, hidden_layer_size],
tf.float32,
'weights_%d' % i,
self._ReluWeightInitializer(),
return_average=return_average)
bias = self._AddParam([hidden_layer_size],
tf.float32,
'bias_%d' % i,
self._relu_bias_init,
return_average=return_average)
last_layer = tf.nn.relu_layer(last_layer,
weights,
bias,
name='layer_%d' % i)
last_layer_size = hidden_layer_size
# Create softmax layer.
softmax_weight = self._AddParam(
[last_layer_size, self._num_actions],
tf.float32,
'softmax_weight',
tf.random_normal_initializer(stddev=self._softmax_init,
seed=self._seed),
return_average=return_average)
softmax_bias = self._AddParam(
[self._num_actions],
tf.float32,
'softmax_bias',
tf.zeros_initializer,
return_average=return_average)
logits = tf.nn.xw_plus_b(last_layer,
softmax_weight,
softmax_bias,
name='logits')
return {'logits': logits}
def _AddGoldReader(self, task_context, batch_size, corpus_name):
features, epochs, gold_actions = (
gen_parser_ops.gold_parse_reader(task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'gold_actions': tf.identity(gold_actions,
name='gold_actions'),
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features}
def _AddDecodedReader(self, task_context, batch_size, transition_scores,
corpus_name):
features, epochs, eval_metrics, documents = (
gen_parser_ops.decoded_parse_reader(transition_scores,
task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'eval_metrics': eval_metrics,
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features,
'documents': documents}
def _AddCostFunction(self, batch_size, gold_actions, logits):
"""Cross entropy plus L2 loss on weights and biases of the hidden layers."""
dense_golden = BatchedSparseToDense(gold_actions, self._num_actions)
cross_entropy = tf.div(
tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(
logits, dense_golden)), batch_size)
regularized_params = [tf.nn.l2_loss(p)
for k, p in self.params.items()
if k.startswith('weights') or k.startswith('bias')]
l2_loss = 1e-4 * tf.add_n(regularized_params) if regularized_params else 0
return {'cost': tf.add(cross_entropy, l2_loss, name='cost')}
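  # Note (illustrative, not part of the original file): the cost above is
  #   (1 / batch_size) * sum_i xent(logits_i, gold_i)
  #     + 1e-4 * sum_k ||theta_k||^2 / 2,
  # where theta_k ranges over the hidden-layer weights and biases only
  # (tf.nn.l2_loss already includes the factor of 1/2).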
def AddEvaluation(self,
task_context,
batch_size,
evaluation_max_steps=300,
corpus_name='documents'):
"""Builds the forward network only without the training operation.
Args:
task_context: file path from which to read the task context.
batch_size: batch size to request from reader op.
evaluation_max_steps: max number of parsing actions during evaluation,
only used in beam parsing.
corpus_name: name of the task input to read parses from.
Returns:
Dictionary of named eval nodes.
"""
def _AssignTransitionScores():
return tf.assign(nodes['transition_scores'],
nodes['logits'], validate_shape=False)
def _Pass():
return tf.constant(-1.0)
unused_evaluation_max_steps = evaluation_max_steps
with tf.name_scope('evaluation'):
nodes = self.evaluation
nodes['transition_scores'] = self._AddVariable(
[batch_size, self._num_actions], tf.float32, 'transition_scores',
tf.constant_initializer(-1.0))
nodes.update(self._AddDecodedReader(task_context, batch_size, nodes[
'transition_scores'], corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=self._use_averaging))
nodes['eval_metrics'] = cf.with_dependencies(
[tf.cond(tf.greater(tf.size(nodes['logits']), 0),
_AssignTransitionScores, _Pass)],
nodes['eval_metrics'], name='eval_metrics')
return nodes
def _IncrementCounter(self, counter):
return state_ops.assign_add(counter, 1, use_locking=True)
def _AddLearningRate(self, initial_learning_rate, decay_steps):
"""Returns a learning rate that decays by 0.96 every decay_steps.
Args:
initial_learning_rate: initial value of the learning rate
decay_steps: decay by 0.96 every this many steps
Returns:
learning rate variable.
"""
step = self.GetStep()
return cf.with_dependencies(
[self._IncrementCounter(step)],
tf.train.exponential_decay(initial_learning_rate,
step,
decay_steps,
0.96,
staircase=True))
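  # Note (illustrative, not part of the original file): with staircase=True the
  # schedule above is
  #   lr(step) = initial_learning_rate * 0.96 ** (step // decay_steps),
  # and the with_dependencies wrapper increments the step counter each time the
  # learning rate is evaluated.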
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
"""Embeddings at the given index will be set to pretrained values."""
def _Initializer(shape, dtype=tf.float32):
unused_dtype = dtype
t = gen_parser_ops.word_embedding_initializer(
vectors=embeddings_path,
task_context=task_context,
embedding_init=self._embedding_init)
t.set_shape(shape)
return t
self._pretrained_embeddings[index] = _Initializer
def AddTraining(self,
task_context,
batch_size,
learning_rate=0.1,
decay_steps=4000,
momentum=0.9,
corpus_name='documents'):
"""Builds a trainer to minimize the cross entropy cost function.
Args:
task_context: file path from which to read the task context
batch_size: batch size to request from reader op
learning_rate: initial value of the learning rate
decay_steps: decay learning rate by 0.96 every this many steps
momentum: momentum parameter used when training with momentum
corpus_name: name of the task input to read parses from
Returns:
Dictionary of named training nodes.
"""
with tf.name_scope('training'):
nodes = self.training
nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=False))
nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],
nodes['logits']))
# Add the optimizer
if self._only_train:
trainable_params = [v
for k, v in self.params.iteritems()
if k in self._only_train]
else:
trainable_params = self.params.values()
lr = self._AddLearningRate(learning_rate, decay_steps)
optimizer = tf.train.MomentumOptimizer(lr,
momentum,
use_locking=self._use_locking)
train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
for param in trainable_params:
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer)
self.variables[slot.name] = slot
numerical_checks = [
tf.check_numerics(param,
message='Parameter is not finite.')
for param in trainable_params
if param.dtype.base_dtype in [tf.float32, tf.float64]
]
check_op = tf.group(*numerical_checks)
avg_update_op = tf.group(*self._averaging.values())
train_ops = [train_op]
if self._check_parameters:
train_ops.append(check_op)
if self._use_averaging:
train_ops.append(avg_update_op)
nodes['train_op'] = tf.group(*train_ops, name='train_op')
return nodes
def AddSaver(self, slim_model=False):
"""Adds ops to save and restore model parameters.
Args:
slim_model: whether only averaged variables are saved.
Returns:
the saver object.
"""
# We have to put the save op in the root scope otherwise running
# "save/restore_all" won't find the "save/Const" node it expects.
with tf.name_scope(None):
variables_to_save = self.params.copy()
variables_to_save.update(self.variables)
if slim_model:
for key in variables_to_save.keys():
if not key.endswith('avg_var'):
del variables_to_save[key]
self.saver = tf.train.Saver(variables_to_save)
return self.saver
| models-master | syntaxnet/syntaxnet/graph_builder.py |
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lexicon_builder."""
# pylint: disable=no-name-in-module,unused-import,g-bad-import-order,maybe-no-member
import os.path
import tensorflow as tf
import syntaxnet.load_parser_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import sentence_pb2
from syntaxnet import task_spec_pb2
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
CONLL_DOC1 = u'''1 बात _ n NN _ _ _ _ _
2 गलत _ adj JJ _ _ _ _ _
3 हो _ v VM _ _ _ _ _
4 तो _ avy CC _ _ _ _ _
5 गुस्सा _ n NN _ _ _ _ _
6 सेलेब्रिटिज _ n NN _ _ _ _ _
7 को _ psp PSP _ _ _ _ _
8 भी _ avy RP _ _ _ _ _
9 आना _ v VM _ _ _ _ _
10 लाजमी _ adj JJ _ _ _ _ _
11 है _ v VM _ _ _ _ _
12 । _ punc SYM _ _ _ _ _'''
CONLL_DOC2 = u'''1 लेकिन _ avy CC _ _ _ _ _
2 अभिनेत्री _ n NN _ _ _ _ _
3 के _ psp PSP _ _ _ _ _
4 इस _ pn DEM _ _ _ _ _
5 कदम _ n NN _ _ _ _ _
6 से _ psp PSP _ _ _ _ _
7 वहां _ pn PRP _ _ _ _ _
8 रंग _ n NN _ _ _ _ _
9 में _ psp PSP _ _ _ _ _
10 भंग _ adj JJ _ _ _ _ _
11 पड़ _ v VM _ _ _ _ _
12 गया _ v VAUX _ _ _ _ _
13 । _ punc SYM _ _ _ _ _'''
TAGS = ['NN', 'JJ', 'VM', 'CC', 'PSP', 'RP', 'JJ', 'SYM', 'DEM', 'PRP', 'VAUX']
CATEGORIES = ['n', 'adj', 'v', 'avy', 'n', 'psp', 'punc', 'pn']
TOKENIZED_DOCS = u'''बात गलत हो तो गुस्सा सेलेब्रिटिज को भी आना लाजमी है ।
लेकिन अभिनेत्री के इस कदम से वहां रंग में भंग पड़ गया ।
'''
COMMENTS = u'# Line with fake comments.'
class LexiconBuilderTest(test_util.TensorFlowTestCase):
def setUp(self):
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
self.corpus_file = os.path.join(FLAGS.test_tmpdir, 'documents.conll')
self.context_file = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
def AddInput(self, name, file_pattern, record_format, context):
inp = context.input.add()
inp.name = name
inp.record_format.append(record_format)
inp.part.add().file_pattern = file_pattern
def WriteContext(self, corpus_format):
context = task_spec_pb2.TaskSpec()
self.AddInput('documents', self.corpus_file, corpus_format, context)
for name in ('word-map', 'lcword-map', 'tag-map',
'category-map', 'label-map', 'prefix-table',
'suffix-table', 'tag-to-category'):
self.AddInput(name, os.path.join(FLAGS.test_tmpdir, name), '', context)
logging.info('Writing context to: %s', self.context_file)
with open(self.context_file, 'w') as f:
f.write(str(context))
def ReadNextDocument(self, sess, doc_source):
doc_str, last = sess.run(doc_source)
if doc_str:
doc = sentence_pb2.Sentence()
doc.ParseFromString(doc_str[0])
else:
doc = None
return doc, last
def ValidateDocuments(self):
doc_source = gen_parser_ops.document_source(self.context_file, batch_size=1)
with self.test_session() as sess:
logging.info('Reading document1')
doc, last = self.ReadNextDocument(sess, doc_source)
self.assertEqual(len(doc.token), 12)
self.assertEqual(u'लाजमी', doc.token[9].word)
self.assertFalse(last)
logging.info('Reading document2')
doc, last = self.ReadNextDocument(sess, doc_source)
self.assertEqual(len(doc.token), 13)
self.assertEqual(u'भंग', doc.token[9].word)
self.assertFalse(last)
logging.info('Hitting end of the dataset')
doc, last = self.ReadNextDocument(sess, doc_source)
self.assertTrue(doc is None)
self.assertTrue(last)
def ValidateTagToCategoryMap(self):
with file(os.path.join(FLAGS.test_tmpdir, 'tag-to-category'), 'r') as f:
entries = [line.strip().split('\t') for line in f.readlines()]
for tag, category in entries:
self.assertIn(tag, TAGS)
self.assertIn(category, CATEGORIES)
def BuildLexicon(self):
with self.test_session():
gen_parser_ops.lexicon_builder(task_context=self.context_file).run()
def testCoNLLFormat(self):
self.WriteContext('conll-sentence')
logging.info('Writing conll file to: %s', self.corpus_file)
with open(self.corpus_file, 'w') as f:
f.write((CONLL_DOC1 + u'\n\n' + CONLL_DOC2 + u'\n')
.replace(' ', '\t').encode('utf-8'))
self.ValidateDocuments()
self.BuildLexicon()
self.ValidateTagToCategoryMap()
def testCoNLLFormatExtraNewlinesAndComments(self):
self.WriteContext('conll-sentence')
with open(self.corpus_file, 'w') as f:
f.write((u'\n\n\n' + CONLL_DOC1 + u'\n\n\n' + COMMENTS +
u'\n\n' + CONLL_DOC2).replace(' ', '\t').encode('utf-8'))
self.ValidateDocuments()
self.BuildLexicon()
self.ValidateTagToCategoryMap()
def testTokenizedTextFormat(self):
self.WriteContext('tokenized-text')
with open(self.corpus_file, 'w') as f:
f.write(TOKENIZED_DOCS.encode('utf-8'))
self.ValidateDocuments()
self.BuildLexicon()
def testTokenizedTextFormatExtraNewlines(self):
self.WriteContext('tokenized-text')
with open(self.corpus_file, 'w') as f:
f.write((u'\n\n\n' + TOKENIZED_DOCS + u'\n\n\n').encode('utf-8'))
self.ValidateDocuments()
self.BuildLexicon()
if __name__ == '__main__':
googletest.main()
| models-master | syntaxnet/syntaxnet/lexicon_builder_test.py |
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for english_tokenizer."""
# pylint: disable=no-name-in-module,unused-import,g-bad-import-order,maybe-no-member
import os.path
import tensorflow as tf
import syntaxnet.load_parser_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import sentence_pb2
from syntaxnet import task_spec_pb2
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
class TextFormatsTest(test_util.TensorFlowTestCase):
def setUp(self):
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
self.corpus_file = os.path.join(FLAGS.test_tmpdir, 'documents.conll')
self.context_file = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
def AddInput(self, name, file_pattern, record_format, context):
inp = context.input.add()
inp.name = name
inp.record_format.append(record_format)
inp.part.add().file_pattern = file_pattern
def WriteContext(self, corpus_format):
context = task_spec_pb2.TaskSpec()
self.AddInput('documents', self.corpus_file, corpus_format, context)
for name in ('word-map', 'lcword-map', 'tag-map',
'category-map', 'label-map', 'prefix-table',
'suffix-table', 'tag-to-category'):
self.AddInput(name, os.path.join(FLAGS.test_tmpdir, name), '', context)
logging.info('Writing context to: %s', self.context_file)
with open(self.context_file, 'w') as f:
f.write(str(context))
def ReadNextDocument(self, sess, sentence):
sentence_str, = sess.run([sentence])
if sentence_str:
sentence_doc = sentence_pb2.Sentence()
sentence_doc.ParseFromString(sentence_str[0])
else:
sentence_doc = None
return sentence_doc
def CheckTokenization(self, sentence, tokenization):
self.WriteContext('english-text')
logging.info('Writing text file to: %s', self.corpus_file)
with open(self.corpus_file, 'w') as f:
f.write(sentence)
sentence, _ = gen_parser_ops.document_source(
self.context_file, batch_size=1)
with self.test_session() as sess:
sentence_doc = self.ReadNextDocument(sess, sentence)
self.assertEqual(' '.join([t.word for t in sentence_doc.token]),
tokenization)
def testSimple(self):
self.CheckTokenization('Hello, world!', 'Hello , world !')
self.CheckTokenization('"Hello"', "`` Hello ''")
self.CheckTokenization('{"Hello@#$', '-LRB- `` Hello @ # $')
self.CheckTokenization('"Hello..."', "`` Hello ... ''")
self.CheckTokenization('()[]{}<>',
'-LRB- -RRB- -LRB- -RRB- -LRB- -RRB- < >')
self.CheckTokenization('Hello--world', 'Hello -- world')
self.CheckTokenization("Isn't", "Is n't")
self.CheckTokenization("n't", "n't")
self.CheckTokenization('Hello Mr. Smith.', 'Hello Mr. Smith .')
self.CheckTokenization("It's Mr. Smith's.", "It 's Mr. Smith 's .")
self.CheckTokenization("It's the Smiths'.", "It 's the Smiths ' .")
self.CheckTokenization('Gotta go', 'Got ta go')
self.CheckTokenization('50-year-old', '50-year-old')
def testUrl(self):
self.CheckTokenization('http://www.google.com/news is down',
'http : //www.google.com/news is down')
if __name__ == '__main__':
googletest.main()
| models-master | syntaxnet/syntaxnet/text_formats_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reader_ops."""
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import dictionary_pb2
from syntaxnet import graph_builder
from syntaxnet import sparse_pb2
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
# Creates a task context with the correct testing paths.
initial_task_context = os.path.join(
FLAGS.test_srcdir,
'syntaxnet/'
'testdata/context.pbtxt')
self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
with open(initial_task_context, 'r') as fin:
with open(self._task_context, 'w') as fout:
fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
.replace('OUTPATH', FLAGS.test_tmpdir))
# Creates necessary term maps.
with self.test_session() as sess:
gen_parser_ops.lexicon_builder(task_context=self._task_context,
corpus_name='training-corpus').run()
self._num_features, self._num_feature_ids, _, self._num_actions = (
sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
arg_prefix='brain_parser')))
def GetMaxId(self, sparse_features):
max_id = 0
for x in sparse_features:
for y in x:
f = sparse_pb2.SparseFeatures()
f.ParseFromString(y)
for i in f.id:
max_id = max(i, max_id)
return max_id
def testParsingReaderOp(self):
# Runs the reader over the test input for two epochs.
num_steps_a = 0
num_actions = 0
num_word_ids = 0
num_tag_ids = 0
num_label_ids = 0
batch_size = 10
with self.test_session() as sess:
(words, tags, labels), epochs, gold_actions = (
gen_parser_ops.gold_parse_reader(self._task_context,
3,
batch_size,
corpus_name='training-corpus'))
while True:
tf_gold_actions, tf_epochs, tf_words, tf_tags, tf_labels = (
sess.run([gold_actions, epochs, words, tags, labels]))
num_steps_a += 1
num_actions = max(num_actions, max(tf_gold_actions) + 1)
num_word_ids = max(num_word_ids, self.GetMaxId(tf_words) + 1)
num_tag_ids = max(num_tag_ids, self.GetMaxId(tf_tags) + 1)
num_label_ids = max(num_label_ids, self.GetMaxId(tf_labels) + 1)
self.assertIn(tf_epochs, [0, 1, 2])
if tf_epochs > 1:
break
# Runs the reader again, this time with a lot of added graph nodes.
num_steps_b = 0
with self.test_session() as sess:
num_features = [6, 6, 4]
num_feature_ids = [num_word_ids, num_tag_ids, num_label_ids]
embedding_sizes = [8, 8, 8]
hidden_layer_sizes = [32, 32]
# Here we aim to test the iteration of the reader op in a complex network,
# not the GraphBuilder.
parser = graph_builder.GreedyParser(
num_actions, num_features, num_feature_ids, embedding_sizes,
hidden_layer_sizes)
parser.AddTraining(self._task_context,
batch_size,
corpus_name='training-corpus')
sess.run(parser.inits.values())
while True:
tf_epochs, tf_cost, _ = sess.run(
[parser.training['epochs'], parser.training['cost'],
parser.training['train_op']])
num_steps_b += 1
self.assertGreaterEqual(tf_cost, 0)
self.assertIn(tf_epochs, [0, 1, 2])
if tf_epochs > 1:
break
# Assert that the two runs made the exact same number of steps.
logging.info('Number of steps in the two runs: %d, %d',
num_steps_a, num_steps_b)
self.assertEqual(num_steps_a, num_steps_b)
def testParsingReaderOpWhileLoop(self):
feature_size = 3
batch_size = 5
def ParserEndpoints():
return gen_parser_ops.gold_parse_reader(self._task_context,
feature_size,
batch_size,
corpus_name='training-corpus')
with self.test_session() as sess:
# The 'condition' and 'body' functions expect as many arguments as there
# are loop variables. 'condition' depends on the 'epoch' loop variable
# only, so we disregard the remaining unused function arguments. 'body'
# returns a list of updated loop variables.
def Condition(epoch, *unused_args):
return tf.less(epoch, 2)
def Body(epoch, num_actions, *feature_args):
# By adding one of the outputs of the reader op ('epoch') as a control
# dependency to the reader op we force the repeated evaluation of the
# reader op.
with epoch.graph.control_dependencies([epoch]):
features, epoch, gold_actions = ParserEndpoints()
num_actions = tf.maximum(num_actions,
tf.reduce_max(gold_actions, [0], False) + 1)
feature_ids = []
for i in range(len(feature_args)):
feature_ids.append(features[i])
return [epoch, num_actions] + feature_ids
epoch = ParserEndpoints()[-2]
num_actions = tf.constant(0)
loop_vars = [epoch, num_actions]
res = sess.run(
cf.While(Condition, Body, loop_vars, parallel_iterations=1))
logging.info('Result: %s', res)
self.assertEqual(res[0], 2)
def testWordEmbeddingInitializer(self):
def _TokenEmbedding(token, embedding):
e = dictionary_pb2.TokenEmbedding()
e.token = token
e.vector.values.extend(embedding)
return e.SerializeToString()
# Provide embeddings for the first three words in the word map.
records_path = os.path.join(FLAGS.test_tmpdir, 'sstable-00000-of-00001')
writer = tf.python_io.TFRecordWriter(records_path)
writer.write(_TokenEmbedding('.', [1, 2]))
writer.write(_TokenEmbedding(',', [3, 4]))
writer.write(_TokenEmbedding('the', [5, 6]))
del writer
with self.test_session():
embeddings = gen_parser_ops.word_embedding_initializer(
vectors=records_path,
task_context=self._task_context).eval()
self.assertAllClose(
np.array([[1. / (1 + 4) ** .5, 2. / (1 + 4) ** .5],
[3. / (9 + 16) ** .5, 4. / (9 + 16) ** .5],
[5. / (25 + 36) ** .5, 6. / (25 + 36) ** .5]]),
embeddings[:3,])
if __name__ == '__main__':
googletest.main()
| models-master | syntaxnet/syntaxnet/reader_ops_test.py |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A program to generate ASCII trees from conll files."""
import collections
import asciitree
import tensorflow as tf
import syntaxnet.load_parser_ops
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import sentence_pb2
from syntaxnet.ops import gen_parser_ops
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('task_context',
'syntaxnet/models/parsey_mcparseface/context.pbtxt',
'Path to a task context with inputs and parameters for '
'feature extractors.')
flags.DEFINE_string('corpus_name', 'stdin-conll',
                    'Name of the corpus input to read documents from.')
def to_dict(sentence):
"""Builds a dictionary representing the parse tree of a sentence.
Args:
sentence: Sentence protocol buffer to represent.
Returns:
Dictionary mapping tokens to children.
"""
token_str = ['%s %s %s' % (token.word, token.tag, token.label)
for token in sentence.token]
children = [[] for token in sentence.token]
root = -1
for i in range(0, len(sentence.token)):
token = sentence.token[i]
if token.head == -1:
root = i
else:
children[token.head].append(i)
def _get_dict(i):
d = collections.OrderedDict()
for c in children[i]:
d[token_str[c]] = _get_dict(c)
return d
tree = collections.OrderedDict()
tree[token_str[root]] = _get_dict(root)
return tree
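# Illustrative sketch (not part of the original file): for a two-token sentence
# where "runs" is the root and "John" attaches to it, to_dict returns a nested
# mapping of the (assumed) form
#   {'runs VBZ ROOT': {'John NNP nsubj': {}}},
# which asciitree.LeftAligned() renders as an indented tree in main().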
def main(unused_argv):
logging.set_verbosity(logging.INFO)
with tf.Session() as sess:
src = gen_parser_ops.document_source(batch_size=32,
corpus_name=FLAGS.corpus_name,
task_context=FLAGS.task_context)
sentence = sentence_pb2.Sentence()
while True:
documents, finished = sess.run(src)
logging.info('Read %d documents', len(documents))
for d in documents:
sentence.ParseFromString(d)
tr = asciitree.LeftAligned()
d = to_dict(sentence)
print 'Input: %s' % sentence.text
print 'Parse:'
print tr(d)
if finished:
break
if __name__ == '__main__':
tf.app.run()
| models-master | syntaxnet/syntaxnet/conll2tree.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from torchvision.utils import make_grid, save_image
import numpy as np
import argparse
import os
import sys
sys.path.append('vae_submodule')
from utils.helpers import FormatterNoDuplicate, check_bounds, set_seed
from utils.visualize import Visualizer
from utils.viz_helpers import get_samples
from disvae.utils.modelIO import load_model, load_metadata
from disvae.models.losses import get_loss_f
from evaluate_amortization_speed_function import evaluate_amortization_speed
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(
mode='Plain', color_scheme='Neutral', call_pdb=1)
def sample_gaussian(mean, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mean + std * eps
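# Note (illustrative, not part of the original file): sample_gaussian is the
# reparameterization trick, z = mean + exp(0.5 * logvar) * eps with
# eps ~ N(0, I), which keeps samples differentiable w.r.t. mean and logvar.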
def unflatten_latent(z_flat):
n = z_flat.shape[-1]
return z_flat[...,:n//2], z_flat[...,n//2:]
def estimate_elbo(x, z_flat, decoder):
latent_dist = unflatten_latent(z_flat)
latent_sample = sample_gaussian(*latent_dist)
latent_sample = latent_sample
recon_batch = decoder(latent_sample)
batch_size = x.shape[0]
log_likelihood = -F.binary_cross_entropy(recon_batch, x, reduce=False).sum(dim=[1,2,3])
mean, logvar = latent_dist
latent_kl = 0.5 * (-1 - logvar + mean.pow(2) + logvar.exp())
kl_to_prior = latent_kl.sum(dim=[-1])
assert log_likelihood.shape == kl_to_prior.shape
loss = log_likelihood - kl_to_prior
return loss
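# Note (illustrative, not part of the original file): estimate_elbo returns a
# single-sample estimate of
#   ELBO(x) = E_{z ~ q(z|x)}[log p(x|z)] - KL(q(z|x) || p(z)),
# where the KL term uses the closed form for a diagonal Gaussian against a
# standard normal prior, 0.5 * sum(-1 - logvar + mean^2 + exp(logvar)).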
def main():
model_dir = 'vae_submodule/results/VAE_mnist'
meta_data = load_metadata(model_dir)
model = load_model(model_dir).cuda()
model.eval() # don't sample from latent: use mean
dataset = meta_data['dataset']
loss_f = get_loss_f('VAE',
n_data=len(dataset),
device='cuda',
rec_dist='bernoulli',
reg_anneal=0)
batch_size = 1024
num_save = 15
data_samples = get_samples(dataset, batch_size, idcs=[25518, 13361, 22622]).cuda()
def amortization_model(data_samples):
latent_dist = model.encoder(data_samples)
latent_dist_flat = torch.cat(latent_dist, dim=-1)
return latent_dist_flat
def amortization_objective(latent_dist_flat, data_samples):
elbo = estimate_elbo(data_samples, latent_dist_flat, model.decoder)
return elbo
iterate_history, predicted_samples = evaluate_amortization_speed(
amortization_model=amortization_model,
amortization_objective=amortization_objective,
contexts=data_samples,
tag='vae',
fig_ylabel='ELBO',
adam_lr=5e-3,
num_iterations=2000,
maximize=True,
save_iterates=[0, 250, 500, 1000, 2000],
num_save=num_save,
)
iterate_history.append((-1, predicted_samples[:num_save]))
reconstructions = []
for i, latent_dist_flat in iterate_history:
latent_dist = unflatten_latent(latent_dist_flat)
latent_mean = latent_dist[0]
reconstructions.append(1.-model.decoder(latent_mean))
reconstructions.append(1.-data_samples[:num_save])
reconstructions = torch.cat(reconstructions, dim=0)
reconstructions = F.interpolate(reconstructions,
recompute_scale_factor=True, scale_factor=1.5, mode='bilinear')
fname = f'vae-samples.png'
save_image(reconstructions, fname, nrow=num_save)
if __name__ == '__main__':
main()
| amortized-optimization-tutorial-main | code/evaluate_amortization_speed_vae.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
params = {
"text.usetex" : True,
"font.family" : "serif",
"font.serif" : ["Computer Modern Serif"]
}
plt.rcParams.update(params)
import os
import time
def evaluate_amortization_speed(
amortization_model,
amortization_objective,
contexts,
tag,
fig_ylabel,
adam_lr=5e-3,
num_iterations=2000,
maximize=False,
iter_history_callback=None,
save_iterates=[],
num_save=8,
):
times = []
n_trials = 10
for i in range(n_trials+1):
start_time = time.time()
predicted_solutions = amortization_model(contexts)
if i > 0:
times.append(time.time()-start_time)
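    # Note (illustrative, not part of the original file): the first call is
    # excluded from the timing above so that one-time costs (e.g. CUDA context
    # or kernel initialization) do not skew the reported average runtime.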
amortized_objectives = amortization_objective(
predicted_solutions, contexts
).cpu().detach()
print(f'solution size: {predicted_solutions.shape[1]}')
print('--- amortization model')
print(f'average objective value: {amortized_objectives.mean():.2f}')
print(f'average runtime: {np.mean(times)*1000:.2f}ms')
iterates = torch.nn.Parameter(torch.zeros_like(predicted_solutions))
opt = torch.optim.Adam([iterates], lr=adam_lr)
objective_history = []
times = []
iterations = []
iterate_history = []
start_time = time.time()
for i in range(num_iterations+1):
objectives = amortization_objective(iterates, contexts)
mean_objective = objectives.mean()
if maximize:
mean_objective *= -1.
opt.zero_grad()
mean_objective.backward()
opt.step()
if i % 50 == 0:
iterations.append(i)
times.append(time.time()-start_time)
objective_history.append((objectives.mean().item(), objectives.std().item()))
print(i, objectives.mean().item())
if i in save_iterates:
iterate_history.append((i, iterates[:num_save].detach().clone()))
times = np.array(times)
figsize = (4,2)
fig, ax = plt.subplots(figsize=figsize, dpi=200)
objective_means, objective_stds = map(np.array, zip(*objective_history))
l, = ax.plot(iterations, objective_means)
ax.axhline(amortized_objectives.mean().cpu().detach(), color='k', linestyle='--')
ax.axhspan(amortized_objectives.mean()-amortized_objectives.std(),
amortized_objectives.mean()+amortized_objectives.std(), color='k', alpha=0.15)
ax.fill_between(
iterations, objective_means-objective_stds, objective_means+objective_stds,
color=l.get_color(), alpha=0.5)
ax.set_xlabel('Adam Iterations')
ax.set_ylabel(fig_ylabel)
ax.set_xlim(0, max(iterations))
# ax.set_ylim(0, 1000)
fig.tight_layout()
fname = f'{tag}-iter.pdf'
print(f'saving to {fname}')
fig.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
fig, ax = plt.subplots(figsize=figsize, dpi=200)
ax.axhline(amortized_objectives.mean(), color='k', linestyle='--')
ax.axhspan(amortized_objectives.mean()-amortized_objectives.std(),
amortized_objectives.mean()+amortized_objectives.std(), color='k', alpha=0.15)
l, = ax.plot(times, objective_means)
ax.fill_between(
times, objective_means-objective_stds, objective_means+objective_stds,
color=l.get_color(), alpha=0.5)
ax.set_xlim(0, max(times))
# ax.set_ylim(0, 1000)
ax.set_xlabel('Runtime (seconds)')
ax.set_ylabel(fig_ylabel)
fig.tight_layout()
fname = f'{tag}-time.pdf'
print(f'saving to {fname}')
fig.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
return iterate_history, predicted_solutions
| amortized-optimization-tutorial-main | code/evaluate_amortization_speed_function.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
from torch import nn
import numpy as np
import os
import matplotlib.pyplot as plt
plt.style.use('bmh')
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(
mode='Plain', color_scheme='Neutral', call_pdb=1)
def celestial_to_euclidean(ra, dec):
x = np.cos(dec)*np.cos(ra)
y = np.cos(dec)*np.sin(ra)
z = np.sin(dec)
return x, y, z
def euclidean_to_celestial(x, y, z):
sindec = z
cosdec = np.sqrt(x*x + y*y)
sinra = y / cosdec
cosra = x / cosdec
ra = np.arctan2(sinra, cosra)
dec = np.arctan2(sindec, cosdec)
return ra, dec
def euclidean_to_celestial_th(x, y, z):
sindec = z
cosdec = (x*x + y*y).sqrt()
sinra = y / cosdec
cosra = x / cosdec
ra = torch.atan2(sinra, cosra)
dec = torch.atan2(sindec, cosdec)
return ra, dec
def sphere_dist_th(x,y):
if x.ndim == 1:
x = x.unsqueeze(0)
if y.ndim == 1:
y = y.unsqueeze(0)
assert x.ndim == y.ndim == 2
inner = (x*y).sum(-1)
return torch.arccos(inner)
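# Illustrative sketch (not part of the original file): sphere_dist_th is the
# great-circle distance arccos(<x, y>) for unit vectors. The points below are
# assumed purely for demonstration; the helper is never called.
def _sphere_dist_example():
    north_pole = torch.tensor([[0., 0., 1.]])
    equator_point = torch.tensor([[1., 0., 0.]])
    return sphere_dist_th(north_pole, equator_point)  # ~= pi / 2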
class c_convex(nn.Module):
def __init__(self, n_components=4, gamma=0.5, seed=None):
super().__init__()
self.n_components = n_components
self.gamma = gamma
# Sample a random c-convex function
if seed is not None:
torch.manual_seed(seed)
self.ys = torch.randn(n_components, 3)
self.ys = self.ys / torch.norm(self.ys, 2, dim=-1, keepdim=True)
self.alphas = .7*torch.rand(self.n_components)
self.params = torch.cat((self.ys.view(-1), self.alphas.view(-1)))
def forward(self, xyz):
# TODO: Could be optimized
cs = []
for y, alpha in zip(self.ys, self.alphas):
ci = 0.5*sphere_dist_th(y, xyz)**2 + alpha
cs.append(ci)
cs = torch.stack(cs)
if self.gamma == None or self.gamma == 0.:
z = cs.min(dim=0).values
else:
z = -self.gamma*(-cs/self.gamma).logsumexp(dim=0)
return z
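# Note (illustrative, not part of the original file): with gamma > 0 the forward
# pass above is a soft minimum over the component costs,
#   z = -gamma * log(sum_i exp(-c_i / gamma)),
# which approaches min_i c_i as gamma -> 0.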
seeds = [8,9,2,31,4,20,16,7]
fs = [c_convex(seed=i) for i in seeds]
n_params = len(fs[0].params)
class AmortizedModel(nn.Module):
def __init__(self, n_params):
super().__init__()
self.base = nn.Sequential(
nn.Linear(n_params, n_hidden),
nn.ReLU(inplace=True),
nn.Linear(n_hidden, n_hidden),
nn.ReLU(inplace=True),
nn.Linear(n_hidden, 3)
)
def forward(self, p):
squeeze = p.ndim == 1
if squeeze:
p = p.unsqueeze(0)
assert p.ndim == 2
z = self.base(p)
z = z / z.norm(dim=-1, keepdim=True)
if squeeze:
z = z.squeeze(0)
return z
n_hidden = 128
torch.manual_seed(0)
model = AmortizedModel(n_params=n_params)
opt = torch.optim.Adam(model.parameters(), lr=5e-4)
xs = []
for i in range(100):
losses = []
xis = []
for f in fs:
pred_opt = model(f.params)
xis.append(pred_opt)
losses.append(f(pred_opt))
with torch.no_grad():
xis = torch.stack(xis)
xs.append(xis)
loss = sum(losses)
opt.zero_grad()
loss.backward()
opt.step()
xs = torch.stack(xs, dim=1)
pad = .1
n_sample = 100
ra = np.linspace(-np.pi+pad, np.pi-pad, n_sample)
dec= np.linspace(-np.pi/2+pad, np.pi/2-pad, n_sample)
ra_grid, dec_grid = np.meshgrid(ra,dec)
ra_grid_flat = ra_grid.ravel()
dec_grid_flat = dec_grid.ravel()
x_grid, y_grid, z_grid = celestial_to_euclidean(ra_grid_flat, dec_grid_flat)
p_grid = np.stack((x_grid, y_grid, z_grid), axis=-1)
p_grid_th = torch.from_numpy(p_grid).float()
for i, (f, xs_i) in enumerate(zip(fs, xs)):
nrow, ncol = 1, 1
fig, ax = plt.subplots(
nrow, ncol, figsize=(3*ncol, 2*nrow),
subplot_kw={'projection': 'mollweide'},
gridspec_kw = {'wspace':0, 'hspace':0}
)
with torch.no_grad():
f_grid = f(p_grid_th).numpy()
best_i = f_grid.argmin()
ra_opt, dec_opt= ra_grid_flat[best_i], dec_grid_flat[best_i]
f_grid = f_grid.reshape(ra_grid.shape)
n_levels = 10
ax.contourf(ra_grid, dec_grid, f_grid, n_levels, cmap='Purples')
x,y,z = xs_i.split(1,dim=-1)
ra, dec = euclidean_to_celestial_th(x,y,z)
ax.plot(ra, dec, color='#5499FF', lw=3, ls=':')
ax.scatter(ra_opt, dec_opt, marker='*', color='#AA0000',
s=100, zorder=10)
for s in ax.spines.values():
s.set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
fname = f'paper/fig/sphere/{i}.png'
plt.savefig(fname, transparent=True)
os.system(f'convert -trim {fname} {fname}')
| amortized-optimization-tutorial-main | code/train-sphere.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import argparse
import os
import sys
import pickle as pkl
import shutil
from omegaconf import OmegaConf
from collections import namedtuple
import dmc2gym
import matplotlib.pyplot as plt
plt.style.use('bmh')
from matplotlib import cm
from multiprocessing import Process
from svg.video import VideoRecorder
from svg import utils, dx
from evaluate_amortization_speed_function import evaluate_amortization_speed
def main():
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux',
call_pdb=1)
exp = torch.load('svg_submodule/trained-humanoid/latest.pt')
# Clean up logging after resuming the experiment code
del exp.logger
os.remove('eval.csv')
os.remove('train.csv')
observations = collect_eval_episode(exp)
# First try to predict the maximum value action
def amortization_model(observations):
actions, _, _ = exp.agent.actor(observations, compute_pi=False, compute_log_pi=False)
return actions
def amortization_objective(actions, observations, normalize=True):
q1, q2 = exp.agent.critic(observations, actions)
values = torch.min(q1, q2).squeeze()
if normalize:
values = normalize_values(values)
return values
with torch.no_grad():
expert_actions = amortization_model(observations)
zero_actions = torch.zeros_like(expert_actions)
expert_values = amortization_objective(expert_actions, observations, normalize=False)
zero_values = amortization_objective(zero_actions, observations, normalize=False)
def normalize_values(values):
"""normalize so that the expert value is 0 and the zero action is -1."""
norm_values = (values - expert_values) / (expert_values - zero_values)
# assume we can't do better than the expert.
# otherwise the optimization overfits to the inaccurate model
# and value approximation.
norm_values[norm_values > 0.] = 0.
return norm_values
evaluate_amortization_speed(
amortization_model=amortization_model,
amortization_objective=amortization_objective,
contexts=observations,
tag='control-model-free',
fig_ylabel='Value',
adam_lr=5e-3,
num_iterations=500,
maximize=True,
)
# Next try to predict the solution to the short-horizon model-based
# control problem.
def amortization_model(observations):
num_batch = observations.shape[0]
action_seq, _, _ = exp.agent.dx.unroll_policy(
observations, exp.agent.actor, sample=False, last_u=True)
action_seq_flat = action_seq.transpose(0,1).reshape(num_batch, -1)
return action_seq_flat
def amortization_objective(action_seq_flat, observations, normalize=True):
num_batch = action_seq_flat.shape[0]
action_seq = action_seq_flat.reshape(num_batch, -1, exp.agent.action_dim).transpose(0, 1)
predicted_states = exp.agent.dx.unroll(observations, action_seq[:-1])
all_obs = torch.cat((observations.unsqueeze(0), predicted_states), dim=0)
xu = torch.cat((all_obs, action_seq), dim=2)
dones = exp.agent.done(xu).sigmoid().squeeze(dim=2)
not_dones = 1. - dones
not_dones = utils.accum_prod(not_dones)
last_not_dones = not_dones[-1]
rewards = not_dones * exp.agent.rew(xu).squeeze(2)
q1, q2 = exp.agent.critic(all_obs[-1], action_seq[-1])
q = torch.min(q1, q2).reshape(num_batch)
rewards[-1] = last_not_dones * q
rewards *= exp.agent.discount_horizon.unsqueeze(1)
values = rewards.sum(dim=0)
if normalize:
values = normalize_values(values)
return values
with torch.no_grad():
# used in the normalization
expert_action_seq = amortization_model(observations)
zero_action_seq = torch.zeros_like(expert_action_seq)
expert_values = amortization_objective(expert_action_seq, observations, normalize=False)
zero_values = amortization_objective(zero_action_seq, observations, normalize=False)
evaluate_amortization_speed(
amortization_model=amortization_model,
amortization_objective=amortization_objective,
contexts=observations,
tag='control-model-based',
fig_ylabel='Value',
adam_lr=5e-3,
num_iterations=500,
maximize=True,
)
def collect_eval_episode(exp):
device = 'cuda'
exp.env.set_seed(0)
obs = exp.env.reset()
done = False
total_reward = 0.
step = 0
observations = []
while not done:
if exp.cfg.normalize_obs:
mu, sigma = exp.replay_buffer.get_obs_stats()
obs = (obs - mu) / sigma
obs = torch.FloatTensor(obs).to(device)
observations.append(obs)
action, _, _ = exp.agent.actor(obs, compute_pi=False, compute_log_pi=False)
action = action.clamp(min=exp.env.action_space.low.min(),
max=exp.env.action_space.high.max())
obs, reward, done, _ = exp.env.step(utils.to_np(action.squeeze(0)))
total_reward += reward
step += 1
print(f'+ eval episode reward: {total_reward}')
observations = torch.stack(observations, dim=0)
return observations
if __name__ == '__main__':
main()
| amortized-optimization-tutorial-main | code/evaluate_amortization_speed_control.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
# Initial problem with x^\star
N = 1000
x = np.linspace(-3.8, 5.0, N)
# y = -(x**2)*np.sin(x)
# y = (np.cos(x)**2) #- np.abs(x)
y = -x**2 + 7*np.sin(x+np.pi)
y = y/8.
nrow, ncol = 1, 2
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol*2.5,nrow*1.1), dpi=200)
ax = axs[0]
ax.axhline(0, color='k')
ax.plot(x, y-y.min(), color='k')
ustar = x[y.argmax()]
ax.axvline(ustar, color='#AA0000')
ax.text(0.13, 0.09, '$$\pi^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='left', va='bottom')
ax.axvline(ustar+2, color='#5499FF')
ax.text(0.54, 0.09, r'$$\pi_\theta(x)$$', color='#5499FF',
transform=ax.transAxes, ha='left', va='bottom')
ax.arrow(x=ustar+2, y=0.5, dx=-0.5, dy=0.,
width=0.1, color='#5499FF', zorder=10)
ax.text(0.7, 0.44, '$$Q(x, u)$$',
transform=ax.transAxes, ha='left', va='bottom')
ax.set_xlabel('$$u$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.set_title('Deterministic Policy', fontsize=12, pad=5)
# ax.set_ylabel('$$Q(x, u)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.1, .44)
ax = axs[1]
y = np.exp(y)
y -= y.min()
ax.plot(x, y, color='k') #, zorder=10)
ax.set_xlabel('$$u$$')
ax.xaxis.set_label_coords(.5, 0.01)
mu, sigma = ustar, 0.8
ystar = np.exp(-.5*((x-mu)/sigma)**2) #/ (sigma*np.sqrt(2.*np.pi))
ystar = ystar * y.sum() / ystar.sum()
ax.plot(x, ystar, color='#AA0000')
mu, sigma = ustar+2, 1.5
yhat = np.exp(-.5*((x-mu)/sigma)**2) #/ (sigma*np.sqrt(2.*np.pi))
yhat = yhat * y.sum() / yhat.sum()
ax.plot(x, yhat, color='#5499FF')
# I = [250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800]
I = [250, 300, 350, 400, 650, 700, 750, 800]
for i in I:
ax.arrow(x=x[i], y=yhat[i], dx=-0.5, dy=0.,
width=0.05,
color='#5499FF',
zorder=10)
ax.text(0.37, 0.74, '$$\pi^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='left', va='bottom')
ax.text(0.6, 0.45, r'$$\pi_\theta(x)$$', color='#5499FF',
transform=ax.transAxes, ha='left', va='bottom')
ax.text(0., 0.43, '$$\mathcal{Q}(x, u)$$',
transform=ax.transAxes, ha='left', va='bottom')
ax.axhline(0., color='k',zorder=-1)
# ax.set_ylabel('$${\mathcal{Q}}(x, u)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.1, .44)
fig.tight_layout()
for ax in axs:
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_title('Stochastic Policy', fontsize=12, pad=5)
fname = 'ctrl.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/ctrl.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
fig, ax = plt.subplots(figsize=(2.5,1.5), dpi=200)
def f(x):
return np.cos(x) + 0.2*np.abs(x-np.pi/2)
N = 100
x = np.linspace(-4.*np.pi, 2*np.pi, N)
y = f(x)
ax.plot(x, y, color='k')
sigmas = [1., 1.5, 2.5]
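# Monte Carlo estimate of the Gaussian-smoothed objective E_eps[f(x + sigma*eps)] for each noise scale.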
for sigma in sigmas:
ys = []
# Inefficiently doing this...
for xi in x:
eps = sigma*np.random.randn(50000)
yi = np.mean(f(xi+eps))
ys.append(yi)
ax.plot(x, ys, alpha=1., lw=2)
# ax.set_xlabel(r'$$\theta$$')
# ax.xaxis.set_label_coords(.5, 0.01)
# ax.set_ylabel(r'$${\mathcal L}(\hat y_\theta)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.07, .44)
# ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
# ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'smoothed-loss.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/smoothed-loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import shutil
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"],
"text.latex.preamble": r"\usepackage{amsfonts}",
})
# We will define a 1D density parameterized by \phi to maximize over
# with gradient steps, and implement this by discretizing over the domain.
phi = jnp.array([0., 1.5, .7, 6.])
@jax.jit
def compute_dist(x, phi):
    # Compute values at the discretized points in the domain.
v = jnp.exp(-0.5*((x-phi[0])/phi[1])**2 + phi[2]*jnp.sin(x*phi[3]))
dx = x[1:]-x[:-1]
y = v/sum(v[1:]*dx) # Normalize to be a proper distribution.
flow_x = flow(x, y) # Constrain the mean and variance.
# Compute the new probabilities.
J_flow = jnp.diag(jax.jacfwd(flow)(x, y))
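    # Change of variables: divide by the Jacobian of the flow so the transformed density still integrates to one.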
flow_y = y / J_flow
return flow_x, flow_y
@jax.jit
def mean(x, y):
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return sum(x*y*dx)
@jax.jit
def std(x, y):
mu = mean(x,y)
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return jnp.sqrt(sum(((x-mu)**2)*y*dx))
@jax.jit
def entr(x, y):
dx = x[1:]-x[:-1]
y = y[1:]
return -sum(y*jnp.log(y+1e-8)*dx)
@jax.jit
def flow(x, y):
# Normalize the domain so that the distribution has
# zero mean and identity variance.
return (x - mean(x,y)) / std(x, y)
@jax.jit
def loss(x, phi):
x, y = compute_dist(x, phi)
return -entr(x, y)
# Prepare the output directory
d = 'maxent-animation'
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
def plot(t):
nrow, ncol = 1, 2
fig, axs = plt.subplots(nrow, ncol, figsize=(ncol*3, nrow*2), dpi=200,
gridspec_kw={'wspace': .3, 'hspace': 0})
ax = axs[0]
ax.plot(entrs, color='k')
ax.set_xlabel('Updates', fontsize=10)
ax.set_title(r'Entropy ($\mathbb{H}_p[X]$)', fontsize=10)
ax.set_xlim(0, n_step)
ax.set_ylim(1.3, 1.45)
ax = axs[1]
ax.plot(x, y, color='k')
ax.set_ylim(0, 0.7)
ax.set_xlim(-3, 3)
ax.set_xlabel('$x$', fontsize=10)
ax.set_title('$p(x)$', fontsize=10)
for ax in axs:
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.suptitle(
r'$\max_{p} \mathbb{H}_p[X]\; \rm{subject\; to}\; \mathbb{E}_p[X] = \mu\;\rm{and}\;\rm{Var}_p[X]=\Sigma$')
fig.subplots_adjust(top=0.7)
fname = f'{d}/{t:04d}.png'
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
# os.system(f'pdfcrop {fname} {fname}')
os.system(f'convert -trim {fname} {fname}')
# jitted derivative of the loss with respect to phi
dloss_dphi = jax.jit(jax.grad(loss, argnums=1))
# Number of discretization points in the domain
# Decrease this to run faster
N = 1000
# The domain of the unprojected distribution
x_unproj = jnp.linspace(-5.0, 5.0, N)
entrs = []
x, y = compute_dist(x_unproj, phi)
entrs.append(entr(x,y))
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
# The step size can be much larger but it's set to this for the animation.
n_step = 100
step_size = 0.13
for t in range(n_step):
# Take a gradient step with respect to the
# parameters of the distribution
phi -= step_size*dloss_dphi(x_unproj, phi)
x, y = compute_dist(x_unproj, phi)
entrs.append(entr(x,y))
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
plot(t)
# By the end, we see that the entropy is the true maximal entropy
# of the Gaussian of (1/2)log(2\pi)+(1/2) \approx 1.42.
os.system(f'convert -delay 10 -loop 0 {d}/*.png {d}/maxent.gif')
| amortized-optimization-tutorial-main | code/figures/maxent-animation.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
# Initial problem with x^\star
N = 1000
x = np.linspace(-5.0, 5.0, N)
y = np.linspace(-10.0, 10.0, N)
X, Y = np.meshgrid(x, y)
Z = (Y-np.sin(X)-X*(1+0.1*np.cos(X)))**2
Z = 1./(1.+np.exp(-Z/80.))
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.text(0., 1., r'$$f(y; x)$$', color='#491386',
bbox=dict(facecolor='white', pad=0, alpha=0.9, edgecolor='none'),
transform=ax.transAxes, ha='left', va='top')
I = np.argmin(Z, axis=0)
xstar, ystar = x, y[I]
ax.plot(xstar, ystar, color='#AA0000', lw=3)
ax.text(.92, .8, '$$y^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='right', va='top')
ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'opt.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
# Regression loss
xhat, yhat = xstar.copy(), ystar.copy()
yhat = -0.5*yhat + 0.0*xhat*np.maximum(xhat, 0.) - \
0.23*xhat*np.minimum(xhat, 0.)
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.text(0., 1., r'$$f(y; x)$$', color='#491386',
bbox=dict(facecolor='white', pad=0, alpha=0.9, edgecolor='none'),
transform=ax.transAxes, ha='left', va='top')
I = np.argmin(Z, axis=0)
xstar, ystar = x, y[I]
ax.plot(xstar, ystar, color='#AA0000', lw=3)
ax.text(.92, .8, '$$y^\star(x)$$', color='#AA0000',
transform=ax.transAxes, ha='right', va='top')
ax.plot(xhat, yhat, color='#5499FF', lw=3)
ax.text(0.3, .57, r'$$\hat y_\theta(x)$$', color='#5499FF',
bbox=dict(facecolor='white', pad=0, alpha=0.6, edgecolor='none'),
transform=ax.transAxes, ha='left', va='bottom')
n_reg = 15
pad = 35
I = np.round(np.linspace(pad, len(y) - 1 - pad, n_reg)).astype(int)
for idx in I:
ax.plot(
(xstar[idx], xhat[idx]), (yhat[idx], ystar[idx]),
color='k', lw=1, solid_capstyle='round')
ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.set_title('Regression-Based', fontsize=12, pad=0)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'learning-reg.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
# Objective loss
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.plot(xstar, ystar, color='#AA0000', lw=3, ls='--')
ax.plot(xhat, yhat, color='#5499FF', lw=3)
I = np.round(np.linspace(pad, len(y) - 1 - pad, n_reg)).astype(int)
def f(x,y):
z = y-jnp.sin(x)-x*1.+0.1*jnp.cos(x)
z = z**2
z = 1./(1.+jnp.exp(-z/80.))
return z
df = jax.grad(f, argnums=1)
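# Gradient of the objective with respect to y, used to draw the descent directions at the predicted solutions.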
for idx in I:
x,y = jnp.array(xhat[idx]), jnp.array(yhat[idx])
z = f(x,y)
dz = df(x,y)
ax.quiver(
xhat[idx], yhat[idx], 0., -dz,
color='k', lw=1, scale=.2, zorder=10) #, solid_capstyle='round')
ax.set_ylabel('$$y$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.set_title('Objective-Based', fontsize=12, pad=0)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'learning-obj.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
# RL loss
fig, ax = plt.subplots(figsize=(2,1.5), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples')
ax.plot(xstar, ystar, color='#AA0000', lw=3, ls='--')
ax.plot(xhat, yhat, color='#5499FF', lw=3)
np.random.seed(2)
for _ in range(20):
p = np.linspace(0, 3., len(xhat))
p = p*np.flip(p)
q = 0.04*np.random.randn(len(xhat))
q = np.cumsum(q, axis=-1)
q = q*np.flip(q)
pert = 0.3*(p+q)*np.random.randn()
ax.plot(xhat, yhat+pert, color='#5499FF', lw=1, alpha=0.3)
# ax.set_xlabel('$$x$$')
ax.xaxis.set_label_coords(.5, 0.01)
# ax.set_title('RL-Based', fontsize=12, pad=0)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'learning-rl.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/main-example.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
N = 1000
x = np.linspace(-5., 5.0, N)
fig, ax = plt.subplots(figsize=(2,1.3), dpi=200)
y = x
ax.plot(x, y, color='k', linestyle='--', alpha=.5)
y = -2.*np.sin(x)+0.9*x*(1+0.1*np.cos(x))**2
ax.plot(x, y, color='k')
fp = max(x[np.abs(y-x) <= 5e-3]) # Numerically find the fixed-point :)
ax.scatter([0], [0], color='#AA0000', lw=1, s=70, zorder=10, marker='*')
ax.scatter([fp], [fp], color='#AA0000', lw=1, s=70, zorder=10, marker='*')
ax.scatter([-fp], [-fp], color='#AA0000', lw=1, s=70, zorder=10, marker='*')
# ax.set_ylabel('$$g(y)$$', rotation=0, labelpad=0)
# ax.yaxis.set_label_coords(-.07, .44)
# ax.set_xlabel('$$y$$')
# ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'fp.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/fixed-point.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
fig, ax = plt.subplots(figsize=(1.5,1.5), dpi=200)
N = 1000
x = np.linspace(-5.0, 5.0, N)
y = np.linspace(-5.0, 5.0, N)
X, Y = np.meshgrid(x, y)
a,b = 0., 10.
Z = X**2 + Y**2 + 1.4*X*Y
Z = 1./(1.+np.exp(-Z/10.))
fig, ax = plt.subplots(figsize=(2,1.7), dpi=200)
CS = ax.contourf(X, Y, Z, cmap='Purples', alpha=0.8)
Z = X**2 + Y**2
CS = ax.contour(X, Y, Z, colors='k', alpha=.7, linewidths=1, levels=5)
ax.scatter([0], [0], color='#AA0000', lw=1, s=50, zorder=10, marker='*')
ax.set_ylabel('$$y_1$$', rotation=0, labelpad=0)
ax.yaxis.set_label_coords(-.07, .44)
ax.set_xlabel('$$y_0$$')
ax.xaxis.set_label_coords(.5, 0.01)
ax.text(0., 1., r'$$f(y; x)$$', color='#491386',
bbox=dict(facecolor='white', pad=0, alpha=0.9, edgecolor='none'),
transform=ax.transAxes, ha='left', va='top')
ax.text(.3, .3, '$$y^\star(x)$$', color='#AA0000',
ha='left', va='bottom')
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'loss-comp.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/loss-comp.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
fig, ax = plt.subplots(figsize=(2,1.3), dpi=200)
N = 1000
y = np.linspace(-2.0, 2.0, N)
z = -y**3 - 10.*y
ax.plot(y, z, color='k')
I = N // 5
y0, z0 = y[I], z[I]
ax.scatter(y0, z0, color='#5499FF', lw=1, s=50, zorder=10, marker='.')
ax.text(y0, z0-3, r'$$\hat y^0_\theta$$', color='#5499FF',
ha='right', va='top')
lams = np.linspace(0., 12., 15)
for lam in lams:
z_ = z + (lam/2)*(y-y0)**2
ax.plot(y, z_, color='k', alpha=0.2)
# ax.set_title('$$f(y) + {\lambda\over 2}||y-\hat y_0||_2^2$$', size=10)
# ax.set_xlabel('$$y$$')
# ax.xaxis.set_label_coords(.5, 0.01)
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'imaml.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/imaml.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import jax
import jax.numpy as jnp
import os
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
plt.style.use('bmh')
phi = jnp.array([0., .7, 4.]) # Parameters to learn
@jax.jit
def compute_dist(x, phi):
# Compute values at the discretized points in the domain
v = jnp.exp(-0.5*(x-phi[0])**2 + phi[1]*jnp.sin(x*phi[2]))
dx = x[1:]-x[:-1]
y = v/sum(v[1:]*dx) # Normalize to be a proper distribution.
flow_x = flow(x, y) # Constrain the mean and variance.
J_flow = jnp.diag(jax.jacfwd(flow)(x, y))
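    # Change of variables: divide by the Jacobian of the flow so the transformed density still integrates to one.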
flow_y = y / J_flow
return flow_x, flow_y
@jax.jit
def mean(x, y):
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return sum(x*y*dx)
@jax.jit
def std(x, y):
mu = mean(x,y)
dx = x[1:]-x[:-1]
x = x[1:]
y = y[1:]
return jnp.sqrt(sum(((x-mu)**2)*y*dx))
@jax.jit
def entr(x, y):
dx = x[1:]-x[:-1]
y = y[1:]
return -sum(y*jnp.log(y+1e-8)*dx)
@jax.jit
def flow(x, y):
# Normalize the domain so that the distribution has
# zero mean and identity variance.
return (x - mean(x,y)) / std(x, y)
@jax.jit
def loss(x, phi):
x, y = compute_dist(x, phi)
return -entr(x, y)
dloss_dphi = jax.jit(jax.grad(loss, argnums=1))
fig, ax = plt.subplots(figsize=(2,1.3), dpi=200)
N = 1000 # Number of discretization points in the domain
# The domain of the unprojected distribution
x_unproj = jnp.linspace(-5.0, 5.0, N)
# Plot the initialization
x, y = compute_dist(x_unproj, phi)
ax.plot(x, y, color='k', alpha=0.5)
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
for t in range(20):
# Take a gradient step with respect to the
# parameters of the distribution
phi -= dloss_dphi(x_unproj, phi)
x, y = compute_dist(x_unproj, phi)
ax.plot(x, y, color='k', alpha=0.2)
print(f'entr={entr(x,y):.2f} (mean={mean(x, y):.2f} std={std(x,y):.2f})')
fig.tight_layout()
ax.set_xticks([])
ax.set_yticks([])
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fname = 'maxent.pdf'
plt.savefig(fname, transparent=True)
os.system(f'pdfcrop {fname} {fname}')
| amortized-optimization-tutorial-main | code/figures/maxent.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
import utils.modelZoo as modelZoo
from utils.load_utils import *
DATA_PATHS = {
#'video_data/Oliver/train/':1,
#'video_data/Chemistry/train/':2,
'video_data/Seth/train/':5,
#'video_data/Conan/train/':6,
}
#######################################################
## main training function
#######################################################
def main(args):
## variables
learning_rate = args.learning_rate
pipeline = args.pipeline
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
feature_in_dim, feature_out_dim = FEATURE_MAP[pipeline]
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
currBestLoss = 1e3
rng = np.random.RandomState(23456)
torch.manual_seed(23456)
torch.cuda.manual_seed(23456)
## DONE variables
## set up generator model
args.model = 'regressor_fcn_bn_32'
generator = getattr(modelZoo, args.model)()
generator.build_net(feature_in_dim, feature_out_dim, require_image=args.require_image)
generator.cuda()
reg_criterion = nn.L1Loss()
g_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate, weight_decay=1e-5)
generator.train()
## set up discriminator model
args.model = 'regressor_fcn_bn_discriminator'
discriminator = getattr(modelZoo, args.model)()
discriminator.build_net(feature_out_dim)
discriminator.cuda()
gan_criterion = nn.MSELoss()
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate, weight_decay=1e-5)
discriminator.train()
## DONE model
## load data from saved files
data_tuple = load_data(args, rng)
if args.require_image:
train_X, train_Y, test_X, test_Y, train_ims, test_ims = data_tuple
else:
train_X, train_Y, test_X, test_Y = data_tuple
train_ims, test_ims = None, None
## DONE: load data from saved files
## training job
kld_weight = 0.05
prev_save_epoch = 0
patience = 20
for epoch in range(args.num_epochs):
args.epoch = epoch
## train discriminator
if epoch > 100 and (epoch - prev_save_epoch) > patience:
print('early stopping at:', epoch)
break
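        # Alternate updates: train the discriminator every third epoch and the generator otherwise.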
if epoch > 0 and epoch % 3 == 0:
train_discriminator(args, rng, generator, discriminator, gan_criterion, d_optimizer, train_X, train_Y, train_ims=train_ims)
else:
train_generator(args, rng, generator, discriminator, reg_criterion, gan_criterion, g_optimizer, train_X, train_Y, train_ims=train_ims)
currBestLoss = val_generator(args, generator, discriminator, reg_criterion, g_optimizer, test_X, test_Y, currBestLoss, test_ims=test_ims)
#######################################################
## local helper methods
#######################################################
## function to load data from external files
def load_data(args, rng):
gt_windows = None
quant_windows = None
p0_paths = None
hand_ims = None
## load from external files
for key, value in DATA_PATHS.items():
key = os.path.join(args.base_path, key)
curr_p0, curr_p1, curr_paths, _ = load_windows(key, args.pipeline, require_image=args.require_image)
if gt_windows is None:
if args.require_image:
hand_ims = curr_p0[1]
curr_p0 = curr_p0[0]
gt_windows = curr_p0
quant_windows = curr_p1
p0_paths = curr_paths
else:
if args.require_image:
hand_ims = np.concatenate((hand_ims, curr_p0[1]), axis=0)
curr_p0 = curr_p0[0]
gt_windows = np.concatenate((gt_windows, curr_p0), axis=0)
quant_windows = np.concatenate((quant_windows, curr_p1), axis=0)
p0_paths = np.concatenate((p0_paths, curr_paths), axis=0)
    print('===> in/out', gt_windows.shape, quant_windows.shape)
if args.require_image:
print "===> hand_ims", hand_ims.shape
## DONE load from external files
## shuffle and set train/validation
N = gt_windows.shape[0]
train_N = int(N * 0.7)
idx = np.random.permutation(N)
train_idx, test_idx = idx[:train_N], idx[train_N:]
train_X, test_X = gt_windows[train_idx, :, :], gt_windows[test_idx, :, :]
train_Y, test_Y = quant_windows[train_idx, :, :], quant_windows[test_idx, :, :]
if args.require_image:
train_ims, test_ims = hand_ims[train_idx,:,:], hand_ims[test_idx,:,:]
train_ims = train_ims.astype(np.float32)
test_ims = test_ims.astype(np.float32)
print "====> train/test", train_X.shape, test_X.shape
train_X = np.swapaxes(train_X, 1, 2).astype(np.float32)
train_Y = np.swapaxes(train_Y, 1, 2).astype(np.float32)
test_X = np.swapaxes(test_X, 1, 2).astype(np.float32)
test_Y = np.swapaxes(test_Y, 1, 2).astype(np.float32)
body_mean_X, body_std_X, body_mean_Y, body_std_Y = calc_standard(train_X, train_Y, args.pipeline)
np.savez_compressed(args.model_path + '{}{}_preprocess_core.npz'.format(args.tag, args.pipeline),
body_mean_X=body_mean_X, body_std_X=body_std_X,
body_mean_Y=body_mean_Y, body_std_Y=body_std_Y)
train_X = (train_X - body_mean_X) / body_std_X
test_X = (test_X - body_mean_X) / body_std_X
train_Y = (train_Y - body_mean_Y) / body_std_Y
test_Y = (test_Y - body_mean_Y) / body_std_Y
print("=====> standardization done")
# Data shuffle
I = np.arange(len(train_X))
rng.shuffle(I)
train_X = train_X[I]
train_Y = train_Y[I]
if args.require_image:
train_ims = train_ims[I]
return (train_X, train_Y, test_X, test_Y, train_ims, test_ims)
## DONE shuffle and set train/validation
return (train_X, train_Y, test_X, test_Y)
## calc temporal deltas within sequences
def calc_motion(tensor):
res = tensor[:,:,:1] - tensor[:,:,:-1]
return res
## training discriminator function
def train_discriminator(args, rng, generator, discriminator, gan_criterion, d_optimizer, train_X, train_Y, train_ims=None):
generator.eval()
discriminator.train()
batchinds = np.arange(train_X.shape[0] // args.batch_size)
totalSteps = len(batchinds)
rng.shuffle(batchinds)
for bii, bi in enumerate(batchinds):
## setting batch data
idxStart = bi * args.batch_size
inputData_np = train_X[idxStart:(idxStart + args.batch_size), :, :]
outputData_np = train_Y[idxStart:(idxStart + args.batch_size), :, :]
inputData = Variable(torch.from_numpy(inputData_np)).cuda()
outputGT = Variable(torch.from_numpy(outputData_np)).cuda()
imsData = None
if args.require_image:
imsData_np = train_ims[idxStart:(idxStart + args.batch_size), :, :]
imsData = Variable(torch.from_numpy(imsData_np)).cuda()
## DONE setting batch data
with torch.no_grad():
fake_data = generator(inputData, image_=imsData).detach()
fake_motion = calc_motion(fake_data)
real_motion = calc_motion(outputGT)
fake_score = discriminator(fake_motion)
real_score = discriminator(real_motion)
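        # Least-squares GAN loss: real motion is pushed toward a score of 1 and generated motion toward 0.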
d_loss = gan_criterion(fake_score, torch.zeros_like(fake_score)) + gan_criterion(real_score, torch.ones_like(real_score))
d_optimizer.zero_grad()
d_loss.backward()
d_optimizer.step()
## training generator function
def train_generator(args, rng, generator, discriminator, reg_criterion, gan_criterion, g_optimizer, train_X, train_Y, train_ims=None):
discriminator.eval()
generator.train()
batchinds = np.arange(train_X.shape[0] // args.batch_size)
totalSteps = len(batchinds)
rng.shuffle(batchinds)
avgLoss = 0.
for bii, bi in enumerate(batchinds):
## setting batch data
idxStart = bi * args.batch_size
inputData_np = train_X[idxStart:(idxStart + args.batch_size), :, :]
outputData_np = train_Y[idxStart:(idxStart + args.batch_size), :, :]
inputData = Variable(torch.from_numpy(inputData_np)).cuda()
outputGT = Variable(torch.from_numpy(outputData_np)).cuda()
imsData = None
if args.require_image:
imsData_np = train_ims[idxStart:(idxStart + args.batch_size), :, :]
imsData = Variable(torch.from_numpy(imsData_np)).cuda()
## DONE setting batch data
output = generator(inputData, image_=imsData)
fake_motion = calc_motion(output)
with torch.no_grad():
fake_score = discriminator(fake_motion)
fake_score = fake_score.detach()
g_loss = reg_criterion(output, outputGT) + gan_criterion(fake_score, torch.ones_like(fake_score))
g_optimizer.zero_grad()
g_loss.backward()
g_optimizer.step()
avgLoss += g_loss.item() * args.batch_size
if bii % args.log_step == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'.format(args.epoch, args.num_epochs, bii, totalSteps,
avgLoss / (totalSteps * args.batch_size),
np.exp(avgLoss / (totalSteps * args.batch_size))))
## validating generator function
def val_generator(args, generator, discriminator, reg_criterion, g_optimizer, test_X, test_Y, currBestLoss, test_ims=None):
testLoss = 0
generator.eval()
discriminator.eval()
batchinds = np.arange(test_X.shape[0] // args.batch_size)
totalSteps = len(batchinds)
for bii, bi in enumerate(batchinds):
## setting batch data
idxStart = bi * args.batch_size
inputData_np = test_X[idxStart:(idxStart + args.batch_size), :, :]
outputData_np = test_Y[idxStart:(idxStart + args.batch_size), :, :]
inputData = Variable(torch.from_numpy(inputData_np)).cuda()
outputGT = Variable(torch.from_numpy(outputData_np)).cuda()
imsData = None
if args.require_image:
imsData_np = test_ims[idxStart:(idxStart + args.batch_size), :, :]
imsData = Variable(torch.from_numpy(imsData_np)).cuda()
## DONE setting batch data
output = generator(inputData, image_=imsData)
g_loss = reg_criterion(output, outputGT)
testLoss += g_loss.item() * args.batch_size
testLoss /= totalSteps * args.batch_size
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'.format(args.epoch, args.num_epochs, bii, totalSteps,
testLoss,
np.exp(testLoss)))
print('----------------------------------')
if testLoss < currBestLoss:
prev_save_epoch = args.epoch
checkpoint = {'epoch': args.epoch,
'state_dict': generator.state_dict(),
'g_optimizer': g_optimizer.state_dict()}
fileName = args.model_path + '/{}{}_checkpoint_e{}_loss{:.4f}.pth'.format(args.tag, args.pipeline, args.epoch, testLoss)
torch.save(checkpoint, fileName)
currBestLoss = testLoss
return currBestLoss
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--base_path', type=str, required=True, help='path to the directory where the data files are stored')
parser.add_argument('--pipeline', type=str, default='arm2wh', help='pipeline specifying which input/output joints to use')
parser.add_argument('--num_epochs', type=int, default=100, help='number of training epochs')
parser.add_argument('--batch_size', type=int, default=64, help='batch size for training')
parser.add_argument('--learning_rate', type=float, default=1e-3, help='learning rate for training G and D')
parser.add_argument('--require_image', action='store_true', help='use additional image feature or not')
parser.add_argument('--model_path', type=str, required=True , help='path for saving trained models')
    parser.add_argument('--log_step', type=int, default=100, help='step size for printing log info')
parser.add_argument('--tag', type=str, default='', help='prefix for naming purposes')
args = parser.parse_args()
print(args)
main(args)
| body2hands-main | train_gan.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import json
import numpy as np
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
import utils.modelZoo as modelZoo
from utils.load_utils import *
def main(args):
## variable initializations
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
rng = np.random.RandomState(23456)
torch.manual_seed(23456)
torch.cuda.manual_seed(23456)
pipeline = args.pipeline
feature_in_dim, feature_out_dim = FEATURE_MAP[pipeline]
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
## DONE variable initializations
## set up model/ load pretrained model
args.model = 'regressor_fcn_bn_32'
model = getattr(modelZoo,args.model)()
model.build_net(feature_in_dim, feature_out_dim, require_image=args.require_image)
pretrain_model = args.checkpoint
loaded_state = torch.load(pretrain_model, map_location=lambda storage, loc: storage)
model.load_state_dict(loaded_state['state_dict'], strict=False)
model = model.eval()
model.cuda()
criterion = nn.MSELoss()
## DONE set up model/ load pretrained model
## load/prepare data from external files
test_X, test_Y, test_Y_paths, _ = load_windows(args.data_dir, args.pipeline, require_image=args.require_image)
if args.require_image:
test_ims = test_X[1].astype(np.float32)
test_X = test_X[0]
test_X = np.swapaxes(test_X, 1, 2).astype(np.float32)
test_Y = np.swapaxes(test_Y, 1, 2).astype(np.float32)
# standardize
checkpoint_dir = os.path.split(pretrain_model)[0]
model_tag = os.path.basename(args.checkpoint).split(args.pipeline)[0]
preprocess = np.load(os.path.join(checkpoint_dir,'{}{}_preprocess_core.npz'.format(model_tag, args.pipeline)))
body_mean_X = preprocess['body_mean_X']
body_std_X = preprocess['body_std_X']
body_mean_Y = preprocess['body_mean_Y']
body_std_Y = preprocess['body_std_Y']
test_X = (test_X - body_mean_X) / body_std_X
test_Y = (test_Y - body_mean_Y) / body_std_Y
## DONE load/prepare data from external files
## pass loaded data into training
inputData = Variable(torch.from_numpy(test_X)).cuda()
outputGT = Variable(torch.from_numpy(test_Y)).cuda()
imsData = None
if args.require_image:
imsData = Variable(torch.from_numpy(test_ims)).cuda()
output = model(inputData, image_=imsData)
error = criterion(output, outputGT).data
print(">>> TOTAL ERROR: ", error)
print('----------------------------------')
## DONE pass loaded data into training
## preparing output for saving
output_np = output.data.cpu().numpy()
output_gt = outputGT.data.cpu().numpy()
output_np = output_np * body_std_Y + body_mean_Y
output_gt = output_gt * body_std_Y + body_mean_Y
output_np = np.swapaxes(output_np, 1, 2).astype(np.float32)
output_gt = np.swapaxes(output_gt, 1, 2).astype(np.float32)
save_results(test_Y_paths, output_np, args.pipeline, args.base_path, tag=args.tag)
## DONE preparing output for saving
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, required=True, help='path to checkpoint file (pretrained model)')
parser.add_argument('--base_path', type=str, required=True, help='absolute path to the base directory where all of the data is stored')
parser.add_argument('--data_dir', type=str, required=True, help='path to test data directory')
parser.add_argument('--pipeline', type=str, default='arm2wh', help='pipeline specifying which input/output joints to use')
    parser.add_argument('--require_image', action='store_true', help='use additional image feature or not')
parser.add_argument('--tag', type=str, default='', help='prefix for naming purposes')
args = parser.parse_args()
print(args)
main(args)
| body2hands-main | sample.py |
import tensorflow as tf
import os
import sys
from nets.CPM import CPM
from data.DomeReader import DomeReader
from data.TsimonDBReader import TsimonDBReader
from data.RHDReader import RHDReader
from data.STBReader import STBReader
from data.MultiDataset import combineMultiDataset
from data.GAneratedReader import GAneratedReader
import utils.general
import utils.PAF
from utils.multigpu import average_gradients
from tensorflow.python.client import device_lib
num_gpu = sum([_.device_type == 'GPU' for _ in device_lib.list_local_devices()])
fine_tune = False
already_trained = 50000
train_para = {'lr': [1e-4, 1e-5],
'lr_iter': [80000],
'max_iter': 160000,
'show_loss_freq': 10,
'snapshot_freq': 5000,
'snapshot_dir': 'snapshots/Final_qual_hand_clear_zoom',
'finetune_dir': 'snapshots/Final_qual_hand_clear',
'loss_weight_PAF': 1.0,
}
PATH_TO_SNAPSHOTS = './{}/model-{}'.format(train_para['finetune_dir'], already_trained) # only used when fine_tune is True
ignore_PAF_2D = False
with tf.Graph().as_default(), tf.device('/cpu:0'):
domereader = DomeReader(mode='training', batch_size=5, shuffle=True, objtype=1, crop_noise=True)
domereader.crop_scale_noise_sigma = 0.4
domereader.crop_offset_noise_sigma = 0.2
rhdreader = RHDReader(mode='training', batch_size=2, shuffle=True, objtype=1, crop_noise=True)
rhdreader.crop_scale_noise_sigma = 0.4
rhdreader.crop_offset_noise_sigma = 0.2
tsimonreader = TsimonDBReader(mode='training', batch_size=1, shuffle=True, objtype=1, crop_noise=True)
tsimonreader.crop_scale_noise_sigma = 0.4
tsimonreader.crop_offset_noise_sigma = 0.2
# ganeratedReader = GAneratedReader(mode='training', batch_size=2, shuffle=True, objtype=1, crop_noise=True)
data = combineMultiDataset([
domereader.get(),
rhdreader.get(),
tsimonreader.get(),
# ganeratedReader.get(),
],
name_wanted=['image_crop', 'scoremap2d', 'hand_valid', 'PAF', 'PAF_type', 'mask_crop'])
# data = domereader.get()
# stbreader = STBReader(mode='training', batch_size=4, shuffle=True, objtype=1, crop_noise=True)
# data = stbreader.get()
for k, v in data.items():
data[k] = tf.split(v, num_gpu, 0)
if fine_tune:
global_step = tf.Variable(already_trained + 1, trainable=False, name="global_step")
else:
global_step = tf.Variable(0, trainable=False, name="global_step")
lr_scheduler = utils.general.LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
lr = lr_scheduler.get_lr(global_step)
opt = tf.train.AdamOptimizer(lr)
tower_grads = []
tower_losses = []
tower_losses_PAF = []
tower_losses_2d = []
with tf.variable_scope(tf.get_variable_scope()):
for ig in range(num_gpu):
with tf.device('/gpu:%d' % ig):
# build network
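                # CPM predicting 22 keypoint score maps and 20 part affinity fields with 3 channels (x, y, z) each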
net = CPM(out_chan=22, numPAF=20, crop_size=368, withPAF=True, PAFdim=3)
predicted_scoremaps, _, predicted_PAFs = net.inference(data['image_crop'][ig], train=True)
# Loss
assert len(predicted_scoremaps) == 6
s = data['scoremap2d'][ig].get_shape().as_list()
valid = tf.concat([data['hand_valid'][ig], tf.ones((s[0], 1), dtype=tf.bool)], axis=1)
valid = tf.cast(valid, tf.float32)
mask_scoremap = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
loss_2d = 0.0
# multiply mask_scoremap to mask out the invalid areas
for ip, predicted_scoremap in enumerate(predicted_scoremaps):
resized_scoremap = tf.image.resize_images(predicted_scoremap, (s[1], s[2]))
mean_over_pixel = tf.reduce_sum(tf.square((resized_scoremap - data['scoremap2d'][ig]) * mask_scoremap), [1, 2]) / (tf.reduce_sum(mask_scoremap, [1, 2]) + 1e-6)
loss_2d_ig = tf.reduce_sum(valid * mean_over_pixel) / (tf.reduce_sum(valid) + 1e-6)
loss_2d += loss_2d_ig
loss_2d /= len(predicted_scoremaps)
assert 'PAF' in data
loss_PAF = 0.0
valid_PAF = tf.cast(utils.PAF.getValidPAF(data['hand_valid'][ig], 1, PAFdim=3), tf.float32)
# multiply mask_PAF to mask out the invalid areas
s = data['PAF'][ig].get_shape().as_list()
mask_PAF = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1, 3]) # detach x, y, z
if ignore_PAF_2D:
mask_PAF2D = mask_PAF * tf.constant([0, 0, 0], dtype=tf.float32)
else:
mask_PAF2D = mask_PAF * tf.constant([1, 1, 0], dtype=tf.float32) # for the 2D case
mask_PAF = tf.where(data['PAF_type'][ig], mask_PAF, mask_PAF2D) # take out corresponding mask by PAF type
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1])
for ip, pred_PAF in enumerate(predicted_PAFs):
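                    # For samples with only 2D PAF labels (PAF_type False), supervise the unit (x, y)
                    # direction of the predicted PAF with the z channel masked out; 3D labels use the full vector.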
resized_PAF = tf.image.resize_images(pred_PAF, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
channelWisePAF = tf.reshape(resized_PAF, [s[0], s[1], s[2], -1, 3])
PAF_x2y2 = tf.sqrt(tf.reduce_sum(tf.square(channelWisePAF[:, :, :, :, 0:2]), axis=4)) + 1e-6
PAF_normed_x = channelWisePAF[:, :, :, :, 0] / PAF_x2y2
PAF_normed_y = channelWisePAF[:, :, :, :, 1] / PAF_x2y2
PAF_normed_z = tf.zeros(PAF_normed_x.get_shape(), dtype=tf.float32)
normed_PAF = tf.stack([PAF_normed_x, PAF_normed_y, PAF_normed_z], axis=4)
normed_PAF = tf.reshape(normed_PAF, [s[0], s[1], s[2], -1])
normed_PAF = tf.where(tf.logical_and(tf.not_equal(data['PAF'][ig], 0.0), tf.not_equal(resized_PAF, 0.0)),
normed_PAF, tf.zeros((s[0], s[1], s[2], s[3]), dtype=tf.float32)) # use normed_PAF only in pixels where PAF is not zero
final_PAF = tf.where(data['PAF_type'][ig], resized_PAF, normed_PAF)
# mean_over_pixel = tf.reduce_sum(tf.square((resized_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
mean_over_pixel = tf.reduce_sum(tf.square((final_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
loss_PAF_ig = tf.reduce_sum(valid_PAF * mean_over_pixel) / (tf.reduce_sum(valid_PAF) + 1e-6)
loss_PAF += loss_PAF_ig
loss_PAF /= len(predicted_PAFs)
loss = loss_2d + loss_PAF * train_para['loss_weight_PAF']
tf.get_variable_scope().reuse_variables()
tower_losses.append(loss)
tower_losses_PAF.append(loss_PAF)
tower_losses_2d.append(loss_2d)
grad = opt.compute_gradients(loss)
tower_grads.append(grad)
total_loss = tf.reduce_mean(tower_losses)
total_loss_PAF = tf.reduce_mean(tower_losses_PAF)
total_loss_2d = tf.reduce_mean(tower_losses_2d)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
tf.summary.scalar('loss', total_loss)
tf.summary.scalar('loss_PAF', total_loss_PAF)
tf.summary.scalar('loss_2d', total_loss_2d)
# init weights
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(train_para['snapshot_dir'] + '/train', sess.graph)
if not fine_tune:
start_iter = 0
net.init_vgg(sess)
else:
saver.restore(sess, PATH_TO_SNAPSHOTS)
start_iter = already_trained + 1
# snapshot dir
if not os.path.exists(train_para['snapshot_dir']):
os.mkdir(train_para['snapshot_dir'])
print('Created snapshot dir:', train_para['snapshot_dir'])
# Training loop
print('Starting to train ...')
for i in range(start_iter, train_para['max_iter']):
summary, _, loss_v, loss_2d_v, loss_PAF_v = sess.run([merged, apply_gradient_op, total_loss, total_loss_2d, total_loss_PAF])
train_writer.add_summary(summary, i)
if (i % train_para['show_loss_freq']) == 0:
print('Iteration %d\t Loss %.1e, Loss_2d %.1e, Loss_PAF %.1e' % (i, loss_v, loss_2d_v, loss_PAF_v))
sys.stdout.flush()
if (i % train_para['snapshot_freq']) == 0:
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=i)
print('Saved a snapshot.')
sys.stdout.flush()
print('Training finished. Saving final snapshot.')
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=train_para['max_iter'])
| body2hands-main | visualization/POF/training_PAF_hand.py |
import os
import numpy as np
import numpy.linalg as nl
import json
import pickle
import argparse
map_body25_to_body19 = list(range(8)) + list(range(9, 25)) # total of 24
parser = argparse.ArgumentParser()
parser.add_argument('--seqName', '-n', type=str)
parser.add_argument('--rootDir', '-r', type=str)
parser.add_argument('--count', '-c', type=int)
args = parser.parse_args()
seqName = args.seqName
root = args.rootDir
calib_file = os.path.join(root, 'calib.json')
with open(calib_file) as f:
calib_data = json.load(f)
frameRange = range(1, args.count + 1)
person_idx = -1
bs = []
ls = []
rs = []
fs = []
img_dirs = []
frame_indices = []
for i in frameRange:
img_file = os.path.join(root, "raw_image", '{}_{:08d}.png'.format(seqName, i))
assert os.path.isfile(img_file)
annot_2d = os.path.join(root, 'openpose_result', '{}_{:08d}_keypoints.json'.format(seqName, i))
assert os.path.exists(annot_2d)
with open(annot_2d) as f:
data = json.load(f)
# ideally there should be only one person
assert len(data['people']) == 1
ip = 0
joint2d = np.array(data["people"][ip]["pose_keypoints_2d"]).reshape(-1, 3)
left_hand2d = np.array(data["people"][ip]["hand_left_keypoints_2d"]).reshape(-1, 3)
right_hand2d = np.array(data["people"][ip]["hand_right_keypoints_2d"]).reshape(-1, 3)
face2d = np.array(data["people"][ip]["face_keypoints_2d"]).reshape(-1, 3)
bs.append(joint2d[map_body25_to_body19])
fs.append(face2d)
ls.append(left_hand2d)
rs.append(right_hand2d)
img_dirs.append(img_file)
frame_indices.append(i)
img_dirs = np.array(img_dirs)
bs = np.array(bs)
ls = np.array(ls)
rs = np.array(rs)
fs = np.array(fs)
frame_indices = np.array(frame_indices)
print('Openpose output collected: data dimension:')
print((len(ls), len(rs), len(fs), len(bs), len(img_dirs), len(frame_indices)))
with open('{}/{}.pkl'.format(root, seqName), 'wb') as f:
pickle.dump((bs, ls, rs, fs, img_dirs, calib_data, frame_indices), f)
| body2hands-main | visualization/POF/collect_openpose.py |
import tensorflow as tf
import os
import sys
from nets.CPM import CPM
from nets.Hourglass import Hourglass
from data.DomeReader import DomeReader
from data.HumanReader import HumanReader
from data.MultiDataset import combineMultiDataset
from data.COCOReader import COCOReader
import pickle
import utils.general
import utils.PAF
from utils.multigpu import average_gradients
from tensorflow.python.client import device_lib
num_gpu = sum([_.device_type == 'GPU' for _ in device_lib.list_local_devices()])
fine_tune = True
already_trained = 100000
train_para = {'lr': [1e-4, 1e-5],
'lr_iter': [100000],
'max_iter': 200000,
'show_loss_freq': 10,
'snapshot_freq': 5000,
'snapshot_dir': 'snapshots/PAF_body_headtop_domehumanCOCO_chest_noPAF',
'finetune_dir': 'snapshots/PAF_body_headtop_domehumanCOCO_chest_noPAF',
'loss_weight_PAF': 1.0,
}
PATH_TO_SNAPSHOTS = './{}/model-{}'.format(train_para['finetune_dir'], already_trained) # only used when fine_tune is True
numStage = 5
ignore_PAF_2D = True
with tf.Graph().as_default(), tf.device('/cpu:0'):
domereader = DomeReader(mode='training', batch_size=1, shuffle=True, objtype=0, crop_noise=True, full_only=False, head_top=True)
# domereader.rotate_augmentation = True
human36reader = HumanReader(mode='training', batch_size=2, shuffle=True, objtype=0, crop_noise=True)
# mpi3dreader = HumanReader(mode='training', name='MPI_INF_3DHP', batch_size=2, shuffle=True, objtype=0, crop_noise=True)
cocoreader = COCOReader(mode='training', batch_size=1, shuffle=True, objtype=0, crop_noise=True)
# cocoreader.rotate_augmentation = True
# upreader = HumanReader(mode='training', name='UP', batch_size=1, shuffle=True, objtype=0, crop_noise=True)
# surrealreader = HumanReader(mode='training', name='SURREAL', batch_size=3, shuffle=True, objtype=0, crop_noise=True)
# domereader.crop_size = 512
# human36reader.crop_size = 512
# cocoreader.crop_size = 512
data = combineMultiDataset([
domereader.get(),
human36reader.get(),
cocoreader.get(),
# mpi3dreader.get()
# upreader.get(),
# surrealreader.get()
],
name_wanted=['image_crop', 'scoremap2d', 'body_valid', 'PAF', 'PAF_type', 'mask_crop'])
for k, v in data.items():
data[k] = tf.split(v, num_gpu, 0)
if fine_tune:
global_step = tf.Variable(already_trained + 1, trainable=False, name="global_step")
else:
global_step = tf.Variable(0, trainable=False, name="global_step")
lr_scheduler = utils.general.LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
lr = lr_scheduler.get_lr(global_step)
opt = tf.train.AdamOptimizer(lr)
tower_grads = []
tower_losses = []
tower_losses_PAF = []
tower_losses_2d = []
with tf.variable_scope(tf.get_variable_scope()):
for ig in range(num_gpu):
with tf.device('/gpu:%d' % ig):
# build network
net = CPM(out_chan=21, crop_size=368, withPAF=True, PAFdim=3, numPAF=23, numStage=numStage)
predicted_scoremaps, _, predicted_PAFs = net.inference(data['image_crop'][ig], train=True)
# with tf.variable_scope('hourglass'):
# net = Hourglass(num_output_channel=20, PAF_dim=3, num_PAF=20, num_hourglass=numStage)
# predicted_scoremaps, predicted_PAFs = net.inference(data['image_crop'][ig])
# Loss
s = data['scoremap2d'][ig].get_shape().as_list()
valid = tf.concat([data['body_valid'][ig], tf.zeros((s[0], 1), dtype=tf.bool)], axis=1)
valid = tf.cast(valid, tf.float32)
mask_scoremap = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
loss_2d = 0.0
# multiply mask_scoremap to mask out the invalid areas
for ip, predicted_scoremap in enumerate(predicted_scoremaps):
resized_scoremap = tf.image.resize_images(predicted_scoremap, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
mean_over_pixel = tf.reduce_sum(tf.square((resized_scoremap - data['scoremap2d'][ig]) * mask_scoremap), [1, 2]) / (tf.reduce_sum(mask_scoremap, [1, 2]) + 1e-6)
loss_2d_ig = tf.reduce_sum(valid * mean_over_pixel) / (tf.reduce_sum(valid) + 1e-6)
loss_2d += loss_2d_ig
loss_2d /= len(predicted_scoremaps)
assert 'PAF' in data
loss_PAF = 0.0
valid_PAF = tf.cast(utils.PAF.getValidPAF(data['body_valid'][ig], 0, PAFdim=3), tf.float32)
# multiply mask_PAF to mask out the invalid areas
s = data['PAF'][ig].get_shape().as_list()
mask_PAF = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1, 3]) # detach x, y, z
if ignore_PAF_2D:
mask_PAF2D = mask_PAF * tf.constant([0, 0, 0], dtype=tf.float32)
else:
mask_PAF2D = mask_PAF * tf.constant([1, 1, 0], dtype=tf.float32) # for the 2D case
mask_PAF = tf.where(data['PAF_type'][ig], mask_PAF, mask_PAF2D) # take out corresponding mask by PAF type
mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1])
for ip, pred_PAF in enumerate(predicted_PAFs):
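                    # 2D-only samples (PAF_type False) are compared through the unit (x, y) direction below,
                    # but with ignore_PAF_2D set above their mask is zeroed, so only 3D PAF labels contribute.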
resized_PAF = tf.image.resize_images(pred_PAF, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
channelWisePAF = tf.reshape(resized_PAF, [s[0], s[1], s[2], -1, 3])
PAF_x2y2 = tf.sqrt(tf.reduce_sum(tf.square(channelWisePAF[:, :, :, :, 0:2]), axis=4)) + 1e-6
PAF_normed_x = channelWisePAF[:, :, :, :, 0] / PAF_x2y2
PAF_normed_y = channelWisePAF[:, :, :, :, 1] / PAF_x2y2
PAF_normed_z = tf.zeros(PAF_normed_x.get_shape(), dtype=tf.float32)
normed_PAF = tf.stack([PAF_normed_x, PAF_normed_y, PAF_normed_z], axis=4)
normed_PAF = tf.reshape(normed_PAF, [s[0], s[1], s[2], -1])
normed_PAF = tf.where(tf.logical_and(tf.not_equal(data['PAF'][ig], 0.0), tf.not_equal(resized_PAF, 0.0)),
normed_PAF, tf.zeros((s[0], s[1], s[2], s[3]), dtype=tf.float32)) # use normed_PAF only in pixels where PAF is not zero
final_PAF = tf.where(data['PAF_type'][ig], resized_PAF, normed_PAF)
# mean_over_pixel = tf.reduce_sum(tf.square((resized_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
mean_over_pixel = tf.reduce_sum(tf.square((final_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
loss_PAF_ig = tf.reduce_sum(valid_PAF * mean_over_pixel) / (tf.reduce_sum(valid_PAF) + 1e-6)
loss_PAF += loss_PAF_ig
loss_PAF /= len(predicted_PAFs)
loss = loss_2d + loss_PAF * train_para['loss_weight_PAF']
tf.get_variable_scope().reuse_variables()
tower_losses.append(loss)
tower_losses_PAF.append(loss_PAF)
tower_losses_2d.append(loss_2d)
grad = opt.compute_gradients(loss)
tower_grads.append(grad)
total_loss = tf.reduce_mean(tower_losses)
total_loss_PAF = tf.reduce_mean(tower_losses_PAF)
total_loss_2d = tf.reduce_mean(tower_losses_2d)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
tf.summary.scalar('loss', total_loss)
tf.summary.scalar('loss_PAF', total_loss_PAF)
tf.summary.scalar('loss_2d', total_loss_2d)
# init weights
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(train_para['snapshot_dir'] + '/train', sess.graph)
if not fine_tune:
start_iter = 0
if net.name == 'CPM':
net.init('./weights/openpose_body_3DPAF_randomz_headtop_chest.npy', sess)
# net.init('./weights/openpose_body_expanded_PAF.npy', sess)
elif net.name == 'Hourglass':
from tensorflow.contrib.framework import assign_from_values_fn
with open('weights/Hourglass_weights_processed.pkl', 'rb') as f:
hg_data = pickle.load(f)
map_trainable_variables = {i.name.replace('hourglass', 'my_model').replace(':0', ''): i.name for i in tf.trainable_variables()}
dic = dict()
for i, j in map_trainable_variables.items():
if i in hg_data:
dic[j] = hg_data[i]
init_fn = assign_from_values_fn(dic)
assert init_fn is not None
init_fn(sess)
else:
raise NotImplementedError
# net.init_vgg(sess)
else:
from utils.load_ckpt import load_weights_from_snapshot
load_weights_from_snapshot(sess, PATH_TO_SNAPSHOTS)
# saver.restore(sess, PATH_TO_SNAPSHOTS)
start_iter = already_trained + 1
# snapshot dir
if not os.path.exists(train_para['snapshot_dir']):
os.mkdir(train_para['snapshot_dir'])
print('Created snapshot dir:', train_para['snapshot_dir'])
# Training loop
print('Starting to train ...')
for i in range(start_iter, train_para['max_iter']):
# V = sess.run([resized_PAF, mask_PAF, PAF_x2y2, PAF_normed_x, PAF_normed_y, PAF_normed_z, normed_PAF, final_PAF, mean_over_pixel, loss_PAF_ig])
# import pdb
# pdb.set_trace()
summary, _, loss_v, loss_2d_v, loss_PAF_v = sess.run([merged, apply_gradient_op, total_loss, total_loss_2d, total_loss_PAF])
train_writer.add_summary(summary, i)
if (i % train_para['show_loss_freq']) == 0:
print('Iteration %d\t Loss %.1e, Loss_2d %.1e, Loss_PAF %.1e' % (i, loss_v, loss_2d_v, loss_PAF_v))
sys.stdout.flush()
if (i % train_para['snapshot_freq']) == 0 and i > start_iter:
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=i)
print('Saved a snapshot.')
sys.stdout.flush()
print('Training finished. Saving final snapshot.')
saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=train_para['max_iter'])
| body2hands-main | visualization/POF/training_e2e_PAF.py |
from __future__ import print_function, unicode_literals
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import matplotlib.pyplot as plt
import matplotlib.patches
from mpl_toolkits.mplot3d import Axes3D
import argparse
import cv2
import os
from time import time
import json
from nets.CPM import CPM
from utils.load_ckpt import load_weights_from_snapshot
import utils.general
import utils.keypoint_conversion
import utils.PAF
import pickle
from utils.smoothing import savitzky_golay
body_zoom = 1.8
# hand_zoom = 2.5 # dslr_hands5, dslr_hands6, youtube_talkshow1
hand_zoom = 1.5 # youtube_conduct4
TRACK_HAND = True
BACK_TRACK_THRESH = 2.0
# evaluate both hands and body
parser = argparse.ArgumentParser()
parser.add_argument('--visualize', '-v', action='store_true')
parser.add_argument('--seqName', '-s', type=str)
parser.add_argument('--path', '-p', type=str)
parser.add_argument('--start-from', type=int, default=1)
parser.add_argument('--end-index', type=int, default=-1)
parser.add_argument('--width', type=int, default=1920) # to determine whether a keypoint is out of image
parser.add_argument('--height', type=int, default=1080)
parser.add_argument('--save-image', action='store_true')
parser.add_argument('--freeze', '-f', action='store_true') # upperbody only
args = parser.parse_args()
assert os.path.isdir(args.path)
if not os.path.isdir(os.path.join(args.path, 'net_output')):
os.makedirs(os.path.join(args.path, 'net_output'))
assert os.path.isdir(os.path.join(args.path, 'net_output'))
if args.save_image:
for folder in ['/body_2d', '/lhand_2d', '/rhand_2d', '/paf_xy_body', '/paf_z_body', '/paf_xy_lhand', '/paf_z_lhand', '/paf_xy_rhand', '/paf_z_rhand', '/heatmap']:
try:
os.makedirs(args.path + folder)
except Exception as e:
print ('Folder {} exists'.format(args.path + folder))
start_from = args.start_from
end_index = args.end_index
image_root = os.path.join(args.path, 'raw_image')
pkl_file = os.path.join(args.path, '{}.pkl'.format(args.seqName))
with open(pkl_file, 'rb') as f:
pkl_data = pickle.load(f)
num_samples = len(pkl_data[0]) # number of frames collected in pkl
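# camera intrinsic matrix from the calibration data bundled into the pickle by collect_openpose.py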
K = np.array(pkl_data[5]['K'], dtype=np.float32)
s = [1, 368, 368, 3]
assert s[1] == s[2]
data = {
'bimage_crop': tf.placeholder_with_default(tf.zeros([s[0], s[1], s[2], 3], dtype=tf.float32),
shape=[s[0], s[1], s[2], 3]),
'limage_crop': tf.placeholder_with_default(tf.zeros([s[0], s[1], s[2], 3], dtype=tf.float32),
shape=[s[0], s[1], s[2], 3]),
'rimage_crop': tf.placeholder_with_default(tf.zeros([s[0], s[1], s[2], 3], dtype=tf.float32),
shape=[s[0], s[1], s[2], 3])
}
bcrop_center2d_origin = np.zeros((num_samples, 2), dtype=np.float32)
bscale2d_origin = np.zeros((num_samples,), dtype=np.float32)
# precompute the body bounding box for smoothing
for i in range(num_samples):
openpose_body = pkl_data[0][i, list(range(18)) + [1, 1], :2].astype(np.float32) # duplicate neck for headtop and chest
openpose_body_score = pkl_data[0][i, list(range(18)) + [0, 0], 2].astype(np.float32)
openpose_body_valid = (openpose_body_score > 0.01)
    if not openpose_body_valid.any():
        # no valid body keypoints: reuse the previous frame's crop and skip this frame
        if i > 0:
            bcrop_center2d_origin[i] = bcrop_center2d_origin[i - 1]
            bscale2d_origin[i] = bscale2d_origin[i - 1]
        continue
min_coord = np.amin(openpose_body[openpose_body_valid], axis=0)
max_coord = np.amax(openpose_body[openpose_body_valid], axis=0)
bcrop_center2d_origin[i] = 0.5 * (min_coord + max_coord)
fit_size = np.amax(np.maximum(max_coord - bcrop_center2d_origin[i], bcrop_center2d_origin[i] - min_coord))
# if (not openpose_body_valid[9]) and (not openpose_body_valid[10]) and (not openpose_body_valid[12]) and (not openpose_body_valid[13]):
if args.freeze or ((not openpose_body_valid[9]) and (not openpose_body_valid[10]) and (not openpose_body_valid[12]) and (not openpose_body_valid[13])):
# upper body only (detected by openpose)
# crop_size_best = 2 * fit_size * 3 # youtube_talkshow1
crop_size_best = 2 * fit_size * 4
else:
crop_size_best = 2 * fit_size * body_zoom
bscale2d_origin[i] = float(s[1]) / crop_size_best
bcrop_center2d_smooth = np.stack((savitzky_golay(bcrop_center2d_origin[:, 0], 21, 3), savitzky_golay(bcrop_center2d_origin[:, 1], 21, 3)), axis=1)
bscale2d_smooth = savitzky_golay(bscale2d_origin, 21, 3)
####
print('set bscale2d constant')
# bscale2d_smooth[1:] = bscale2d_smooth[0]
bscale2d_smooth[:-1] = bscale2d_smooth[-1]
if args.visualize:
plt.plot(bcrop_center2d_origin[:, 0])
plt.plot(bcrop_center2d_smooth[:, 0])
plt.show()
plt.plot(bcrop_center2d_origin[:, 1])
plt.plot(bcrop_center2d_smooth[:, 1])
plt.show()
plt.plot(bscale2d_origin)
plt.plot(bscale2d_smooth)
plt.show()
max_rsize = 0.0
max_lsize = 0.0
rhand_ref_frame = -1
lhand_ref_frame = -1
for i in range(num_samples):
openpose_rhand = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_rhand_score = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand_score'], 2].astype(np.float32)
openpose_rhand_valid = (openpose_rhand_score > 0.01)
if openpose_rhand_valid.any():
min_coord = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rfit_size = np.amax(max_coord - min_coord) / 2
if rfit_size > max_rsize:
max_rsize = rfit_size
rhand_ref_frame = i
openpose_lhand = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_lhand_score = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand_score'], 2].astype(np.float32)
openpose_lhand_valid = (openpose_lhand_score > 0.01)
if openpose_lhand_valid.any():
min_coord = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lfit_size = np.amax(max_coord - min_coord) / 2
if lfit_size > max_lsize:
max_lsize = lfit_size
lhand_ref_frame = i
assert max_rsize > 0
assert max_lsize > 0
rscale2d_ref = float(s[1]) / (2 * max_rsize * hand_zoom)
lscale2d_ref = float(s[1]) / (2 * max_lsize * hand_zoom)
bodynet = CPM(out_chan=21, crop_size=368, withPAF=True, PAFdim=3, numPAF=23)
handnet = CPM(out_chan=22, numPAF=20, crop_size=368, withPAF=True, PAFdim=3)
with tf.variable_scope('body'):
# feed through network
bheatmap_2d, _, bPAF = bodynet.inference(data['bimage_crop'], train=False)
with tf.variable_scope('hand', reuse=tf.AUTO_REUSE):
lheatmap_2d, _, lPAF = handnet.inference(data['limage_crop'], train=False)
# rheatmap_2d, _, rPAF = handnet.inference(data['rimage_crop'], train=False)
rheatmap_2d, _, rPAF = handnet.inference(data['rimage_crop'][:, :, ::-1, :], train=False) # flip right to left
s = data['bimage_crop'].get_shape().as_list()
data['bheatmap_2d'] = tf.image.resize_images(bheatmap_2d[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
data['bPAF'] = tf.image.resize_images(bPAF[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
s = data['limage_crop'].get_shape().as_list()
data['lheatmap_2d'] = tf.image.resize_images(lheatmap_2d[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
data['lPAF'] = tf.image.resize_images(lPAF[-1], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
s = data['rimage_crop'].get_shape().as_list()
data['rheatmap_2d'] = tf.image.resize_images(rheatmap_2d[-1][:, :, ::-1, :], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC) # flip back to right hand
data['rPAF'] = tf.image.resize_images(rPAF[-1][:, :, ::-1, :], (s[1], s[2]), tf.image.ResizeMethod.BICUBIC)
data['rPAF'] = data['rPAF'] * tf.constant([-1, 1, 1] * (60 // 3), dtype=tf.float32)
# Start TF
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.35)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
cpt = './snapshots/Final_qual_domeCOCO_chest_noPAF2D/model-390000'
load_weights_from_snapshot(sess, cpt, discard_list=['Adam', 'global_step', 'beta'], rename_dict={'CPM': 'body/CPM'})
cpt = './snapshots/Final_qual_hand_clear_zoom/model-160000'
load_weights_from_snapshot(sess, cpt, discard_list=['Adam', 'global_step', 'beta'], rename_dict={'CPM': 'hand/CPM'})
eval_list = ['bimage_crop', 'image', 'bcrop_center2d', 'bscale2d', 'bheatmap_2d', 'bPAF', 'body_uv_local', 'img_dir']
eval_list += ['limage_crop', 'lcrop_center2d', 'lscale2d', 'lheatmap_2d', 'lPAF', 'lhand_uv_local']
eval_list += ['rimage_crop', 'rcrop_center2d', 'rscale2d', 'rheatmap_2d', 'rPAF', 'rhand_uv_local']
eval_list += ['K', 'openpose_face', 'body_valid', 'left_hand_valid', 'right_hand_valid', 'openpose_body_score', 'openpose_lhand_score', 'openpose_rhand_score', 'openpose_face_score']
eval_list += ['openpose_foot', 'openpose_foot_score']
BODY_PAF_SELECT_INDEX = np.concatenate([np.arange(9), np.arange(10, 13), np.arange(14, 23)], axis=0)
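# PAF connections 9 and 13 (the shoulder -> ear links) are not returned by collect_PAF_vec, so this index
# re-aligns the 23 per-connection validity flags with the 21 rows of vec3ds used below.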
lcrop_center2d_origin = np.zeros((num_samples, 2), dtype=np.float32)
lscale2d_origin = np.zeros((num_samples), dtype=np.float32)
rcrop_center2d_origin = np.zeros((num_samples, 2), dtype=np.float32)
rscale2d_origin = np.zeros((num_samples), dtype=np.float32)
frame_indices = pkl_data[6]
for i, frame_index in enumerate(frame_indices):
if frame_index < start_from:
continue
if args.end_index > 0 and frame_index > args.end_index:
break
if frame_index == start_from:
start_i = i
print('Start running frame No. {:08d}'.format(frame_index))
# read the data here
filename = os.path.join(image_root, pkl_data[4][i])
image_v = cv2.imread(filename)[:, :, ::-1] # convert to RGB order
val_dict = {}
openpose_body = pkl_data[0][i, list(range(18)) + [1, 1], :2].astype(np.float32) # duplicate neck for headtop and chest
openpose_body_score = pkl_data[0][i, list(range(18)) + [0, 0], 2].astype(np.float32)
openpose_body_valid = (openpose_body_score > 0)
val_dict['openpose_body'] = openpose_body
val_dict['openpose_body_score'] = openpose_body_score
val_dict['openpose_body_valid'] = openpose_body_valid
val_dict['openpose_face'] = pkl_data[3][i, :, :2]
val_dict['openpose_face_score'] = pkl_data[3][i, :, 2]
val_dict['openpose_foot'] = pkl_data[0][i, 18:, :2]
val_dict['openpose_foot_score'] = pkl_data[0][i, 18:, 2]
"""
crop body and feed into network
"""
val_dict['bcrop_center2d'] = bcrop_center2d_smooth[i]
val_dict['bscale2d'] = bscale2d_smooth[i]
bcrop_center2d = bcrop_center2d_smooth[i]
bscale2d = bscale2d_smooth[i]
# compute the Homography
bH = np.array([[bscale2d, 0, s[2] / 2 - bscale2d * bcrop_center2d[0]], [0, bscale2d, s[1] / 2 - bscale2d * bcrop_center2d[1]]], dtype=np.float32)
bimage_crop_v = cv2.warpAffine(image_v, bH, (s[2], s[1]), flags=cv2.INTER_LANCZOS4)
bimage_crop_v_feed = np.expand_dims((bimage_crop_v / 255 - 0.5), axis=0)
bheatmap_2d, bPAF = [np.squeeze(_) for _ in sess.run([data['bheatmap_2d'], data['bPAF']], feed_dict={data['bimage_crop']: bimage_crop_v_feed})]
val_dict['bheatmap_2d'] = bheatmap_2d
val_dict['bPAF'] = bPAF
# store the wrist coordinate of previous frame, to help verify hand bounding boxes
if frame_index > start_from:
lwrist_last = borigin[7]
lwrist_valid_last = body_valid[7]
rwrist_last = borigin[4]
rwrist_valid_last = body_valid[4]
# 2D body detection
if frame_index == start_from or args.seqName == 'qualitative':
body2d_pred_v, bscore = utils.PAF.detect_keypoints2d_PAF(val_dict['bheatmap_2d'], val_dict['bPAF'])
else:
body2d_pred_v, bscore = utils.PAF.detect_keypoints2d_PAF(val_dict['bheatmap_2d'], val_dict['bPAF'], prev_frame=prev_frame)
prev_frame = body2d_pred_v
body2d_pred_v = body2d_pred_v[:20, :] # with chest
body_valid = (bscore > 0.30)
body2d_pred_v[np.logical_not(body_valid)] = 0 # must do this, otherwise PAF_to_3D error
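    # map detections from the 368x368 crop back to original image coordinates (184 = crop size / 2)
    # and drop keypoints that fall outside the frame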
borigin = (body2d_pred_v - 184) / val_dict['bscale2d'] + val_dict['bcrop_center2d']
bout = (borigin[:, 0] < 0) + (borigin[:, 1] < 0) + (borigin[:, 0] >= args.width) + (borigin[:, 1] >= args.height)
body2d_pred_v[bout] = 0.0
body_valid[bout] = False
# store the wrist coordinate of current frame, to help verify hand bounding boxes
if frame_index > start_from:
lwrist = borigin[7]
lwrist_valid = body_valid[7]
rwrist = borigin[4]
rwrist_valid = body_valid[4]
"""
crop hands and feed into network
"""
openpose_rhand = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_rhand_score = pkl_data[2][i, utils.keypoint_conversion.a4_to_main['openpose_rhand_score'], 2].astype(np.float32)
openpose_rhand_valid = (openpose_rhand_score > 0.01)
openpose_lhand = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand'], :2].astype(np.float32) # duplicate neck for headtop
openpose_lhand_score = pkl_data[1][i, utils.keypoint_conversion.a4_to_main['openpose_lhand_score'], 2].astype(np.float32)
openpose_lhand_valid = (openpose_lhand_score > 0.01)
val_dict['openpose_rhand'] = openpose_rhand
val_dict['openpose_rhand_score'] = openpose_rhand_score
val_dict['openpose_rhand_valid'] = openpose_rhand_valid
val_dict['openpose_lhand'] = openpose_lhand
val_dict['openpose_lhand_score'] = openpose_lhand_score
val_dict['openpose_lhand_valid'] = openpose_lhand_valid
lscale2d = lscale2d_ref
rscale2d = rscale2d_ref
if not TRACK_HAND or frame_index == start_from: # the first frame
if openpose_rhand_valid.any():
min_coord_rhand = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord_rhand = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rcrop_center2d = 0.5 * (min_coord_rhand + max_coord_rhand)
fit_size_rhand = np.amax(np.maximum(max_coord_rhand - rcrop_center2d, rcrop_center2d - min_coord_rhand))
crop_size_best_r = 2 * fit_size_rhand * hand_zoom
else:
rcrop_center2d = np.array([-1000., -1000.])
fit_size_rhand = 100
crop_size_best_r = 2 * fit_size_rhand * hand_zoom
if openpose_lhand_valid.any():
min_coord_lhand = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord_lhand = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lcrop_center2d = 0.5 * (min_coord_lhand + max_coord_lhand)
fit_size_lhand = np.amax(np.maximum(max_coord_lhand - lcrop_center2d, lcrop_center2d - min_coord_lhand))
crop_size_best_l = 2 * fit_size_lhand * hand_zoom
else:
lcrop_center2d = np.array([-1000., -1000.])
fit_size_lhand = 100
crop_size_best_l = 2 * fit_size_lhand * hand_zoom
if not TRACK_HAND:
rscale2d = float(s[1]) / crop_size_best_r
lscale2d = float(s[1]) / crop_size_best_l
else:
# flag, boxes = tracker.update(image_v)
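        # Track the hand crop centers with pyramidal Lucas-Kanade optical flow and verify the result with a
        # forward-backward consistency check: the point is flowed prev -> current and back again, and the
        # track is rejected if the round trip drifts more than BACK_TRACK_THRESH or the flow error is large.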
gray_prev_image = cv2.cvtColor(prev_image_v, cv2.COLOR_RGB2GRAY)
gray_current_image = cv2.cvtColor(image_v, cv2.COLOR_RGB2GRAY)
l_lk_params = {'winSize': (int(2 * lhand_track_size), int(2 * lhand_track_size)), 'maxLevel': 3}
lp, lstatus, error = cv2.calcOpticalFlowPyrLK(gray_prev_image, gray_current_image, lcenter.reshape(1, 2), None, **l_lk_params)
lp_2, lstatus_2, error_2 = cv2.calcOpticalFlowPyrLK(gray_current_image, gray_prev_image, lp, None, **l_lk_params)
if nl.norm(lp_2[0] - lcenter) > BACK_TRACK_THRESH or error[0] > 15:
print ('LK left hand failed.')
lstatus[0] = 0
r_lk_params = {'winSize': (int(2 * rhand_track_size), int(2 * rhand_track_size)), 'maxLevel': 3}
rp, rstatus, error = cv2.calcOpticalFlowPyrLK(gray_prev_image, gray_current_image, rcenter.reshape(1, 2), None, **r_lk_params)
rp_2, rstatus_2, error_2 = cv2.calcOpticalFlowPyrLK(gray_current_image, gray_prev_image, rp, None, **r_lk_params)
if nl.norm(rp_2[0] - rcenter) > BACK_TRACK_THRESH or error[0] > 15:
print ('LK right hand failed.')
rstatus[0] = 0
lcrop_center2d_last = lcrop_center2d
rcrop_center2d_last = rcrop_center2d
if lstatus[0]:
lcrop_center2d = lp[0]
elif openpose_lhand_valid.any():
min_coord_lhand = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord_lhand = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lcrop_center2d = 0.5 * (min_coord_lhand + max_coord_lhand)
elif lwrist_valid and lwrist_valid_last:
lcrop_center2d = lcrop_center2d_last + lwrist - lwrist_last
if rstatus[0]:
rcrop_center2d = rp[0]
elif openpose_rhand_valid.any():
min_coord_rhand = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord_rhand = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rcrop_center2d = 0.5 * (min_coord_rhand + max_coord_rhand)
elif rwrist_valid and rwrist_valid_last:
rcrop_center2d = rcrop_center2d_last + rwrist - rwrist_last
# rcrop_center2d = rcenter + rwrist - rwrist_last
            # check the distance between wrist & hand bbox, and the velocity of wrist & hand bbox
# Also, if valid keypoint is too few, then don't trust the tracking result.
        if np.sum(lhand_valid) < 5 or \
                (lwrist_valid and nl.norm(lwrist - lcrop_center2d) / lhand_track_size > 2) or \
                (lwrist_valid and lwrist_valid_last and
                 nl.norm(lwrist - lwrist_last - lcrop_center2d + lcrop_center2d_last) / lhand_track_size > 1):
print ('tracking left hand lost, starting from openpose')
if openpose_lhand_valid.any():
min_coord_lhand = np.amin(openpose_lhand[openpose_lhand_valid], axis=0)
max_coord_lhand = np.amax(openpose_lhand[openpose_lhand_valid], axis=0)
lcrop_center2d = 0.5 * (min_coord_lhand + max_coord_lhand)
elif lwrist_valid:
lcrop_center2d = lwrist
elif lwrist_valid_last:
lcrop_center2d = lwrist_last
else:
# If Openpose not available and no wrist is available, then do not update the cropping center
lcrop_center2d = lcrop_center2d_last
        if np.sum(rhand_valid) < 5 or \
                (rwrist_valid and nl.norm(rwrist - rcrop_center2d) / rhand_track_size > 2) or \
                (rwrist_valid and rwrist_valid_last and
                 nl.norm(rwrist - rwrist_last - rcrop_center2d + rcrop_center2d_last) / rhand_track_size > 1):
print ('tracking right hand lost, starting from openpose')
if openpose_rhand_valid.any():
min_coord_rhand = np.amin(openpose_rhand[openpose_rhand_valid], axis=0)
max_coord_rhand = np.amax(openpose_rhand[openpose_rhand_valid], axis=0)
rcrop_center2d = 0.5 * (min_coord_rhand + max_coord_rhand)
elif rwrist_valid:
rcrop_center2d = rwrist
elif rwrist_valid_last:
rcrop_center2d = rwrist_last
else:
# If Openpose not available and no wrist is available, then do not update the cropping center
rcrop_center2d = rcrop_center2d_last
rcrop_center2d_origin[i] = rcrop_center2d
val_dict['rcrop_center2d'] = rcrop_center2d
rscale2d_origin[i] = rscale2d
val_dict['rscale2d'] = rscale2d
rH = np.array([[rscale2d, 0, s[2] / 2 - rscale2d * rcrop_center2d[0]], [0, rscale2d, s[1] / 2 - rscale2d * rcrop_center2d[1]]], dtype=np.float32)
rimage_crop_v = cv2.warpAffine(image_v, rH, (s[2], s[1]), flags=cv2.INTER_LANCZOS4)
rimage_crop_v_feed = np.expand_dims((rimage_crop_v / 255 - 0.5), axis=0)
lcrop_center2d_origin[i] = lcrop_center2d
val_dict['lcrop_center2d'] = lcrop_center2d
lscale2d_origin[i] = lscale2d
val_dict['lscale2d'] = lscale2d
lH = np.array([[lscale2d, 0, s[2] / 2 - lscale2d * lcrop_center2d[0]], [0, lscale2d, s[1] / 2 - lscale2d * lcrop_center2d[1]]], dtype=np.float32)
limage_crop_v = cv2.warpAffine(image_v, lH, (s[2], s[1]), flags=cv2.INTER_LANCZOS4)
limage_crop_v_feed = np.expand_dims((limage_crop_v / 255 - 0.5), axis=0)
lheatmap_2d, lPAF, rheatmap_2d, rPAF = \
[np.squeeze(_) for _ in
sess.run([data['lheatmap_2d'], data['lPAF'], data['rheatmap_2d'], data['rPAF']],
feed_dict={data['limage_crop']: limage_crop_v_feed, data['rimage_crop']: rimage_crop_v_feed})]
val_dict['rheatmap_2d'] = rheatmap_2d
val_dict['rPAF'] = rPAF
val_dict['lheatmap_2d'] = lheatmap_2d
val_dict['lPAF'] = lPAF
lhand2d_pred_v, lscore = utils.PAF.detect_keypoints2d_PAF(val_dict['lheatmap_2d'], val_dict['lPAF'], objtype=1)
rhand2d_pred_v, rscore = utils.PAF.detect_keypoints2d_PAF(val_dict['rheatmap_2d'], val_dict['rPAF'], objtype=1)
lhand2d_pred_v = lhand2d_pred_v[:21, :]
rhand2d_pred_v = rhand2d_pred_v[:21, :]
lhand_valid = lscore > 0.20 # false means that openpose fails to give the correct bounding box for hands
rhand_valid = rscore > 0.20
lhand2d_pred_v[np.logical_not(lhand_valid)] = 0 # must do this, otherwise PAF_to_3D error
rhand2d_pred_v[np.logical_not(rhand_valid)] = 0 # must do this, otherwise PAF_to_3D error
# check whether the keypoint is out of image
lorigin = (lhand2d_pred_v - 184) / val_dict['lscale2d'] + val_dict['lcrop_center2d']
lout = (lorigin[:, 0] < 0) + (lorigin[:, 1] < 0) + (lorigin[:, 0] >= args.width) + (lorigin[:, 1] >= args.height)
lhand2d_pred_v[lout] = 0.0
lhand_valid[lout] = False
rorigin = (rhand2d_pred_v - 184) / val_dict['rscale2d'] + val_dict['rcrop_center2d']
rout = (rorigin[:, 0] < 0) + (rorigin[:, 1] < 0) + (rorigin[:, 0] >= args.width) + (rorigin[:, 1] >= args.height)
rhand2d_pred_v[rout] = 0.0
rhand_valid[rout] = False
if args.freeze:
# freeze the torso
body2d_pred_v[8:14] = 0
body_valid[8:14] = 0
body2d_pred_v[19] = 0
body_valid[19] = 0
# rescale 2D detection back to the original image
body_2d = {'uv_local': body2d_pred_v, 'scale2d': val_dict['bscale2d'], 'crop_center2d': val_dict['bcrop_center2d'], 'valid': body_valid}
lhand_2d = {'uv_local': lhand2d_pred_v, 'scale2d': val_dict['lscale2d'], 'crop_center2d': val_dict['lcrop_center2d'], 'valid': lhand_valid}
rhand_2d = {'uv_local': rhand2d_pred_v, 'scale2d': val_dict['rscale2d'], 'crop_center2d': val_dict['rcrop_center2d'], 'valid': rhand_valid}
total_keypoints_2d = utils.keypoint_conversion.assemble_total_2d(body_2d, lhand_2d, rhand_2d) # put back to original image size, and change the keypoint order
openpose_face = val_dict['openpose_face']
openpose_face[:, 0] *= (val_dict['openpose_face_score'] > 0.5) # Face must have a high threshold in case of occlusion.
openpose_face[:, 1] *= (val_dict['openpose_face_score'] > 0.5)
openpose_foot = val_dict['openpose_foot']
openpose_foot[:, 0] *= (val_dict['openpose_foot_score'] > 0.05)
openpose_foot[:, 1] *= (val_dict['openpose_foot_score'] > 0.05)
total_keypoints_2d = np.concatenate([total_keypoints_2d, openpose_face, openpose_foot], axis=0) # has dimension 20 + 21 + 21 + 70 + 6
# extract PAF vectors from network prediction
    body3d_pred_v, _ = utils.PAF.PAF_to_3D(body2d_pred_v, val_dict['bPAF'], objtype=0)  # back-project 2D body keypoints to a 3D skeleton using the predicted PAF directions
vec3ds = utils.PAF.collect_PAF_vec(body2d_pred_v, val_dict['bPAF'], objtype=0) # vec3ds has 18 rows, excluding shoulder to ear connection, only 14 used for fitting
lhand3d_pred_v, _ = utils.PAF.PAF_to_3D(lhand2d_pred_v, val_dict['lPAF'], objtype=1)
lvec3ds = utils.PAF.collect_PAF_vec(lhand2d_pred_v, val_dict['lPAF'], objtype=1)
rhand3d_pred_v, _ = utils.PAF.PAF_to_3D(rhand2d_pred_v, val_dict['rPAF'], objtype=1)
rvec3ds = utils.PAF.collect_PAF_vec(rhand2d_pred_v, val_dict['rPAF'], objtype=1)
body3d_pred_v[np.logical_not(body_valid)] = 0
lhand3d_pred_v[np.logical_not(lhand_valid)] = 0
rhand3d_pred_v[np.logical_not(rhand_valid)] = 0
bPAF_valid = utils.PAF.getValidPAFNumpy(body_valid, 0) # A PAF is valid only if both end points are valid.
lPAF_valid = utils.PAF.getValidPAFNumpy(lhand_valid, 1)
rPAF_valid = utils.PAF.getValidPAFNumpy(rhand_valid, 1)
vec3ds[np.logical_not(bPAF_valid[BODY_PAF_SELECT_INDEX])] = 0
lvec3ds[np.logical_not(lPAF_valid)] = 0
rvec3ds[np.logical_not(rPAF_valid)] = 0
if args.freeze:
total_keypoints_2d[-6:] = 0
vec3ds[:6] = np.array([0., 1., 0.])
vec3ds[-3:] = 0
# all limbs plus neck -> nose, neck -> headtop, 3 connections with chest, (additional 6 connection), left hand, right hand (14 + 3 + 6 + 20 + 20)
PAF_vec = np.concatenate((vec3ds[:13, :], vec3ds[-4:, :], np.zeros([6, 3]), lvec3ds, rvec3ds), axis=0)
with open(os.path.join(args.path, 'net_output', '{:012d}.txt'.format(frame_index)), 'w') as f:
f.write('2D keypoints:\n')
for kp in total_keypoints_2d:
f.write('{} {}\n'.format(kp[0], kp[1]))
f.write('PAF:\n')
for vec in PAF_vec:
f.write('{} {} {}\n'.format(vec[0], vec[1], vec[2]))
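        # per-hand confidence flags: a hand is treated as reliably detected only when its summed keypoint
        # score exceeds 10 (otherwise it is reported as blurry below)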
f.write('{}\n'.format(float(np.sum(lscore) > 10)))
f.write('{}\n'.format(float(np.sum(rscore) > 10)))
if (np.sum(lscore) < 10):
print('Left hand blurry.')
if (np.sum(rscore) < 10):
print('Right hand blurry.')
if lhand_valid.any():
lcenter = 0.5 * (np.amin(lorigin[lhand_valid], axis=0) + np.amax(lorigin[lhand_valid], axis=0)).astype(np.float32) # detection center
else:
lcenter = lcrop_center2d.astype(np.float32)
if rhand_valid.any():
rcenter = 0.5 * (np.amin(rorigin[rhand_valid], axis=0) + np.amax(rorigin[rhand_valid], axis=0)).astype(np.float32) # detection center
else:
rcenter = rcrop_center2d.astype(np.float32)
lhand_track_size = fit_size_lhand * lscale2d_origin[start_i] / lscale2d
rhand_track_size = fit_size_rhand * rscale2d_origin[start_i] / rscale2d
prev_image_v = image_v
if args.visualize:
nc = 3
nr = 4
fig = plt.figure(1)
ax1 = fig.add_subplot(nc, nr, 1)
plt.imshow(bimage_crop_v)
utils.general.plot2d(ax1, body2d_pred_v, valid_idx=body_valid, type_str=utils.general.type_strs[0], color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(nc, nr, 2)
ax2.imshow(limage_crop_v)
utils.general.plot2d(ax2, lhand2d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax3 = fig.add_subplot(nc, nr, 3)
ax3.imshow(rimage_crop_v)
utils.general.plot2d(ax3, rhand2d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax4 = fig.add_subplot(nc, nr, 4)
bPAF_xy, bPAF_z = utils.PAF.plot_all_PAF(val_dict['bPAF'], 3)
ax4.imshow(bPAF_xy)
ax5 = fig.add_subplot(nc, nr, 5)
ax5.imshow(bPAF_z)
ax6 = fig.add_subplot(nc, nr, 6)
plt.imshow(image_v)
utils.general.plot2d(ax6, total_keypoints_2d, type_str='total', s=5)
ax7 = fig.add_subplot(nc, nr, 7, projection='3d')
utils.general.plot3d(ax7, body3d_pred_v, valid_idx=body_valid, type_str=utils.general.type_strs[0], color=np.array([0.0, 0.0, 1.0]))
ax7.set_xlabel('X Label')
ax7.set_ylabel('Y Label')
ax7.set_zlabel('Z Label')
plt.axis('equal')
ax8 = fig.add_subplot(nc, nr, 8, projection='3d')
utils.general.plot3d(ax8, lhand3d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax8.set_xlabel('X Label')
ax8.set_ylabel('Y Label')
ax8.set_zlabel('Z Label')
plt.axis('equal')
ax9 = fig.add_subplot(nc, nr, 9, projection='3d')
utils.general.plot3d(ax9, rhand3d_pred_v, type_str=utils.general.type_strs[1], color=np.array([0.0, 0.0, 1.0]))
ax9.set_xlabel('X Label')
ax9.set_ylabel('Y Label')
ax9.set_zlabel('Z Label')
plt.axis('equal')
plt.show()
if args.save_image:
utils.general.plot2d_cv2(bimage_crop_v, body2d_pred_v, s=5, valid_idx=body_valid, use_color=False)
assert cv2.imwrite(os.path.join(args.path, 'body_2d', '{:04d}.png'.format(i)), bimage_crop_v[:, :, ::-1])
bPAF_xy, bPAF_z = utils.PAF.plot_all_PAF(val_dict['bPAF'], 3)
k = 1. / val_dict['bscale2d']
tx, ty = (val_dict['bcrop_center2d'] - 184 * k).astype(int)
M = np.array([[k, 0., tx], [0., k, ty]], dtype=np.float32)
resized_PAF_xy = cv2.warpAffine(bPAF_xy, M, (1920, 1080))[:args.height, :args.width, :]
resized_PAF_z = cv2.warpAffine(bPAF_z, M, (1920, 1080))[:args.height, :args.width, :]
assert cv2.imwrite(os.path.join(args.path, 'paf_xy_body', '{:04d}.png'.format(frame_index)), 255 - resized_PAF_xy[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_z_body', '{:04d}.png'.format(frame_index)), 255 - resized_PAF_z[:, :, ::-1])
utils.general.plot2d_cv2(limage_crop_v, lhand2d_pred_v, type_str='hand', s=5, use_color=True)
lPAF_xy, lPAF_z = utils.PAF.plot_all_PAF(val_dict['lPAF'], 3)
assert cv2.imwrite(os.path.join(args.path, 'lhand_2d', '{:04d}.png'.format(frame_index)), limage_crop_v[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_xy_lhand', '{:04d}.png'.format(frame_index)), 255 - lPAF_xy[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_z_lhand', '{:04d}.png'.format(frame_index)), 255 - lPAF_z[:, :, ::-1])
utils.general.plot2d_cv2(rimage_crop_v, rhand2d_pred_v, type_str='hand', s=5, use_color=True)
rPAF_xy, rPAF_z = utils.PAF.plot_all_PAF(val_dict['rPAF'], 3)
assert cv2.imwrite(os.path.join(args.path, 'rhand_2d', '{:04d}.png'.format(frame_index)), rimage_crop_v[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_xy_rhand', '{:04d}.png'.format(frame_index)), 255 - rPAF_xy[:, :, ::-1])
assert cv2.imwrite(os.path.join(args.path, 'paf_z_rhand', '{:04d}.png'.format(frame_index)), 255 - rPAF_z[:, :, ::-1])
| body2hands-main | visualization/POF/save_total_sequence.py |
import tensorflow as tf
import pickle
import os
from utils.ops import NetworkOps as ops
class handSegNet:
def __init__(self):
pass
def init_sess(self, sess):
file_name = './weights/handsegnet-rhd.pickle'
exclude_var_list = []
assert os.path.exists(file_name), "File not found."
with open(file_name, 'rb') as fi:
weight_dict = pickle.load(fi)
weight_dict = {k: v for k, v in weight_dict.items() if not any([x in k for x in exclude_var_list])}
if len(weight_dict) > 0:
init_op, init_feed = tf.contrib.framework.assign_from_values(weight_dict)
sess.run(init_op, init_feed)
print('Loaded %d variables from %s' % (len(weight_dict), file_name))
def inference_detection(self, image, train=False):
""" HandSegNet: Detects the hand in the input image by segmenting it.
Inputs:
image: [B, H, W, 3] tf.float32 tensor, Image with mean subtracted
train: bool, True in case weights should be trainable
Outputs:
scoremap_list_large: list of [B, 256, 256, 2] tf.float32 tensor, Scores for the hand segmentation classes
"""
with tf.variable_scope('HandSegNet'):
scoremap_list = list()
layers_per_block = [2, 2, 4, 4]
out_chan_list = [64, 128, 256, 512]
pool_list = [True, True, True, False]
# learn some feature representation, that describes the image content well
x = image
for block_id, (layer_num, chan_num, pool) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1):
for layer_id in range(layer_num):
x = ops.conv_relu(x, 'conv%d_%d' % (block_id, layer_id + 1), kernel_size=3, stride=1, out_chan=chan_num, trainable=train)
if pool:
x = ops.max_pool(x, 'pool%d' % block_id)
x = ops.conv_relu(x, 'conv5_1', kernel_size=3, stride=1, out_chan=512, trainable=train)
encoding = ops.conv_relu(x, 'conv5_2', kernel_size=3, stride=1, out_chan=128, trainable=train)
# use encoding to detect initial scoremap
x = ops.conv_relu(encoding, 'conv6_1', kernel_size=1, stride=1, out_chan=512, trainable=train)
scoremap = ops.conv(x, 'conv6_2', kernel_size=1, stride=1, out_chan=2, trainable=train)
scoremap_list.append(scoremap)
# upsample to full size
s = image.get_shape().as_list()
scoremap_list_large = [tf.image.resize_images(x, (s[1], s[2])) for x in scoremap_list]
return scoremap_list_large
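

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original pipeline): builds the segmentation graph, loads the
    # pretrained weights (assumes ./weights/handsegnet-rhd.pickle exists, the path hard-coded in init_sess)
    # and runs it on a dummy mean-subtracted image.
    import numpy as np
    image_ph = tf.placeholder(tf.float32, [1, 256, 256, 3])
    net = handSegNet()
    scoremap_list = net.inference_detection(image_ph, train=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        net.init_sess(sess)
        dummy = np.zeros((1, 256, 256, 3), dtype=np.float32)
        scoremap = sess.run(scoremap_list[-1], feed_dict={image_ph: dummy})
        print(scoremap.shape)  # (1, 256, 256, 2): per-pixel background / hand scores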
| body2hands-main | visualization/POF/utils/handSegNet.py |
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import utils.general
import skimage.feature
import json
import os
PAF_type = 0
allPAFConnection = [[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17], [1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [1, 19], [19, 8], [19, 11]]),
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
], # PAF type 0 (Original Openpose)
[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17],
[1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [2, 4], [5, 7], [8, 4], [11, 7], [8, 10], [11, 13]]), # augmented PAF
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
]] # PAF type 1 (My augmented PAF)
PAFConnection = allPAFConnection[PAF_type]
dist_thresh = 8
if os.path.exists('utils/default_PAF_lengths.json'):
with open('utils/default_PAF_lengths.json', 'r') as f:
default_PAF_length = json.load(f)
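    # default_PAF_length stores a default 3D bone length for every PAF connection (indexed by object type);
    # PAF_to_3D falls back to it when a joint's depth cannot be recovered from the PAF response, and
    # recon_skeleton_PAF uses it to rebuild a skeleton with standard bone lengths.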
def getValidPAF(valid, objtype, PAFdim):
# input "valid": a tensor containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
with tf.variable_scope('getValidPAF'):
assert objtype in (0, 1)
connection = tf.constant(np.repeat(PAFConnection[objtype], PAFdim, axis=0), dtype=tf.int64)
batch_size = valid.get_shape().as_list()[0]
PAF_valid = []
for ib in range(batch_size):
b_valid = valid[ib, :]
assert len(b_valid.get_shape().as_list()) == 1
indexed_valid = tf.gather(b_valid, connection, axis=0)
PAF_valid.append(tf.logical_and(indexed_valid[:, 0], indexed_valid[:, 1]))
PAF_valid = tf.stack(PAF_valid, axis=0)
return PAF_valid
def getValidPAFNumpy(valid, objtype):
# used in testing time
# input "valid": a numpy array containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
assert objtype in (0, 1)
connection = PAFConnection[objtype]
PAF_valid = []
for conn in connection:
connection_valid = valid[conn[0]] and valid[conn[1]]
PAF_valid.append(connection_valid)
PAF_valid = np.array(PAF_valid, dtype=bool)
return PAF_valid
def createPAF(keypoint2d, keypoint3d, objtype, output_size, normalize_3d=True, valid_vec=None):
# objtype: 0: body, 1: hand
# output_size: (h, w)
# keypoint2d: (x, y)
# normalize_3d: if True: set x^2 + y^2 + z^2 = 1; else set x^2 + y^2 = 1
with tf.variable_scope('createPAF'):
assert keypoint2d.get_shape().as_list()[0] == keypoint3d.get_shape().as_list()[0]
assert keypoint2d.get_shape().as_list()[1] == 2
assert keypoint3d.get_shape().as_list()[1] == 3
if valid_vec is None:
valid_vec = tf.ones([keypoint2d.get_shape()[0]], dtype=tf.bool)
h_range = tf.expand_dims(tf.range(output_size[0]), 1)
w_range = tf.expand_dims(tf.range(output_size[1]), 0)
H = tf.cast(tf.tile(h_range, [1, output_size[1]]), tf.float32)
W = tf.cast(tf.tile(w_range, [output_size[0], 1]), tf.float32)
PAFs = []
for ic, conn in enumerate(PAFConnection[objtype]):
AB = keypoint2d[conn[1]] - keypoint2d[conn[0]] # joint 0 - > joint 1
l_AB = tf.sqrt(tf.reduce_sum(tf.square(AB)))
AB = AB / l_AB
dx = W - keypoint2d[conn[0], 0]
dy = H - keypoint2d[conn[0], 1]
dist = tf.abs(dy * AB[0] - dx * AB[1]) # cross product
Xmin = tf.minimum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) - dist_thresh
Xmax = tf.maximum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) + dist_thresh
Ymin = tf.minimum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) - dist_thresh
Ymax = tf.maximum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) + dist_thresh
within_range = tf.cast(W >= Xmin, tf.float32) * tf.cast(W <= Xmax, tf.float32) * tf.cast(H >= Ymin, tf.float32) * tf.cast(H <= Ymax, tf.float32)
within_dist = tf.cast(dist < dist_thresh, tf.float32)
mask = within_range * within_dist
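            # mask is 1 for pixels within dist_thresh of the line through the two joints and inside their
            # bounding box (padded by dist_thresh); the PAF stores the normalized 3D bone direction there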
AB3d = (keypoint3d[conn[1]] - keypoint3d[conn[0]])
if normalize_3d:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d)))
else:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d[:2])))
AB3d /= scale
AB3d = tf.where(tf.is_nan(AB3d), tf.zeros([3], dtype=tf.float32), AB3d)
cond_valid = tf.logical_and(valid_vec[conn[0]], valid_vec[conn[1]])
connPAF = tf.cond(cond_valid, lambda: tf.tile(tf.expand_dims(mask, 2), [1, 1, 3]) * AB3d, lambda: tf.zeros((output_size[0], output_size[1], 3), dtype=tf.float32))
# create the PAF only when both joints are valid
PAFs.append(connPAF)
concat_PAFs = tf.concat(PAFs, axis=2)
return concat_PAFs
def getColorAffinity(v):
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
summed = RY + YG + GC + CB + BM + MR
v = min(max(v, 0.0), 1.0) * summed
if v < RY:
c = (255., 255. * (v / (RY)), 0.)
elif v < RY + YG:
c = (255. * (1 - ((v - RY) / (YG))), 255., 0.)
elif v < RY + YG + GC:
c = (0. * (1 - ((v - RY) / (YG))), 255., 255. * ((v - RY - YG) / (GC)))
elif v < RY + YG + GC + CB:
c = (0., 255. * (1 - ((v - RY - YG - GC) / (CB))), 255.)
elif v < summed - MR:
c = (255. * ((v - RY - YG - GC - CB) / (BM)), 0., 255.)
elif v < summed:
c = (255., 0., 255. * (1 - ((v - RY - YG - GC - CB - BM) / (MR))))
else:
c = (255., 0., 0.)
return np.array(c)
def plot_PAF(PAF_array):
# return a 3-channel uint8 np array
assert len(PAF_array.shape) == 3
assert PAF_array.shape[2] == 2 or PAF_array.shape[2] == 3
out = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3), dtype=np.uint8)
# 2D PAF: use Openpose Visualization
x = PAF_array[:, :, 0]
y = PAF_array[:, :, 1]
rad = np.sqrt(np.square(x) + np.square(y))
rad = np.minimum(rad, 1.0)
a = np.arctan2(-y, -x) / np.pi
fk = (a + 1.) / 2.
for i in range(PAF_array.shape[0]):
for j in range(PAF_array.shape[1]):
color = getColorAffinity(fk[i, j]) * rad[i, j]
out[i, j, :] = color
if PAF_array.shape[2] == 3:
# also return the average z value (for judge pointing out / in)
# total_rad = np.sqrt(np.sum(np.square(PAF_array), axis=2))
# rz = PAF_array[:, :, 2] / total_rad
# rz[np.isnan(rz)] = 0.0
# rz[total_rad < 0.5] = 0.0
# z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
# z_map[:, :, 0] = 255 * rz * (rz > 0)
# z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
rz = PAF_array[:, :, 2]
z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
z_map[:, :, 0] = 255 * rz * (rz > 0)
z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
z_map = np.maximum(np.minimum(z_map, 255), 0)
return out, z_map.astype(np.uint8)
return out
def plot_all_PAF(PAF_array, PAFdim):
assert PAFdim in (2, 3)
if PAFdim == 2:
assert PAF_array.shape[2] % 2 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::2], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::2], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y], axis=2)
return plot_PAF(total_PAF)
else:
assert PAFdim == 3 and PAF_array.shape[2] % 3 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::3], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::3], axis=2)
total_PAF_z = np.sum(PAF_array[:, :, 2::3], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y, total_PAF_z], axis=2)
return plot_PAF(total_PAF)
def PAF_to_3D(coord2d, PAF, objtype=0):
if objtype == 0:
depth_root_idx = 1 # put neck at 0-depth
else:
assert objtype == 1
depth_root_idx = 0
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
coord3d = np.zeros((coord2d.shape[0], 3), dtype=coord2d.dtype)
coord3d[:, :2] = coord2d
coord3d[depth_root_idx, 2] = 0.0
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if ic in (9, 13):
continue
elif PAF_type == 1:
if ic in (9, 13) or ic >= 20:
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
if (A == B).all(): # A and B actually coincides with each other, put the default bone length.
coord3d[conn[1], 0] = A[0]
coord3d[conn[1], 1] = A[1]
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
# find the least square solution of Ax = b
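            # Unknowns x = [z_child, s]: coord3d[conn[1]] - coord3d[conn[0]] = s * vec3d with the child
            # depth free, i.e. the rows read [0, vx] x = dx, [0, vy] x = dy, [-1, vz] x = -z_parent,
            # hence A[2, 0] = -1, A[:, 1] = vec3d, and b is the current 3D offset (child z still 0).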
A = np.zeros([3, 2])
A[2, 0] = -1.
A[:, 1] = vec3d
b = coord3d[conn[1]] - coord3d[conn[0]] # by this time the z-value of target joint should be 0
x, _, _, _ = nl.lstsq(A, b, rcond=-1)
if x[1] < 0: # the direction is reversed
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic] # assume that this connection is vertical to the screen
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = x[0]
if nl.norm(vec3d) < 0.1 or x[1] < 0: # If there is almost no response, or the direction is reversed, put it zero so that Adam does not fit.
vec3d[:] = 0
vec3d_array.append(vec3d)
return coord3d, np.array(vec3d_array)
def collect_PAF_vec(coord2d, PAF, objtype=0):
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
assert len(PAF.shape) == 3 # H, W, C
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and ic in (9, 13):
continue
elif PAF_type == 1 and ic in (9, 13): # need the extra PAFs here
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
if 3 * ic < PAF.shape[2]: # to be compatible with old network with only 20 PAFs instead of 23
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
else:
vec3d = np.zeros((3,))
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
vec3d_array.append(vec3d)
return np.array(vec3d_array)
def recon_skeleton_PAF(vec3ds, objtype=0):
# reconstruct a skeleton with standard bone length from PAF only
selected_PAF_array = []
if objtype == 0:
coord3d_pred_v = np.zeros([19, 3], dtype=vec3ds.dtype)
root_idx = 1
else:
assert objtype == 1
coord3d_pred_v = np.zeros([21, 3], dtype=vec3ds.dtype)
root_idx = 0
coord3d_pred_v[root_idx] = 0.0
count_vec = 0
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and (ic in (9, 13) or ic >= 21):
continue
elif PAF_type == 1 and ic in (9, 13):
continue
vec = vec3ds[count_vec]
vlength = nl.norm(vec)
assert vlength > 0
if vlength < 0.1: # almost no response, set to 0
vec = np.zeros(3, dtype=vec3ds.dtype)
else:
vec = vec / vlength # unit vector
selected_PAF_array.append(vec)
count_vec += 1
if objtype == 0 and PAF_type == 1 and ic >= 20:
continue
coord3d_pred_v[conn[1]] = coord3d_pred_v[conn[0]] + default_PAF_length[objtype][ic] * vec
return coord3d_pred_v, np.array(selected_PAF_array)
def connection_score_2d(A, B, PAF):
AB = (B - A).astype(np.float32)
if not AB.any():
# A B coincides
return 0.1
AB /= nl.norm(AB.astype(np.float32))
s = PAF.shape
assert len(s) == 3
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int)
vec2ds = PAF[points[1], points[0], :2]
inner_product = np.dot(vec2ds, AB)
return np.mean(inner_product)
def detect_keypoints2d_PAF(scoremaps, PAF, objtype=0, weight_conn=1.0, mean_shift=False, prev_frame=None):
print('PAF_type {}'.format(PAF_type))
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
num_candidate = 5
local_maxs = []
for i in range(s[2]):
candidates = skimage.feature.peak_local_max(scoremaps[:, :, i], num_peaks=num_candidate)
if candidates.shape[0] < num_candidate:
# if less than that, replicate the first element
if candidates.shape[0] > 0:
candidates = np.concatenate([candidates[0][np.newaxis, :]] * (num_candidate - candidates.shape[0]) + [candidates], axis=0)
else:
candidates = np.zeros((5, 2), dtype=int)
local_maxs.append(candidates)
if objtype == 0:
root_idx = 1 # starting constructing the tree from root_idx
else:
assert objtype == 1
root_idx = 0
joint_idx_list = [root_idx]
candidate_idx_list = [[c] for c in range(num_candidate)]
sum_score_list = [scoremaps[local_maxs[root_idx][c, 0], local_maxs[root_idx][c, 1], root_idx] for c in range(num_candidate)]
if prev_frame is not None:
for c in range(num_candidate):
            sum_score_list[c] -= 20 * nl.norm(local_maxs[root_idx][candidate_idx_list[c][0]][::-1] - prev_frame[root_idx]) / (s[0] + s[1])  # penalize root candidates far from the previous frame's root joint
# dynamic programming
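    # For every joint (visited in PAFConnection order starting from the root) keep, for each of its
    # num_candidate peak locations, the best-scoring assignment of all joints visited so far. A candidate's
    # score is its heatmap response plus weight_conn times the 2D PAF alignment with its parent candidate
    # (and, if prev_frame is given, minus a temporal distance penalty).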
for iconn, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if iconn in (9, 13) or iconn >= 21: # unused PAF connection
continue
elif PAF_type == 1:
if iconn in (9, 13) or iconn >= 20:
continue
joint_idx_list.append(conn[1])
candidates = local_maxs[conn[1]]
new_candidate_idx_list = []
new_sum_score_list = []
for ican, candidate in enumerate(candidates):
best_sum_score = -np.inf
best_candidate_idx = None
B = candidate[::-1]
for candidate_idx, sum_score in zip(candidate_idx_list, sum_score_list):
parent_idx = conn[0]
parent_candidate_idx = candidate_idx[joint_idx_list.index(parent_idx)]
A = local_maxs[parent_idx][parent_candidate_idx][::-1]
connection_score = connection_score_2d(A, B, PAF[:, :, 3 * iconn:3 * iconn + 3])
new_sum_score = sum_score + scoremaps[candidate[0], candidate[1], conn[1]] + weight_conn * connection_score # TODO
if prev_frame is not None:
new_sum_score -= 20 * nl.norm(prev_frame[conn[1]] - B) / (s[0] + s[1])
if new_sum_score > best_sum_score:
best_sum_score = new_sum_score
best_candidate_idx = candidate_idx
assert best_candidate_idx is not None
new_sum_score_list.append(best_sum_score)
new_candidate_idx_list.append(best_candidate_idx + [ican])
sum_score_list = new_sum_score_list
candidate_idx_list = new_candidate_idx_list
best_candidate_idx = candidate_idx_list[np.argmax(sum_score_list)]
best_candidate_idx_joint_order = np.zeros_like(best_candidate_idx)
best_candidate_idx_joint_order[np.array(joint_idx_list, dtype=int)] = best_candidate_idx
best_candidate = np.array([local_maxs[i][j] for i, j in enumerate(best_candidate_idx_joint_order)])
coord2d = best_candidate[:, ::-1]
if objtype == 0:
assert coord2d.shape[0] == 19 or coord2d.shape[0] == 20
if objtype == 1:
assert coord2d.shape[0] == 21
scores = []
for i in range(coord2d.shape[0]):
scores.append(scoremaps[coord2d[i, 1], coord2d[i, 0], i])
if mean_shift:
dWidth = 3
dHeight = 3
new_coord2d = []
for i in range(coord2d.shape[0]):
x1 = max(coord2d[i, 0] - dWidth, 0)
x2 = min(coord2d[i, 0] + dWidth + 1, s[1])
y1 = max(coord2d[i, 1] - dHeight, 0)
y2 = min(coord2d[i, 1] + dHeight + 1, s[0])
Xmap = np.arange(x1, x2)
Ymap = np.arange(y1, y2)
local_scoremap = scoremaps[y1:y2, x1:x2, i]
gt0 = (local_scoremap > 0)
if gt0.any():
pos_scoremap = gt0 * local_scoremap
xAcc = np.sum(pos_scoremap * Xmap)
yAcc = np.sum(np.transpose(pos_scoremap) * Ymap)
scoreAcc = np.sum(pos_scoremap)
new_coord2d.append([xAcc / scoreAcc, yAcc / scoreAcc])
else:
new_coord2d.append([coord2d[i, 0], coord2d[i, 1]])
coord2d = np.array(new_coord2d, dtype=np.float32)
return coord2d.astype(np.float32), np.array(scores, dtype=np.float32)
"""
Tensorized get_color_affinity()
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
summed = RY + YG + GC + CB + BM + MR
v = torch.clamp(v, min=0., max=1.) * summed
# v = min(max(v, 0.0), 1.0) * summed
value = v.cpu().detach().numpy() # [O, H, W]
O, H, W = value.shape
record = np.zeros([O, H, W])
out = np.zeros([O, H, W, 3], dtype=value.dtype)
out[:, :, :, 0] = 255.
print(out.shape)
# if v < RY:
# c = (255., 255. * (v / (RY)), 0.)
idx = np.where(np.logical_and(value < RY, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255. * value[idx] / RY
# elif v < RY + YG:
# c = (255. * (1 - ((v - RY) / (YG))), 255., 0.)
idx = np.where(np.logical_and(value < RY + YG, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 255. * (1 - ((value[idx] - RY) / (YG)))
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255.
# elif v < RY + YG + GC:
# c = (0. * (1 - ((v - RY) / (YG))), 255., 255. * ((v - RY - YG) / (GC)))
idx = np.where(np.logical_and(value < RY + YG + GC, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 0.
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255
idx_ext = idx + (np.array([2] * len(idx[0])),)
out[idx_ext] = 255. * ((value[idx] - RY - YG) / (GC))
# elif v < RY + YG + GC + CB:
# c = (0., 255. * (1 - ((v - RY - YG - GC) / (CB))), 255.)
idx = np.where(np.logical_and(value < RY + YG + GC + CB, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 0.
idx_ext = idx + (np.array([1] * len(idx[0])),)
out[idx_ext] = 255. * (1 - ((value[idx] - RY - YG - GC) / (CB)))
idx_ext = idx + (np.array([2] * len(idx[0])),)
out[idx_ext] = 255.
# elif v < summed - MR:
# c = (255. * ((v - RY - YG - GC - CB) / (BM)), 0., 255.)
idx = np.where(np.logical_and(value < summed - MR, record == 0))
record[idx] = 1
idx_ext = idx + (np.array([0] * len(idx[0])),)
out[idx_ext] = 255.
"""
| body2hands-main | visualization/POF/utils/PAF.py |
import numpy as np
def transReProjectionLoss(t, X0, K, uv):
assert t.shape == (3,)
assert len(X0.shape) == 2 and X0.shape[1] == 3
assert K.shape == (3, 3)
assert len(uv.shape) == 2 and uv.shape[1] == 2
X = X0 + t[np.newaxis, :]
x = X.dot(K.T)
x /= x[:, 2][:, np.newaxis]
return np.sum(np.square(x[:, :2] - uv))
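

if __name__ == '__main__':
    # Minimal sketch of how this loss can be used: recover a global translation by minimizing the
    # reprojection error with scipy.optimize (scipy is an assumed extra dependency here).
    from scipy.optimize import minimize
    rng = np.random.RandomState(0)
    X0 = rng.randn(10, 3) + np.array([0., 0., 5.])  # 3D points in front of the camera
    K = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])
    t_gt = np.array([0.1, -0.2, 1.0])  # ground-truth translation
    X = X0 + t_gt
    x = X.dot(K.T)
    uv = x[:, :2] / x[:, 2:]  # 2D projections of the translated points
    res = minimize(transReProjectionLoss, x0=np.zeros(3), args=(X0, K, uv))
    print(res.x)  # should be close to t_gt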
| body2hands-main | visualization/POF/utils/optimization.py |
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
""" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
# Remove everything from the discard list
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
# rename everything according to rename_dict
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str], 1) # my modification: replace no more than once
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def load_weights_to_dict(checkpoint_path, discard_list=None, rename_dict=None):
""" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
# Remove everything from the discard list
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
# rename everything according to rename_dict
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
return var_to_shape_map
| body2hands-main | visualization/POF/utils/load_ckpt.py |
import tensorflow as tf
import json
import numpy as np
class AdamModel(object):
num_shape_coeff = 30
num_vertices = 18540
num_joints = 62
def __init__(self):
# read in model file
model_file = 'utils/adam_v1_plus2.json'
with open(model_file) as f:
model_data = json.load(f)
pca_file = 'utils/adam_blendshapes_348_delta_norm.json'
with open(pca_file) as f:
pca_data = json.load(f)
with tf.variable_scope("AdamModel"):
self.mean_shape = tf.constant(np.array(pca_data['mu']), shape=(self.num_vertices * 3,), name='mean_shape', dtype=tf.float32)
self.shape_basis = tf.constant(np.array(pca_data['Uw1']), name='shape_basis', dtype=tf.float32)
J_reg_sparse = model_data['adam_J_regressor_big']
J_reg_size = np.array(J_reg_sparse[0], dtype=np.int32)[:2]
J_reg = np.array(J_reg_sparse[1:], dtype=np.float32)
J_reg_indices = J_reg[:, :2].astype(np.int32)
J_reg_vals = J_reg[:, 2]
self.J_reg = tf.sparse_reorder(tf.SparseTensor(J_reg_indices, J_reg_vals, J_reg_size))
self.J_reg_dense = tf.sparse_tensor_to_dense(self.J_reg)
# parental relationship (for forward_kinametics)
kintree_table = np.array(model_data['kintree_table'], dtype=np.int32)
id_to_col = np.zeros((self.num_joints), dtype=np.int32)
self.m_parent = np.zeros((self.num_joints), dtype=np.int32) # !: This is numpy array.
for i in range(kintree_table.shape[1]):
id_to_col[kintree_table[1, i]] = i
for i in range(1, kintree_table.shape[1]):
self.m_parent[i] = id_to_col[kintree_table[0, i]]
def reconstruct(self, pose=None, coeff=None, trans=None):
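        """Regress Adam joint locations.

        pose:  [batch, 3 * num_joints] joint angles (root as angle-axis, other joints as Euler angles in degrees),
        coeff: [batch, num_shape_coeff] shape (blendshape) coefficients,
        trans: [batch, 3] global translation; any argument may be None, in which case zeros are used.
        Returns a [batch, num_joints * 3] tensor of flattened 3D joint positions.
        """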
with tf.variable_scope("AdamModel"):
if pose is None and coeff is None:
batch_size = 1
else:
if pose is not None:
batch_size = pose.get_shape().as_list()[0]
else:
batch_size = coeff.get_shape().as_list()[0]
if coeff is None:
coeff = tf.zeros((batch_size, self.num_shape_coeff), dtype=tf.float32)
assert len(coeff.get_shape().as_list()) == 2 # [batch_size, shape_coeff]
batch_size = coeff.get_shape().as_list()[0]
V = self.mean_shape + tf.matmul(coeff, self.shape_basis, transpose_b=True) # mean + shape_basis * shape_coeff
# mat_V = tf.reshape(V, [self.num_vertices, 3])
# J0 = tf.transpose(tf.sparse_tensor_dense_matmul(self.J_reg, V))
J0 = tf.matmul(V, self.J_reg_dense, transpose_b=True)
mat_J0 = tf.reshape(J0, [batch_size, -1, 3])
if pose is None:
pose = tf.zeros((batch_size, 3 * self.num_joints), dtype=tf.float32) # note different size with coeff
assert len(pose.get_shape().as_list()) == 2 # [batch_size, 3 * num_joints]
Js = []
for i in range(batch_size):
mat_J = self.forward_kinametics(mat_J0[i, :, :], pose[i, :])
if trans is not None: # [batch_size, 3]
assert len(trans.get_shape().as_list()) == 2
mat_J = mat_J + trans[i, :]
J = tf.reshape(mat_J, [-1])
Js.append(J)
Js = tf.stack(Js, axis=0)
return Js
def forward_kinametics(self, J0, pose):
with tf.variable_scope("forward_kinametics"):
Rs = [] # transformation matrix
ts = []
R0 = self.AngleAxisToRotationMatrix(pose[:3])
t0 = tf.transpose(J0[0:1, :])
Rs.append(R0)
ts.append(t0)
for idj in range(1, self.num_joints):
ipar = self.m_parent[idj]
if idj in (10, 11): # foot ends
angles = tf.zeros((3,), dtype=pose.dtype)
elif idj in (7, 8): # foot ankle
angles = tf.concat([pose[idj * 3:(idj + 1) * 3 - 1], tf.zeros([1, ], dtype=pose.dtype)], axis=0)
elif idj in (24, 26, 27, 28, 31, 32, 35, 39, 40, 44, 47, 48, 51, 52, 55, 56, 59, 60):
angles = tf.concat([tf.zeros([2, ], dtype=pose.dtype), pose[idj * 3 + 2:(idj + 1) * 3]], axis=0)
else:
angles = pose[idj * 3:(idj + 1) * 3]
R = self.EulerAngleToRotationMatrix(angles) # in ceres function, R is assumed to be row major, but in adam_reconstruct_euler, R is column major.
R = tf.matmul(Rs[ipar], R)
t = ts[ipar] + tf.matmul(Rs[ipar], tf.transpose(J0[idj:(idj + 1), :] - J0[ipar:(ipar + 1), :]))
Rs.append(R)
ts.append(t)
for idj in range(self.num_joints):
ts[idj] = ts[idj] - tf.matmul(Rs[idj], tf.transpose(J0[idj:(idj + 1), :]))
J_out = []
for idj in range(self.num_joints):
J_out.append(tf.matmul(Rs[idj], tf.transpose(J0[idj:(idj + 1), :])) + ts[idj]) # original pose -> transformed pose (world coordinate)
J_out = tf.transpose(tf.concat(J_out, axis=1))
return J_out
@staticmethod
def AngleAxisToRotationMatrix(angle_axis):
""" angle_axis is a 3d vector whose direction points to the rotation axis and whose norm is the angle (in radians) """
with tf.variable_scope("AngleAxisToRotationMatrix"):
theta = tf.norm(angle_axis)
cos = tf.cos(theta)
sin = tf.sin(theta)
xyz = tf.divide(angle_axis, theta)
x = xyz[0]
y = xyz[1]
z = xyz[2]
# when theta > 0
R00 = cos + x * x * (1. - cos)
R10 = sin * z + x * y * (1. - cos)
R20 = -sin * y + x * z * (1. - cos)
Rcol0 = tf.stack([R00, R10, R20], axis=0)
R01 = x * y * (1. - cos) - z * sin
R11 = cos + y * y * (1. - cos)
R21 = x * sin + y * z * (1. - cos)
Rcol1 = tf.stack([R01, R11, R21], axis=0)
R02 = y * sin + x * z * (1. - cos)
R12 = -x * sin + y * z * (1. - cos)
R22 = cos + z * z * (1. - cos)
Rcol2 = tf.stack([R02, R12, R22], axis=0)
R = tf.stack([Rcol0, Rcol1, Rcol2], axis=1)
# when theta == 0
R_00 = tf.ones([], dtype=angle_axis.dtype)
R_10 = angle_axis[2]
R_20 = -angle_axis[1]
R_col0 = tf.stack([R_00, R_10, R_20], axis=0)
R_01 = -angle_axis[2]
R_11 = tf.ones([], dtype=angle_axis.dtype)
R_21 = angle_axis[0]
R_col1 = tf.stack([R_01, R_11, R_21], axis=0)
R_02 = angle_axis[1]
R_12 = -angle_axis[0]
R_22 = tf.ones([], dtype=angle_axis.dtype)
R_col2 = tf.stack([R_02, R_12, R_22], axis=0)
R_ = tf.stack([R_col0, R_col1, R_col2], axis=1)
return tf.cond(tf.greater(theta, 0), lambda: R, lambda: R_)
@staticmethod
def EulerAngleToRotationMatrix(euler_angle):
""" This function computes the rotation matrix corresponding to Euler Angle (x, y, z) R_z * R_y * R_x (consistent with Ceres). (x, y, z) in degrees."""
with tf.variable_scope("EulerAngleToRotationMatrix"):
deg = euler_angle * np.pi / 180
cos = tf.cos(deg)
sin = tf.sin(deg)
c3 = cos[0]
c2 = cos[1]
c1 = cos[2]
s3 = sin[0]
s2 = sin[1]
s1 = sin[2]
R00 = c1 * c2
R10 = s1 * c2
R20 = -s2
Rcol0 = tf.stack([R00, R10, R20], axis=0)
R01 = -s1 * c3 + c1 * s2 * s3
R11 = c1 * c3 + s1 * s2 * s3
R21 = c2 * s3
Rcol1 = tf.stack([R01, R11, R21], axis=0)
R02 = s1 * s3 + c1 * s2 * c3
R12 = -c1 * s3 + s1 * s2 * c3
R22 = c2 * c3
Rcol2 = tf.stack([R02, R12, R22], axis=0)
R = tf.stack([Rcol0, Rcol1, Rcol2], axis=1)
return R
if __name__ == '__main__':
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
a = AdamModel()
pose_np = np.zeros((2, 3 * 62,), dtype=np.float32)
pose_np[0, 3 * 16 + 1] = -90.
pose = tf.Variable(pose_np)
J = a.reconstruct(pose=pose)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
JJ = sess.run(J)
JJ = JJ.reshape(2, -1, 3)
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.scatter(JJ[0, :, 0], JJ[0, :, 1], JJ[0, :, 2], color='red')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax = fig.add_subplot(122, projection='3d')
ax.scatter(JJ[1, :, 0], JJ[1, :, 1], JJ[1, :, 2], color='red')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.axis('equal')
# from meshWrapper import meshWrapper
# meshlib = meshWrapper("/home/donglaix/Documents/Experiments/hand_model/build/libPythonWrapper.so")
# meshlib.load_totalmodel()
# meshlib.reset_value()
# meshlib.cpose[:] = pose_np.tolist()
# ax = fig.add_subplot(222)
# img1 = meshlib.total_visualize(cameraMode=False, target=False, first_render=False, position=0)
# ax.imshow(img1)
# ax = fig.add_subplot(223)
# img2 = meshlib.total_visualize(cameraMode=False, target=False, first_render=False, position=1)
# ax.imshow(img2)
# ax = fig.add_subplot(224)
# img3 = meshlib.total_visualize(cameraMode=False, target=False, first_render=False, position=2)
# ax.imshow(img3)
plt.show()
| body2hands-main | visualization/POF/utils/AdamModel.py |
import tensorflow as tf
import math
import numpy as np
class NetworkOps(object):
""" Operations that are frequently used within networks. """
neg_slope_of_relu = 0.01
@classmethod
def leaky_relu(cls, tensor, name='relu'):
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu * tensor, name=name)
return out_tensor
@classmethod
def relu(cls, tensor, name='relu_pure'):
out_tensor = tf.maximum(tensor, tf.constant(0.0, dtype=tf.float32), name=name)
return out_tensor
@classmethod
def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]
# conv
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, leaky=True, trainable=True):
tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
if leaky:
out_tensor = cls.leaky_relu(tensor, name='out')
else:
out_tensor = cls.relu(tensor, name='out')
return out_tensor
@classmethod
def conv3d(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, kernel_size, in_size[4], out_chan]
# conv
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv3d(in_tensor, kernel, strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[4]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv3d_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, leaky=True, trainable=True):
tensor = cls.conv3d(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
if leaky:
out_tensor = cls.leaky_relu(tensor, name='out')
else:
out_tensor = cls.relu(tensor, name='out')
return out_tensor
@classmethod
def max_pool(cls, bottom, name='pool'):
pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID', name=name)
return pooled
@classmethod
def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
strides = [1, stride, stride, 1]
# conv
kernel = cls.get_deconv_filter(kernel_shape, trainable)
tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
strides=strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases)
return out_tensor
@classmethod
def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@staticmethod
def get_deconv_filter(f_shape, trainable):
width = f_shape[0]
height = f_shape[1]
f = math.ceil(width / 2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="weights", initializer=init,
shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])
@staticmethod
def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
assert len(in_size) == 2, 'Input to a fully connected layer must be a batch of vectors ([batch, features]).'
weights_shape = [in_size[1], out_chan]
# weight matrix
weights = tf.get_variable('weights', weights_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable)
weights = tf.check_numerics(weights, 'weights: %s' % layer_name)
# bias
biases = tf.get_variable('biases', [out_chan], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable)
biases = tf.check_numerics(biases, 'biases: %s' % layer_name)
out_tensor = tf.matmul(in_tensor, weights) + biases
return out_tensor
@classmethod
def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu * tensor, name='out')
return out_tensor
@staticmethod
def dropout(in_tensor, keep_prob, evaluation):
""" Dropout: Each neuron is dropped independently. """
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=tensor_shape))
return out_tensor
@staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
""" Spatial dropout: Not each neuron is dropped independently, but feature map wise. """
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
return out_tensor
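# Added usage sketch (not part of the original module): chains a few of the
# NetworkOps helpers above into a tiny network. The input shape and layer names
# are illustrative assumptions, not values used elsewhere in this repo.
if __name__ == '__main__':
    example_input = tf.placeholder(tf.float32, [1, 64, 64, 3])
    x = NetworkOps.conv_relu(example_input, 'example_conv1', kernel_size=3, stride=1, out_chan=16)
    x = NetworkOps.max_pool(x, 'example_pool1')
    x = tf.reshape(x, [1, -1])
    out = NetworkOps.fully_connected_relu(x, 'example_fc1', out_chan=8)
    print(out)  # Tensor of shape (1, 8)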
| body2hands-main | visualization/POF/utils/ops.py |
from utils.AdamModel import AdamModel
from utils.PAF import PAFConnection
import tensorflow as tf
import numpy as np
import json
if __name__ == '__main__':
adam = AdamModel()
adam_joints = adam.reconstruct()
sess = tf.Session()
V_vec, joints_v = sess.run([adam.mean_shape, adam_joints])
sess.close()
joints_v = joints_v.reshape(adam.num_joints, 3)
V = V_vec.reshape(adam.num_vertices, 3)
coords3d = np.zeros([19, 3], dtype=np.float64)
coords3d[1] = joints_v[12]
coords3d[2] = joints_v[17]
coords3d[3] = joints_v[19]
coords3d[4] = joints_v[21]
coords3d[5] = joints_v[16]
coords3d[6] = joints_v[18]
coords3d[7] = joints_v[20]
coords3d[8] = joints_v[2]
coords3d[9] = joints_v[5]
coords3d[10] = joints_v[8]
coords3d[11] = joints_v[1]
coords3d[12] = joints_v[4]
coords3d[13] = joints_v[7]
coords3d[0] = V[8130]
coords3d[16] = V[10088]
coords3d[17] = V[6970]
coords3d[18] = V[1372]
coords3d[14] = V[9707]
coords3d[15] = V[2058]
PAF_lengths = [[], []]
for conn in PAFConnection[0]:
vector = coords3d[conn[1]] - coords3d[conn[0]]
length = np.sqrt(vector.dot(vector))
PAF_lengths[0].append(length)
coords3d_hand = np.zeros([21, 3], dtype=np.float64)
coords3d_hand[0] = joints_v[20]
coords3d_hand[1] = joints_v[25]
coords3d_hand[2] = joints_v[24]
coords3d_hand[3] = joints_v[23]
coords3d_hand[4] = joints_v[22]
coords3d_hand[5] = joints_v[29]
coords3d_hand[6] = joints_v[28]
coords3d_hand[7] = joints_v[27]
coords3d_hand[8] = joints_v[26]
coords3d_hand[9] = joints_v[33]
coords3d_hand[10] = joints_v[32]
coords3d_hand[11] = joints_v[31]
coords3d_hand[12] = joints_v[30]
coords3d_hand[13] = joints_v[37]
coords3d_hand[14] = joints_v[36]
coords3d_hand[15] = joints_v[35]
coords3d_hand[16] = joints_v[34]
coords3d_hand[17] = joints_v[41]
coords3d_hand[18] = joints_v[40]
coords3d_hand[19] = joints_v[39]
coords3d_hand[20] = joints_v[38]
for conn in PAFConnection[1]:
vector = coords3d_hand[conn[1]] - coords3d_hand[conn[0]]
length = np.sqrt(vector.dot(vector))
PAF_lengths[1].append(length)
with open('utils/default_PAF_lengths.json', 'w') as f:
json.dump(PAF_lengths, f)
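# Added verification sketch (illustration only, not called anywhere): read the
# dumped file back and check that one length per PAF connection was stored for
# the body and for the hand. The path argument mirrors the dump above.
def _verify_default_paf_lengths(path='utils/default_PAF_lengths.json'):
    with open(path) as f:
        body_lengths, hand_lengths = json.load(f)
    assert len(body_lengths) == len(PAFConnection[0])
    assert len(hand_lengths) == len(PAFConnection[1])
    return body_lengths, hand_lengths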
| body2hands-main | visualization/POF/utils/default_PAF_length.py |
import numpy as np
def calc_auc(x, y):
""" Given x and y values it calculates the approx. integral and normalizes it: area under curve"""
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return integral / norm
class EvalUtil:
""" Util class for evaluation networks.
"""
def __init__(self, num_kp=21):
# init empty data storage
self.data = list()
self.num_kp = num_kp
for _ in range(num_kp):
self.data.append(list())
def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
""" Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. """
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert len(keypoint_gt.shape) == 2
assert len(keypoint_pred.shape) == 2
assert len(keypoint_vis.shape) == 1
# calc euclidean distance
diff = keypoint_gt - keypoint_pred
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i])
def _get_pck(self, kp_id, threshold):
""" Returns pck for one keypoint for the given threshold. """
if len(self.data[kp_id]) == 0:
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck
def _get_epe(self, kp_id):
""" Returns end point error for one keypoint. """
if len(self.data[kp_id]) == 0:
return None, None
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return epe_mean, epe_median
def get_measures(self, val_min, val_max, steps):
""" Outputs the average mean and median error as well as the pck score. """
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
# init mean measures
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
# Create one plot for each part
for part_id in range(self.num_kp):
# mean/median error
mean, median = self._get_epe(part_id)
if mean is None:
# there was no valid measurement for this keypoint
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
# pck/auc
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0) # mean only over keypoints
return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds
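# Added usage sketch (illustration only): the keypoint count, noise level and
# PCK threshold range below are assumptions, not values from the training code.
if __name__ == '__main__':
    util = EvalUtil(num_kp=21)
    gt = np.random.rand(21, 3)
    pred = gt + 0.01 * np.random.randn(21, 3)  # predictions = ground truth + small noise
    visible = np.ones(21)
    util.feed(gt, visible, pred)
    mean, median, auc, pck_curve, thresholds = util.get_measures(0.0, 0.05, 20)
    print('mean EPE: %f, AUC: %f' % (mean, auc))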
| body2hands-main | visualization/POF/utils/EvalUtil.py |
# Don't use anaconda for this
import ctypes
import os
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import numpy as np
class wrapper_hand_model(object):
def __init__(self, lib_file='./utils/libPythonWrapper.so', model_file='./utils/hand2_l_all_uv.json'):
self.lib = ctypes.cdll.LoadLibrary(lib_file)
self.fit_hand3d = self.lib.fit_hand3d
self.fit_hand3d.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.c_char_p, ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_bool]
self.fit_hand3d.restype = None
self.Opengl_visualize = self.lib.Opengl_visualize
self.Opengl_visualize.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_bool, ctypes.c_uint, ctypes.c_int, ctypes.c_bool, ctypes.c_bool]
self.Opengl_visualize.restype = None
self.fit_hand2d = self.lib.fit_hand2d
self.fit_hand2d.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_char_p,
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_bool,
ctypes.c_double, ctypes.c_int]
self.fit_hand2d.restype = None
self.extract_fit_result = self.lib.extract_fit_result
self.extract_fit_result.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_bool]
self.extract_fit_result.restype = None
self.lib.set_calibK.argtypes = [ctypes.POINTER(ctypes.c_double)]
self.lib.set_calibK.restype = None
self.cmodel_file = ctypes.create_string_buffer(model_file.encode('ascii'))
self.ctarget_array = (ctypes.c_double * 63)()
self.ctrans = (ctypes.c_double * 3)()
self.ccoeff = (ctypes.c_double * 63)()
self.cpose = (ctypes.c_double * 63)()
self.cret_bytes = (ctypes.c_ubyte * (600 * 600 * 3))()
self.ctarget2d_array = (ctypes.c_double * 42)()
self.calibK = (ctypes.c_double * 9)()
self.cret_bytes_cam = (ctypes.c_ubyte * (1080 * 1920 * 4))()
self.PAF_array = (ctypes.c_double * (20 * 3))()
def reset_value(self):
self.ctrans[:] = [0.0, 0.0, 2.0]
self.ccoeff[:] = [1.0 for _ in range(63)]
self.cpose[:] = [0.0 for _ in range(63)]
def fit3d(self, joint3d, regressor_type=0, euler=True):
assert joint3d.shape == (21, 3)
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.fit_hand3d(self.ctarget_array, self.cmodel_file, self.cpose, self.ccoeff, self.ctrans, regressor_type, euler)
trans = np.array(self.ctrans[:])
pose = np.array(self.cpose[:]).reshape(-1, 3)
coeff = np.array(self.ccoeff[:]).reshape(-1, 3)
return trans, pose, coeff
def fit2d(self, joint2d, calibK, PAF, regressor_type=0, euler=True, prior_weight=100.0, mode=0):
assert joint2d.shape == (21, 2) and calibK.shape == (3, 3)
self.ctarget2d_array[:] = joint2d.reshape(-1).tolist()
self.calibK[:] = calibK.reshape(-1).tolist()
assert PAF.size == len(self.PAF_array[:])
self.PAF_array[:] = PAF.reshape(-1).tolist()
self.fit_hand2d(self.ctarget2d_array, self.calibK, self.PAF_array, self.cmodel_file, self.cpose, self.ccoeff, self.ctrans,
regressor_type, euler, prior_weight, mode)
trans = np.array(self.ctrans[:])
pose = np.array(self.cpose[:]).reshape(-1, 3)
coeff = np.array(self.ccoeff[:]).reshape(-1, 3)
return trans, pose, coeff
def render(self, cameraMode=False, target=True, first_render=False, position=0, regressor_type=0, stay=False, euler=True):
if cameraMode:
read_buffer = self.cret_bytes_cam
else:
read_buffer = self.cret_bytes
if target:
if first_render:
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, self.ctarget_array, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, self.ctarget_array, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
else:
if first_render:
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, None, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
self.Opengl_visualize(self.cmodel_file, read_buffer, self.cpose, self.ccoeff, self.ctrans, None, ctypes.c_bool(cameraMode), position, regressor_type, stay, euler)
img = bytes(read_buffer)
if not cameraMode:
img = Image.frombytes("RGBA", (600, 600), img)
else:
img = Image.frombytes("RGBA", (1920, 1080), img)
img = ImageOps.flip(img)
return img
def set_calibK(self, K):
self.calibK[:] = K.reshape(-1).tolist()
self.lib.set_calibK(self.calibK)
if __name__ == '__main__':
import numpy as np
wrapper = wrapper_hand_model("/home/donglaix/Documents/Experiments/hand_model/build/libPythonWrapper.so")
joint3d = np.array([-33.3889, -173.355, -36.0744, -35.0518, -173.959, -37.7108, -36.5972, -176.126, -40.7544, -37.4367, -178.032, -43.6272, -38.7743, -178.843, -45.5877, -36.4731, -180.718, -38.2183, -37.0009, -181.596, -42.4443, -37.4651, -181.437, -45.0006, -37.7732, -181.458, -47.0573, -34.2598, -180.606, -38.3926, -35.2143, -
180.671, -43.2699, -36.3031, -179.876, -45.6931, -37.1902, -179.438, -47.745, -32.0926, -179.69, -38.4972, -33.7518, -179.847, -42.8798, -34.9357, -179.212, -45.3947, -35.7699, -178.853, -47.3468, -30.3247, -178.334, -39.2571, -31.8778, -178.837, -42.4667, -33.003, -178.501, -44.2697, -33.8762, -178.325, -45.8248]).reshape(-1, 3)
joint2d = np.array([1284.646, 254.091, 1296.991, 248.479, 1319.012, 231.635, 1339.5621, 217.027, 1354.4766, 209.81, 1300.0491, 200.093, 1330.055, 192.596, 1348.5556, 192.777, 1363.3952, 191.943, 1299.7998, 202.764, 1334.6115,
200.494, 1352.7438, 204.628, 1368.139, 206.547, 1299.2785, 210.884, 1330.8779, 207.547, 1349.5700, 210.478, 1364.09, 211.918, 1303.6187, 221.421, 1326.7478, 216.127, 1340.2151, 217.196, 1351.8205, 217.42]).reshape(-1, 2)
K = np.array([[1633.34, 0, 942.256], [0, 1628.84, 557.344], [0, 0, 1]])
wrapper.fit3d(joint3d)
wrapper.fit2d(joint2d, K)
img = wrapper.render(cameraMode=False, first_render=True)
# print(trans)
# print(pose)
# print(coeff)
plt.imshow(img)
plt.show()
| body2hands-main | visualization/POF/utils/wrapper_hand_model.py |
import numpy as np
from math import factorial
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay filter is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to fit, for each point, a least-squares polynomial
of high order over an odd-sized window centered at the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
| body2hands-main | visualization/POF/utils/smoothing.py |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider
import utils.general
class vis_heatmap3d(object):
def __init__(self, fig, ax, heatmap, keypoints=None, type_str=None):
assert len(heatmap.shape) == 4
self.fig = fig
self.idx = 0
self.threshold = 0.5
self.heatmap = heatmap
self.ax = ax
self.keypoints = keypoints
self.type_str = type_str
axcolor = 'lightgoldenrodyellow'
axx = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)  # 'axisbg' was removed in newer matplotlib
self.slider_threshold = Slider(axx, 'threshold', 0.0, 1.0, valinit=0.5)
self.slider_threshold.on_changed(self.update)
def draw(self):
self.ax.clear()
if self.keypoints is not None:
utils.general.plot3d(self.ax, self.keypoints, self.type_str)
active_map = self.heatmap[:, :, :, self.idx]
Z, Y, X = np.where(active_map >= self.threshold)
colors = [(1 - s) * np.array([0., 0., 1.], dtype=float) for s in active_map[Z, Y, X]]
self.ax.scatter(X, Y, Z, color=colors)
def update(self, val):
self.threshold = self.slider_threshold.val
self.draw()
self.fig.canvas.draw_idle()
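# Added usage sketch (illustration only): the heatmap below is random noise,
# purely to demonstrate the expected [D, H, W, C] input layout and the threshold slider.
if __name__ == '__main__':
    heatmap = np.random.rand(32, 32, 32, 21)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    visualizer = vis_heatmap3d(fig, ax, heatmap)
    visualizer.draw()
    plt.show()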
| body2hands-main | visualization/POF/utils/vis_heatmap3d.py |
import tensorflow as tf
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
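# Added minimal sketch (not part of the original module): two "towers" provide
# gradients for the same shared variable and average_gradients averages them.
# The constant gradient values are arbitrary illustrations.
if __name__ == '__main__':
    v = tf.Variable(1.0)
    tower_grads = [[(tf.constant(2.0), v)], [(tf.constant(4.0), v)]]
    averaged = average_gradients(tower_grads)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(averaged[0][0]))  # prints 3.0, the mean of the two tower gradients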
| body2hands-main | visualization/POF/utils/multigpu.py |
import ctypes
from PIL import Image, ImageOps
import numpy as np
class meshWrapper(object):
def __init__(self, lib_file='./utils/libPythonWrapper.so'):
self.lib = ctypes.cdll.LoadLibrary(lib_file)
# extern "C" void load_totalmodel(char* obj_file, char* model_file, char* pca_file);
self.lib.load_totalmodel.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
self.lib.load_totalmodel.restype = None
self.obj_file = ctypes.create_string_buffer('./utils/mesh_nofeet.obj'.encode('ascii'))
self.model_file = ctypes.create_string_buffer('./utils/adam_v1_plus2.json'.encode('ascii'))
self.pca_file = ctypes.create_string_buffer('./utils/adam_blendshapes_348_delta_norm.json'.encode('ascii'))
self.correspondence_file = ctypes.create_string_buffer('./utils/correspondences_nofeet.txt'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/adam_cocoplus_regressor.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_ls.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_nonneg.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_combined_angjoo1.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_regressor2_nonneg.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_regressor2_nonneg_root.json'.encode('ascii'))
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n1.json'.encode('ascii'))
self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n1_root.json'.encode('ascii'))
# extern "C" void fit_total3d(double* targetJoint, double* pose, double* coeff, double* trans)
self.lib.fit_total3d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5
self.lib.fit_total3d.restype = None
self.lib.fit_total2d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 6
self.lib.fit_total2d.restype = None
self.lib.fit_total3d2d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 7
self.lib.fit_total3d2d.restype = None
# extern "C" void fit_PAF_vec(double* targetJoint2d, double* PAF_vec, double* calibK, double* pose, double* coeff, double* trans, double* face_coeff)
self.lib.fit_PAF_vec.argtypes = [ctypes.POINTER(ctypes.c_double)] * 8 + [ctypes.c_uint, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool]
self.lib.fit_PAF_vec.restype = None
# Eigen::Matrix<double, 62, 3, Eigen::RowMajor> m_adam_pose; //62 ==TotalModel::NUM_JOINTS
# Eigen::Matrix<double, 30, 1> m_adam_coeffs; //30 ==TotalModel::NUM_SHAPE_COEFFICIENTS
# Eigen::Vector3d m_adam_t;
self.cpose = (ctypes.c_double * (62 * 3))()
self.ccoeff = (ctypes.c_double * 30)()
self.ctrans = (ctypes.c_double * 3)()
self.cface_coeff = (ctypes.c_double * 200)()
self.ctarget_array = (ctypes.c_double * ((62 + 70 + 6) * 3))()
self.ctarget_array_2d = (ctypes.c_double * ((63 + 70 + 6) * 2))()
self.cret_bytes = (ctypes.c_ubyte * (600 * 600 * 4))()
self.cfull_bytes = (ctypes.c_ubyte * (1920 * 1080 * 4))()
self.cortho_bytes = (ctypes.c_ubyte * (1920 * 1080 * 4))()
self.PAF_array = (ctypes.c_double * (63 * 3))()
self.out_joint = (ctypes.c_double * (65 * 3))() # regressor 2: 19 (small coco regressor) + 20 (hand) + 20 (hand) + 6 (feet)
self.calibK = (ctypes.c_double * 9)()
# extern "C" void Total_visualize(GLubyte* ret_bytes, double* targetJoint, uint CameraMode, uint position, bool meshSolid, float scale, int vis_type)
self.lib.Total_visualize.argtypes = [ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double),
ctypes.c_uint, ctypes.c_uint, ctypes.c_bool, ctypes.c_float, ctypes.c_int, ctypes.c_bool]
self.lib.Total_visualize.restype = None
self.lib.VisualizeSkeleton.argtypes = [ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double), ctypes.c_uint, ctypes.c_uint, ctypes.c_float]
self.lib.VisualizeSkeleton.restype = None
self.lib.init_renderer.argtypes = []
self.lib.init_renderer.restype = None
self.lib.reconstruct_adam.argtypes = [ctypes.POINTER(ctypes.c_double)] * 4 + [ctypes.c_int]
self.lib.reconstruct_adam.restype = None
self.lib.reconstruct_adam_mesh.argtypes = [ctypes.POINTER(ctypes.c_double)] * 4 + [ctypes.c_int, ctypes.c_bool]
self.lib.reconstruct_adam_mesh.restype = None
self.lib.fit_h36m_groundtruth.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5
self.lib.fit_h36m_groundtruth.restype = None
self.lib.adam_refit.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_uint]
self.lib.adam_refit.restype = None
self.lib.adam_sequence_init.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_uint]
self.lib.adam_sequence_init.restype = None
self.lib.adam_hsiu_fit_dome.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_bool]
self.lib.adam_hsiu_fit_dome.restype = None
def reset_value(self):
self.ctrans[:] = [0.0, 0.0, 500.0]
self.ccoeff[:] = [0.0] * 30
self.cpose[:] = [0.0] * (62 * 3)
self.cface_coeff[:] = [0.0] * 200
def load_totalmodel(self):
self.lib.load_totalmodel(self.obj_file, self.model_file, self.pca_file, self.correspondence_file, self.cocoplus_regressor_file)
def fit_total3d(self, joint3d):
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist()
self.lib.fit_total3d(self.ctarget_array, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff)
def total_visualize(self, cameraMode=0, target=True, first_render=False, position=0, meshSolid=True, scale=1.0, vis_type=1, show_joint=True):
if cameraMode == 0:
read_buffer = self.cret_bytes
read_size = (600, 600)
elif cameraMode == 1:
read_buffer = self.cfull_bytes
read_size = (1920, 1080)
else:
assert cameraMode == 2
read_buffer = self.cortho_bytes
read_size = (1920, 1080)
if first_render:
self.lib.Total_visualize(read_buffer, self.ctarget_array if target else None, ctypes.c_uint(cameraMode),
ctypes.c_uint(position), ctypes.c_bool(meshSolid), ctypes.c_float(scale), ctypes.c_int(vis_type),
ctypes.c_bool(show_joint))
read_buffer[:] = [0] * len(read_buffer[:])
self.lib.Total_visualize(read_buffer, self.ctarget_array if target else None, ctypes.c_uint(cameraMode),
ctypes.c_uint(position), ctypes.c_bool(meshSolid), ctypes.c_float(scale), ctypes.c_int(vis_type),
ctypes.c_bool(show_joint))
img = bytes(read_buffer[:read_size[0] * read_size[1] * 4])
img = Image.frombytes("RGBA", read_size, img)
img = ImageOps.flip(img)
return img
def fit_total2d(self, joint2d, K):
assert joint2d.shape[1] == 2, joint2d.shape
assert K.shape == (3, 3), K
self.calibK[:] = K.reshape(-1).tolist()
self.ctarget_array_2d[:] = joint2d.reshape(-1).tolist()
self.lib.fit_total2d(self.ctarget_array_2d, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff)
def fit_total3d2d(self, joint3d, joint2d, K):
assert joint3d.shape[1] == 3, joint3d.shape
assert joint2d.shape[1] == 2, joint2d.shape
assert K.shape == (3, 3), K
self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist()
self.ctarget_array_2d[:] = joint2d.reshape(-1).tolist()
self.calibK[:] = K.reshape(-1).tolist()
self.lib.fit_total3d2d(self.ctarget_array, self.ctarget_array_2d, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff)
def visualize_skeleton(self, joint3d, cameraMode=0, first_render=False, position=0, scale=1.0):
if cameraMode == 0:
read_buffer = self.cret_bytes
read_size = (600, 600)
elif cameraMode == 1:
read_buffer = self.cfull_bytes
read_size = (1920, 1080)
else:
assert cameraMode == 2
read_buffer = self.cortho_bytes
read_size = (1920, 1080)
read_buffer[:] = [0] * len(read_buffer[:])
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist()
if first_render:
self.lib.VisualizeSkeleton(read_buffer, self.ctarget_array, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_float(scale))
self.lib.VisualizeSkeleton(read_buffer, self.ctarget_array, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_float(scale))
img = bytes(read_buffer[:read_size[0] * read_size[1] * 4])
img = Image.frombytes("RGBA", read_size, img)
img = ImageOps.flip(img)
return img
def fit_PAF_vec(self, joint2d, PAF_vec, K, joint3d=None, regressor_type=0, quan=False, fitPAFfirst=False, fit_face_exp=False):
assert joint2d.shape == (139, 2), joint2d.shape
assert K.shape == (3, 3), K
assert PAF_vec.shape[1] == 3, PAF_vec.shape
assert PAF_vec.shape[0] == 63, PAF_vec.shape
if joint3d is not None:
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.calibK[:] = K.reshape(-1).tolist()
self.ctarget_array_2d[:] = [0.0] * len(self.ctarget_array_2d[:])
self.ctarget_array_2d[:joint2d.shape[0] * 2] = joint2d.reshape(-1).tolist()
self.PAF_array[:PAF_vec.size] = PAF_vec.reshape(-1).tolist()
self.lib.fit_PAF_vec(self.ctarget_array_2d, self.PAF_array, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff,
None if joint3d is None else self.ctarget_array, ctypes.c_uint(regressor_type),
ctypes.c_bool(quan), ctypes.c_bool(fitPAFfirst), ctypes.c_bool(fit_face_exp))
def adam_refit(self, joint3d, regressor_type):
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.lib.adam_refit(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, regressor_type)
def adam_sequence_init(self, joint3d, regressor_type):
assert joint3d.shape[1] == 3, joint3d.shape
self.ctarget_array[:] = joint3d.reshape(-1).tolist()
self.lib.adam_sequence_init(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, regressor_type)
def adam_hsiu_fit_dome(self, target_joint, freeze_shape=False):
assert target_joint.shape == (20, 3)
self.ctarget_array[:60] = target_joint.reshape(-1).tolist()
self.lib.adam_hsiu_fit_dome(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, freeze_shape)
def refit_eval_h36m(self, regressor_type, prior_weight=1.0):
# refit Adam using skeleton reconstructed from current params, update params with pose prior && AngleAxis
self.lib.refit_eval_h36m(self.cpose, self.ccoeff, self.ctrans, ctypes.c_uint(regressor_type), ctypes.c_double(prior_weight))
def fitSingleStage(self, joint2d, PAF_vec, K, regressor_type=0, fit_face_exp=False):
assert joint2d.shape == (139, 2), joint2d.shape
assert K.shape == (3, 3), K
assert PAF_vec.shape[1] == 3, PAF_vec.shape
assert PAF_vec.shape[0] == 63, PAF_vec.shape
self.calibK[:] = K.reshape(-1).tolist()
self.ctarget_array_2d[:] = [0.0] * len(self.ctarget_array_2d[:])
self.ctarget_array_2d[:joint2d.shape[0] * 2] = joint2d.reshape(-1).tolist()
self.PAF_array[:PAF_vec.size] = PAF_vec.reshape(-1).tolist()
self.lib.fitSingleStage(self.ctarget_array_2d, self.PAF_array, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff,
ctypes.c_uint(regressor_type), ctypes.c_bool(fit_face_exp))
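# Added usage sketch (illustration only): requires libPythonWrapper.so and the
# model files referenced above to exist at the given paths; the target joints are
# random and only demonstrate the assumed call order (construct, load, reset, fit).
if __name__ == '__main__':
    wrapper = meshWrapper()
    wrapper.load_totalmodel()
    wrapper.reset_value()
    target_joints = np.random.rand(62, 3)  # arbitrary placeholder targets
    wrapper.fit_total3d(target_joints)
    print(np.array(wrapper.ctrans[:]))  # fitted global translation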
| body2hands-main | visualization/POF/utils/meshWrapper.py |
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import cv2
# in A4 order (SMC)
tbody_connMat = np.array([0, 1, 0, 3, 3, 4, 4, 5, 0, 9, 9, 10, 10, 11, 0, 2, 2, 6, 6, 7, 7, 8, 2, 12, 12, 13, 13, 14, 1, 15, 15, 16, 1, 17, 17, 18, 0, 19, 0, 20, 20, 12, 20, 6])
thand_connMat = np.array([0, 1, 1, 2, 2, 3, 3, 4, 0, 5, 5, 6, 6, 7, 7, 8, 0, 9, 9, 10, 10, 11, 11, 12, 0, 13, 13, 14, 14, 15, 15, 16, 0, 17, 17, 18, 18, 19, 19, 20])
total_connMat = np.concatenate([tbody_connMat, thand_connMat + 21, thand_connMat + 42], axis=0).reshape(-1, 2)
connMat = {
'body': np.array([[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [0, 14], [14, 16], [0, 15], [15, 17], [1, 18], [1, 19]], dtype=int),
'hand': np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9],
[0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]]),
'total': total_connMat,
'face': np.array([]),
'human3.6m': np.array([[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15], [15, 16]])
}
type_strs = ['body', 'hand']
class LearningRateScheduler:
"""
Provides a scalar learning-rate tensor that changes at the given iterations, as needed for a multistep learning rate schedule.
"""
def __init__(self, steps, values):
self.steps = steps
self.values = values
assert len(steps) + 1 == len(values), "There must be exactly one more element in values than in steps."
def get_lr(self, global_step):
with tf.name_scope('lr_scheduler'):
if len(self.values) == 1: # 1 value -> no step
learning_rate = tf.constant(self.values[0])
elif len(self.values) == 2: # 2 values -> one step
cond = tf.greater(global_step, self.steps[0])
learning_rate = tf.where(cond, self.values[1], self.values[0])
else: # n values -> n-1 steps
cond_first = tf.less(global_step, self.steps[0])
cond_between = list()
for ind, step in enumerate(range(0, len(self.steps) - 1)):
cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind + 1]),
tf.greater_equal(global_step, self.steps[ind])))
cond_last = tf.greater_equal(global_step, self.steps[-1])
cond_full = [cond_first]
cond_full.extend(cond_between)
cond_full.append(cond_last)
cond_vec = tf.stack(cond_full)
lr_vec = tf.stack(self.values)
learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))
learning_rate = tf.reduce_sum(learning_rate)
return learning_rate
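# Added illustration (not called anywhere in the original code): a two-step
# learning-rate decay schedule; the step boundaries and values are assumptions.
def _example_lr_schedule():
    scheduler = LearningRateScheduler(steps=[100000, 200000], values=[1e-4, 1e-5, 1e-6])
    global_step = tf.train.get_or_create_global_step()
    return scheduler.get_lr(global_step)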
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
"""
Crops an image. When no scale factor is given, performs a central crop.
Inputs:
image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
crop_location: tensor, [batch, 2] which represent the height and width location of the crop
crop_size: int, describes the extension of the crop
Outputs:
image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
"""
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
assert len(s) == 4, "Image needs to be of shape [batch, width, height, channel]"
scale = tf.reshape(scale, [-1])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = crop_size / scale
y1 = crop_location[:, 0] - crop_size_scaled // 2
y2 = y1 + crop_size_scaled
x1 = crop_location[:, 1] - crop_size_scaled // 2
x2 = x1 + crop_size_scaled
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], -1)
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c
def detect_keypoints2d(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
keypoint_uv = np.zeros((s[2], 2))
for i in range(s[2]):
v, u = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_uv[i, 0] = u
keypoint_uv[i, 1] = v
return keypoint_uv
def detect_keypoints3d(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 5:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 4, "This function was only designed for volumetric (4D) scoremaps."
assert (s[3] < s[2]) and (s[3] < s[1]) and (s[3] < s[0]), "Probably the input is not correct, because [D, H, W, C] is expected."
keypoint_coords = np.zeros((s[3], 3))
for i in range(s[3]):
z, y, x = np.unravel_index(np.argmax(scoremaps[:, :, :, i]), (s[0], s[1], s[2]))
keypoint_coords[i, 0] = x
keypoint_coords[i, 1] = y
keypoint_coords[i, 2] = z
return keypoint_coords
def plot2d(ax, keypoint, type_str='body', valid_idx=None, color='red', s=10):
assert len(keypoint.shape) == 2 and keypoint.shape[1] == 2
if valid_idx is not None:
plot_point = keypoint[valid_idx, :]
else:
plot_point = keypoint
ax.scatter(plot_point[:, 0], plot_point[:, 1], c=color, s=s)
for conn in connMat[type_str]:
coord1 = keypoint[conn[0]]
coord2 = keypoint[conn[1]]
if valid_idx is not None and (not valid_idx[conn[0]] or not valid_idx[conn[1]]):
continue
coords = np.vstack([coord1, coord2])
ax.plot(coords[:, 0], coords[:, 1], c=color)
def plot2d_cv2(img, keypoint, type_str='body', valid_idx=None, s=10, use_color=False):
assert len(keypoint.shape) == 2 and keypoint.shape[1] == 2
if valid_idx is not None:
plot_point = keypoint[valid_idx, :]
else:
plot_point = keypoint
for i, kp in enumerate(plot_point):
x = int(kp[0])
y = int(kp[1])
if x == 0 and y == 0:
continue
if not use_color:
cv2.circle(img, (x, y), s, (255, 0, 0), -1)
else:
if i <= 4:
color = (255, 0, 0)
elif i <= 8:
color = (0, 255, 0)
elif i <= 12:
color = (0, 0, 255)
elif i <= 16:
color = (255, 255, 0)
else:
color = (0, 255, 255)
cv2.circle(img, (x, y), s, color, -1)
for i, conn in enumerate(connMat[type_str]):
coord1 = keypoint[conn[0]]
coord2 = keypoint[conn[1]]
if valid_idx is not None and (not valid_idx[conn[0]] or not valid_idx[conn[1]]):
continue
pt1 = (int(coord1[0]), int(coord1[1]))
pt2 = (int(coord2[0]), int(coord2[1]))
if (pt1[0] == 0 and pt1[1] == 0) or (pt2[0] == 0 and pt2[1] == 0):
continue
if not use_color:
cv2.line(img, pt1, pt2, (255, 0, 0), int(s / 2))
else:
if i < 4:
color = (255, 0, 0)
elif i < 8:
color = (0, 255, 0)
elif i < 12:
color = (0, 0, 255)
elif i < 16:
color = (255, 255, 0)
else:
color = (0, 255, 255)
cv2.line(img, pt1, pt2, color, int(s / 2))
def plot3d(ax, keypoint, type_str='body', valid_idx=None, color='red'):
assert len(keypoint.shape) == 2 and keypoint.shape[1] == 3
if valid_idx is not None:
plot_point = keypoint[valid_idx, :]
else:
plot_point = keypoint
ax.scatter(plot_point[:, 0], plot_point[:, 1], plot_point[:, 2], c=color)
for conn in connMat[type_str]:
coord1 = keypoint[conn[0]]
coord2 = keypoint[conn[1]]
if valid_idx is not None and (not valid_idx[conn[0]] or not valid_idx[conn[1]]):
continue
coords = np.vstack([coord1, coord2])
ax.plot(coords[:, 0], coords[:, 1], coords[:, 2], c=color)
def h36LimbLength(keypoint):
assert keypoint.shape == (17, 3)
connections = np.array([[0, 1], [0, 4], [0, 7], [1, 2], [2, 3], [4, 5], [5, 6], [7, 8], [8, 9], [8, 11], [8, 14], [9, 10], [11, 12], [12, 13], [14, 15], [15, 16]])
Ls = []
for conn in connections:
L = nl.norm(keypoint[conn[0]] - keypoint[conn[1]])
Ls.append(L)
return np.array(Ls, dtype=np.float32)
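# Added usage sketch (not part of the original module): run the argmax-based 2D
# detector on a random scoremap and draw the result with plot2d_cv2. The scoremap
# resolution and the 8x upsampling factor are illustrative assumptions.
def _example_detect_and_draw():
    scoremaps = np.random.rand(46, 46, 21)
    keypoint_uv = detect_keypoints2d(scoremaps) * 8  # heatmap coords -> image coords
    canvas = np.zeros((368, 368, 3), dtype=np.uint8)
    plot2d_cv2(canvas, keypoint_uv, type_str='hand', s=3, use_color=True)
    return canvas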
| body2hands-main | visualization/POF/utils/general.py |
import numpy as np
import numpy.linalg as nl
from utils.general import connMat
a4_to_main = {
'body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64), # convert to order of openpose
'1_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64), # convert to order of openpose
'2_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64), # convert to order of openpose
'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'1_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'2_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'1_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'2_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64), # convert to order of freiburg
'openpose_lhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'openpose_rhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'openpose_lhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'openpose_rhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}
human36m_to_main = {
'body': np.array([9, 8, 14, 15, 16, 11, 12, 13, 4, 5, 6, 1, 2, 3, 17, 17, 17, 17, 10, 17], dtype=np.int64)
}
mpi3d_to_main = {
'body': np.array([6, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 28, 28, 28, 28, 7], dtype=np.int64)
}
adam_to_main = {
'body': np.array([12, 17, 19, 21, 16, 18, 20, 2, 5, 8, 1, 4, 7], dtype=np.int64),
'select_body_main': np.arange(1, 14, dtype=np.int64)
}
COCO_to_main = {
'body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
'body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
'all_body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
'all_body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64)
}
SMPL_to_main = { # actually COCOPLUS regressor to main
'body': np.array([14, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 16, 15, 18, 17, 13], dtype=np.int64)
}
STB_to_main = {
'left_hand': np.array([0, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], dtype=np.int64)
}
MPII_to_main = {
'body': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64),
'body_valid': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64)
}
tsimon_to_main = {
'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'right_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}
GAnerated_to_main = {
'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'left_hand_3d': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
'right_hand': np.arange(21, dtype=np.int64),
'right_hand_valid': np.arange(21, dtype=np.int64),
'right_hand_3d': np.arange(21, dtype=np.int64)
}
std_body_size = 267.807
std_hand_size = (82.2705 + 79.8843) / 2
def compute_size(joint3d, type_str):
""" use this to compute size for scaling: joints are in main order.
"""
length = 0.0
for ic, conn in enumerate(connMat[type_str]):
if type_str == 'body':
if ic in (2, 3, 5, 6, 8, 9, 11, 12):
length += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
else:
assert type_str == 'hand'
length += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
return length
def main_to_a4(joint):
assert joint.shape[0] == 20
output = np.zeros((21, joint.shape[1]), dtype=joint.dtype)
for io, ic in enumerate(a4_to_main['body']):
output[ic, :] = joint[io, :]
output[2, :] = (output[6, :] + output[12, :]) / 2
return output
def main_to_a4_hand(joint):
assert joint.shape[0] == 21
output = np.zeros(joint.shape, dtype=joint.dtype)
output[0] = joint[0]
for i in (1, 5, 9, 13, 17):
output[i:i + 4] = joint[i + 3:i - 1:-1]
return output
def assemble_total_3d(body, lhand, rhand):
len_b = compute_size(body, 'body')
if len_b > 0:
sbody = (std_body_size / len_b) * body
else:
sbody = body
len_l = compute_size(lhand, 'hand')
if len_l > 0:
slhand = (std_hand_size / len_l) * lhand
else:
slhand = lhand
len_r = compute_size(rhand, 'hand')
if len_r > 0:
srhand = (std_hand_size / len_r) * rhand
else:
srhand = rhand
sbody = main_to_a4(sbody)
slhand = main_to_a4_hand(slhand)
srhand = main_to_a4_hand(srhand)
slhand_invalid = (slhand[:, 0] == 0) * (slhand[:, 1] == 0) * (slhand[:, 2] == 0)
srhand_invalid = (srhand[:, 0] == 0) * (srhand[:, 1] == 0) * (srhand[:, 2] == 0)
if not slhand[0].any():
slhand_invalid[:] = True
if not srhand[0].any():
srhand_invalid[:] = True
lhand_idx_a4 = 5
rhand_idx_a4 = 11
shift_lhand = sbody[lhand_idx_a4] - slhand[0]
shift_rhand = sbody[rhand_idx_a4] - srhand[0]
slhand += shift_lhand
srhand += shift_rhand
slhand[slhand_invalid] = 0
srhand[srhand_invalid] = 0
return np.concatenate([sbody, slhand, srhand], axis=0), std_body_size / len_b
def assemble_total_2d(body_2d, lhand_2d, rhand_2d):
keypoint_list = []
for i, item in enumerate((body_2d, lhand_2d, rhand_2d)):
keypoint = item['uv_local']
keypoint = (keypoint - 184) / item['scale2d'] + item['crop_center2d']
valid = item['valid']
keypoint = keypoint * np.stack([valid, valid], axis=1) # remove those invalid values
if i == 0:
keypoint = main_to_a4(keypoint)
else:
keypoint = main_to_a4_hand(keypoint)
keypoint_list.append(keypoint)
ret = np.concatenate(keypoint_list, axis=0)
ret[np.isnan(ret)] = 0.0 # nan when the whole joint is zero
return ret
def main_to_human36m(joint):
# except 9, 10 in human36m
out = np.zeros((17, 3), dtype=joint.dtype)
for im, ih in enumerate(human36m_to_main['body']):
if ih == 17: # virtual zero joint
continue
out[ih] = np.copy(joint[im, :])
out[0] = (out[1] + out[4]) / 2 # middle hip
out[7] = (out[1] + out[4] + out[11] + out[14]) / 4 # abdomen (average of l/r hip, l/r shoulder)
return out
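# Added illustration (not used by the original code): convert a skeleton from the
# 20-joint "main" order into the 21-joint A4/SMC order with main_to_a4(); that
# function also fills A4 joint 2 (body center) with the midpoint of the two hips.
def _example_main_to_a4():
    joints_main = np.random.rand(20, 3)  # arbitrary placeholder skeleton in main order
    joints_a4 = main_to_a4(joints_main)
    assert joints_a4.shape == (21, 3)
    return joints_a4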
| body2hands-main | visualization/POF/utils/keypoint_conversion.py |
import tensorflow as tf
from utils.ops import NetworkOps
import numpy as np
ops = NetworkOps
class CPM(object):
# The original CPM: set input image to right hand, BGR channel order (OpenCV), image scale to x / 256.0 - 0.5, output channel number to 22 (the last one for background)
def __init__(self, crop_size=256, out_chan=21, withPAF=False, PAFdim=2, numPAF=19, numStage=5, input_chan=3):
self.name = 'CPM'
self.out_chan = out_chan
self.crop_size = crop_size
self.withPAF = withPAF
self.PAFdim = PAFdim
self.numPAF = numPAF
self.numStage = numStage
def init(self, weight_path, sess):
with tf.variable_scope("CPM"):
data_dict = np.load(weight_path, encoding='latin1').item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
var = tf.get_variable(param_name)
sess.run(var.assign(data))
print('Finish loading weight from {}'.format(weight_path))
def init_pickle(self, session, weight_files=None, exclude_var_list=None):
""" Initializes weights from pickled python dictionaries.
Inputs:
session: tf.Session, Tensorflow session object containing the network graph
weight_files: list of str, Paths to the pickle files that are used to initialize network weights
exclude_var_list: list of str, Weights that should not be loaded
"""
if exclude_var_list is None:
exclude_var_list = list()
import pickle
import os
# Initialize with weights
for file_name in weight_files:
assert os.path.exists(file_name), "File not found."
with open(file_name, 'rb') as fi:
weight_dict = pickle.load(fi)
weight_dict = {k: v for k, v in weight_dict.items() if not any([x in k for x in exclude_var_list])}
if len(weight_dict) > 0:
init_op, init_feed = tf.contrib.framework.assign_from_values(weight_dict)
session.run(init_op, init_feed)
print('Loaded %d variables from %s' % (len(weight_dict), file_name))
def init_vgg(self, sess, weight_path='./weights/vgg16.npy'):
print('initialize from ImageNet pretrained VGG')
with tf.variable_scope("CPM"):
data_dict = np.load(weight_path, encoding='latin1').item()
for op_name in data_dict:
if not op_name.startswith("conv") or op_name == 'conv5_3':
continue
with tf.variable_scope(op_name, reuse=True):
assert len(data_dict[op_name]) == 2
for data in data_dict[op_name]:
try:
if data.ndim == 4:
var = tf.get_variable('weights')
elif data.ndim == 1:
var = tf.get_variable('biases')
else:
raise Exception
sess.run(var.assign(data))
except Exception:
print('Fail to load {}'.format(op_name))
print('Finish loading weight from {}'.format(weight_path))
def inference(self, input_image, train=False):
with tf.variable_scope("CPM"):
s = input_image.get_shape().as_list()
assert s[1] == self.crop_size and s[2] == self.crop_size
layers_per_block = [2, 2, 4, 2]
out_chan_list = [64, 128, 256, 512]
pool_list = [True, True, True, False]
# conv1_1 ~ conv4_4
x = input_image
for block_id, (layer_num, chan_num, pool) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1):
for layer_id in range(layer_num):
x = ops.conv_relu(x, 'conv%d_%d' % (block_id, layer_id + 1), kernel_size=3, stride=1, out_chan=chan_num, leaky=False, trainable=train)
if pool:
x = ops.max_pool(x, 'pool%d' % block_id)
PAF = []
if not self.withPAF: # openpose hand net
x = ops.conv_relu(x, 'conv4_3', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv_relu(x, 'conv4_4', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv_relu(x, 'conv5_1', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv_relu(x, 'conv5_2', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
conv_feature = ops.conv_relu(x, 'conv5_3_CPM', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x = ops.conv_relu(conv_feature, 'conv6_1_CPM', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
x = ops.conv(x, 'conv6_2_CPM', kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps = [x]
for stage_id in range(2, 7):
x = tf.concat([x, conv_feature], axis=3, name='concat_stage{}'.format(stage_id))
for layer_id in range(1, 6):
x = ops.conv_relu(x, 'Mconv{}_stage{}'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x = ops.conv_relu(x, 'Mconv6_stage{}'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
x = ops.conv(x, 'Mconv7_stage{}'.format(stage_id), kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps.append(x)
else: # with PAF (openpose body net)
x = ops.conv_relu(x, 'conv4_3_CPM', kernel_size=3, stride=1, out_chan=256, leaky=False, trainable=train)
conv_feature = ops.conv_relu(x, 'conv4_4_CPM', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(conv_feature, 'conv5_1_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'conv5_2_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'conv5_3_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'conv5_4_CPM_L1', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
x1 = ops.conv(x1, 'conv5_5_CPM_L1', kernel_size=1, stride=1, out_chan=self.PAFdim * self.numPAF, trainable=train)
x2 = ops.conv_relu(conv_feature, 'conv5_1_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'conv5_2_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'conv5_3_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'conv5_4_CPM_L2', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
x2 = ops.conv(x2, 'conv5_5_CPM_L2', kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps = [x2]
PAF.append(x1)
for stage_id in range(2, 2 + self.numStage):
x = tf.concat([x1, x2, conv_feature], axis=3, name='concat_stage{}'.format(stage_id))
x1 = ops.conv_relu(x, 'Mconv{}_stage{}_L1'.format(1, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x, 'Mconv{}_stage{}_L2'.format(1, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
for layer_id in range(2, 6):
x1 = ops.conv_relu(x1, 'Mconv{}_stage{}_L1'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'Mconv{}_stage{}_L2'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv_relu(x1, 'Mconv6_stage{}_L1'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
x2 = ops.conv_relu(x2, 'Mconv6_stage{}_L2'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
x1 = ops.conv(x1, 'Mconv7_stage{}_L1'.format(stage_id), kernel_size=1, stride=1, out_chan=self.PAFdim * self.numPAF, trainable=train)
x2 = ops.conv(x2, 'Mconv7_stage{}_L2'.format(stage_id), kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
scoremaps.append(x2)
PAF.append(x1)
return scoremaps, conv_feature, PAF
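# Added usage sketch (illustration only): build the PAF variant of the network on
# a dummy input using the default hyper-parameters of this class; the batch size
# and eval-only setting are assumptions, not values from the training scripts.
if __name__ == '__main__':
    net = CPM(withPAF=True)
    dummy_image = tf.placeholder(tf.float32, [1, net.crop_size, net.crop_size, 3])
    scoremaps, conv_feature, PAF = net.inference(dummy_image, train=False)
    print(scoremaps[-1].get_shape(), PAF[-1].get_shape())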
| body2hands-main | visualization/POF/nets/CPM.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class TempConstReader(BaseReader):
crop_scale_noise_sigma = 0.1
crop_offset_noise_sigma = 0.1
def __init__(self, objtype=0, shuffle=False, batch_size=1, crop_noise=False):
super(TempConstReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert objtype in (0, 1), "This data reader only support single body / hands"
def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
# input to this data reader should have two consecutive frames
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['1_img_dir'] = flow_dict['1_img_dirs']
data_dict['2_img_dir'] = flow_dict['2_img_dirs']
data_dict['1_K'] = flow_dict['1_K']
data_dict['2_K'] = flow_dict['2_K']
# rotate and project to camera frame
if self.objtype == 0:
body2d_1, body3d_1 = self.project_tf(flow_dict['1_body'], flow_dict['1_K'], flow_dict['1_R'], flow_dict['1_t'], flow_dict['1_distCoef'])
body2d_2, body3d_2 = self.project_tf(flow_dict['2_body'], flow_dict['2_K'], flow_dict['2_R'], flow_dict['2_t'], flow_dict['2_distCoef'])
body3d_1 = tf.cast(body3d_1, tf.float32)
body3d_2 = tf.cast(body3d_2, tf.float32)
body2d_1 = tf.cast(body2d_1, tf.float32)
body2d_2 = tf.cast(body2d_2, tf.float32)
data_dict['1_keypoint_xyz_origin'] = body3d_1
data_dict['2_keypoint_xyz_origin'] = body3d_2
data_dict['1_keypoint_uv_origin'] = body2d_1
data_dict['2_keypoint_uv_origin'] = body2d_2
data_dict['1_body_valid'] = flow_dict['1_body_valid']
data_dict['2_body_valid'] = flow_dict['2_body_valid']
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand3d_1 = tf.cond(cond_left, lambda: flow_dict['1_left_hand'], lambda: flow_dict['1_right_hand']) # in world coordinate
hand3d_2 = tf.cond(cond_left, lambda: flow_dict['2_left_hand'], lambda: flow_dict['2_right_hand']) # in world coordinate
hand2d_1, hand3d_1 = self.project_tf(hand3d_1, flow_dict['1_K'], flow_dict['1_R'], flow_dict['1_t'], flow_dict['1_distCoef']) # in camera coordinate
hand2d_2, hand3d_2 = self.project_tf(hand3d_2, flow_dict['2_K'], flow_dict['2_R'], flow_dict['2_t'], flow_dict['2_distCoef']) # in camera coordinate
hand3d_1 = tf.cast(hand3d_1, tf.float32)
hand3d_2 = tf.cast(hand3d_2, tf.float32)
hand2d_1 = tf.cast(hand2d_1, tf.float32)
hand2d_2 = tf.cast(hand2d_2, tf.float32)
data_dict['1_keypoint_xyz_origin'] = hand3d_1
data_dict['2_keypoint_xyz_origin'] = hand3d_2
data_dict['1_keypoint_uv_origin'] = hand2d_1
data_dict['2_keypoint_uv_origin'] = hand2d_2
data_dict['cond_left'] = cond_left
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
# read image
if read_image:
img_file_1 = tf.read_file(flow_dict['1_img_dirs'])
img_file_2 = tf.read_file(flow_dict['2_img_dirs'])
image_1 = tf.image.decode_image(img_file_1, channels=3)
image_2 = tf.image.decode_image(img_file_2, channels=3)
image_1 = tf.image.pad_to_bounding_box(image_1, 0, 0, imh, imw)
image_2 = tf.image.pad_to_bounding_box(image_2, 0, 0, imh, imw)
image_1.set_shape((imh, imw, 3))
image_2.set_shape((imh, imw, 3))
image_1 = tf.cast(image_1, tf.float32) / 255.0 - 0.5
image_2 = tf.cast(image_2, tf.float32) / 255.0 - 0.5
data_dict['1_image'] = image_1
data_dict['2_image'] = image_2
            if '1_mask_dirs' in flow_dict:
                assert '2_mask_dirs' in flow_dict
mask_file_1 = tf.read_file(flow_dict['1_mask_dirs'])
mask_file_2 = tf.read_file(flow_dict['2_mask_dirs'])
mask_1 = tf.image.decode_image(mask_file_1, channels=3)
mask_2 = tf.image.decode_image(mask_file_2, channels=3)
mask_1 = tf.image.pad_to_bounding_box(mask_1, 0, 0, imh, imw)
mask_2 = tf.image.pad_to_bounding_box(mask_2, 0, 0, imh, imw)
mask_1.set_shape((imh, imw, 3))
mask_2.set_shape((imh, imw, 3))
mask_1 = mask_1[:, :, 0]
mask_2 = mask_2[:, :, 0]
mask_1 = tf.cast(mask_1, tf.float32)
mask_2 = tf.cast(mask_2, tf.float32)
else:
mask_1 = tf.ones((imh, imw), dtype=tf.float32)
mask_2 = tf.ones((imh, imw), dtype=tf.float32)
data_dict['1_mask'] = mask_1
data_dict['2_mask'] = mask_2
# calculate crop size
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints_1 = body3d_1
keypoints_2 = body3d_2
valid_1 = flow_dict['1_body_valid']
valid_2 = flow_dict['2_body_valid']
elif self.objtype == 1:
keypoints_1 = hand3d_1
keypoints_2 = hand3d_2
valid_1 = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
valid_2 = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['1_hand_valid'] = valid_1
data_dict['2_hand_valid'] = valid_2
crop_center3d_1, scale3d_1, crop_center2d_1, scale2d_1, crop_center3d_2, scale3d_2, crop_center2d_2, scale2d_2 = \
self.calc_crop_scale_temp_const(keypoints_1, flow_dict['1_K'], flow_dict['1_distCoef'], valid_1, keypoints_2, flow_dict['2_K'], flow_dict['2_distCoef'], valid_2)
data_dict['1_crop_center2d'], data_dict['1_scale2d'] = crop_center2d_1, scale2d_1
data_dict['2_crop_center2d'], data_dict['2_scale2d'] = crop_center2d_2, scale2d_2
data_dict['1_crop_center3d'], data_dict['1_scale3d'] = crop_center3d_1, scale3d_1
data_dict['2_crop_center3d'], data_dict['2_scale3d'] = crop_center3d_2, scale3d_2
# do cropping
if self.objtype == 1:
body2d_1 = hand2d_1
body2d_2 = hand2d_2
body3d_1 = hand3d_1
body3d_2 = hand3d_2
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle_1 = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
else:
rotate_angle_1 = 0.0
rotate_angle_2 = tf.random_uniform([], minval=-np.pi * 5 / 180, maxval=np.pi * 5 / 180) + rotate_angle_1
R2_1 = tf.reshape(tf.stack([tf.cos(rotate_angle_1), -tf.sin(rotate_angle_1), tf.sin(rotate_angle_1), tf.cos(rotate_angle_1)]), [2, 2])
R2_2 = tf.reshape(tf.stack([tf.cos(rotate_angle_2), -tf.sin(rotate_angle_2), tf.sin(rotate_angle_2), tf.cos(rotate_angle_2)]), [2, 2])
R3_1 = tf.reshape(tf.stack([tf.cos(rotate_angle_1), -tf.sin(rotate_angle_1), 0, tf.sin(rotate_angle_1), tf.cos(rotate_angle_1), 0, 0, 0, 1]), [3, 3])
R3_2 = tf.reshape(tf.stack([tf.cos(rotate_angle_2), -tf.sin(rotate_angle_2), 0, tf.sin(rotate_angle_2), tf.cos(rotate_angle_2), 0, 0, 0, 1]), [3, 3])
body2d_1 = tf.matmul((body2d_1 - crop_center2d_1), R2_1) + crop_center2d_1
body2d_2 = tf.matmul((body2d_2 - crop_center2d_2), R2_2) + crop_center2d_2
body3d_1 = tf.matmul((body3d_1 - crop_center3d_1), R3_1) + crop_center3d_1
body3d_2 = tf.matmul((body3d_2 - crop_center3d_2), R3_2) + crop_center3d_2
data_dict['1_keypoint_xyz_origin'] = body3d_1 # note that the projection of 3D might not be aligned with 2D any more after rotation
data_dict['2_keypoint_xyz_origin'] = body3d_2 # note that the projection of 3D might not be aligned with 2D any more after rotation
data_dict['1_keypoint_uv_origin'] = body2d_1
data_dict['2_keypoint_uv_origin'] = body2d_2
body2d_local_1 = self.update_keypoint2d(body2d_1, crop_center2d_1, scale2d_1)
body2d_local_2 = self.update_keypoint2d(body2d_2, crop_center2d_2, scale2d_2)
data_dict['1_keypoint_uv_local'] = body2d_local_1
data_dict['2_keypoint_uv_local'] = body2d_local_2
if read_image:
image_crop_1 = self.crop_image(image_1, crop_center2d_1, scale2d_1)
image_crop_2 = self.crop_image(image_2, crop_center2d_2, scale2d_2)
data_dict['1_image_crop'] = image_crop_1
data_dict['2_image_crop'] = image_crop_2
mask_crop_1 = self.crop_image(tf.stack([mask_1] * 3, axis=2), crop_center2d_1, scale2d_1)
mask_crop_2 = self.crop_image(tf.stack([mask_2] * 3, axis=2), crop_center2d_2, scale2d_2)
data_dict['1_mask_crop'] = mask_crop_1[:, :, 0]
data_dict['2_mask_crop'] = mask_crop_2[:, :, 0]
data_dict['1_image_crop'] = tf.contrib.image.rotate(data_dict['1_image_crop'], rotate_angle_1)
data_dict['2_image_crop'] = tf.contrib.image.rotate(data_dict['2_image_crop'], rotate_angle_2)
data_dict['1_mask_crop'] = tf.contrib.image.rotate(data_dict['1_mask_crop'], rotate_angle_1)
data_dict['2_mask_crop'] = tf.contrib.image.rotate(data_dict['2_mask_crop'], rotate_angle_2)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image_1 = tf.image.resize_images(data_dict['1_image_crop'], [rescale, rescale])
resized_image_2 = tf.image.resize_images(data_dict['2_image_crop'], [rescale, rescale])
data_dict['1_image_crop'] = tf.image.resize_images(resized_image_1, [self.crop_size, self.crop_size])
data_dict['2_image_crop'] = tf.image.resize_images(resized_image_2, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d_1 = self.create_multiple_gaussian_map(body2d_local_1[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid_1, extra=True) # coord_hw, imsize_hw
scoremap2d_2 = self.create_multiple_gaussian_map(body2d_local_2[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid_2, extra=True) # coord_hw, imsize_hw
data_dict['1_scoremap2d'] = scoremap2d_1
data_dict['2_scoremap2d'] = scoremap2d_2
if withPAF:
from utils.PAF import createPAF
data_dict['1_PAF'] = createPAF(body2d_local_1, body3d_1, self.objtype, (self.crop_size, self.crop_size), True, valid_vec=valid_1)
data_dict['2_PAF'] = createPAF(body2d_local_2, body3d_2, self.objtype, (self.crop_size, self.crop_size), True, valid_vec=valid_2)
data_dict['1_PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
data_dict['2_PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
# create 3D gaussian_map
body3d_local_1 = self.update_keypoint3d(body3d_1, crop_center3d_1, scale3d_1)
body3d_local_2 = self.update_keypoint3d(body3d_2, crop_center3d_2, scale3d_2)
data_dict['1_keypoint_xyz_local'] = body3d_local_1
data_dict['2_keypoint_xyz_local'] = body3d_local_2
# scoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d, valid_vec=valid, extra=True)
# data_dict['1_scoremap3d'] = scoremap3d
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['1_image_crop'] = tf.cond(cond_left, lambda: data_dict['1_image_crop'], lambda: data_dict['1_image_crop'][:, ::-1, :])
data_dict['2_image_crop'] = tf.cond(cond_left, lambda: data_dict['2_image_crop'], lambda: data_dict['2_image_crop'][:, ::-1, :])
data_dict['1_mask_crop'] = tf.cond(cond_left, lambda: data_dict['1_mask_crop'], lambda: data_dict['1_mask_crop'][:, ::-1])
data_dict['2_mask_crop'] = tf.cond(cond_left, lambda: data_dict['2_mask_crop'], lambda: data_dict['2_mask_crop'][:, ::-1])
data_dict['1_scoremap2d'] = tf.cond(cond_left, lambda: data_dict['1_scoremap2d'], lambda: data_dict['1_scoremap2d'][:, ::-1, :])
data_dict['2_scoremap2d'] = tf.cond(cond_left, lambda: data_dict['2_scoremap2d'], lambda: data_dict['2_scoremap2d'][:, ::-1, :])
data_dict['1_keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['1_keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['1_keypoint_uv_local'])
data_dict['2_keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['2_keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['2_keypoint_uv_local'])
if withPAF:
data_dict['1_PAF'] = tf.cond(cond_left, lambda: data_dict['1_PAF'],
lambda: (data_dict['1_PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['1_PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
data_dict['2_PAF'] = tf.cond(cond_left, lambda: data_dict['2_PAF'],
lambda: (data_dict['2_PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['2_PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=20,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=20,
enqueue_many=False)
return dict(zip(names, tensors))
def calc_crop_scale_temp_const(self, keypoints_1, calibK_1, calibDC_1, valid_1, keypoints_2, calibK_2, calibDC_2, valid_2):
if self.objtype == 0:
keypoint_center_1 = (keypoints_1[8] + keypoints_1[11]) / 2
keypoint_center_2 = (keypoints_2[8] + keypoints_2[11]) / 2
center_valid_1 = tf.logical_and(valid_1[8], valid_1[11])
center_valid_2 = tf.logical_and(valid_2[8], valid_2[11])
elif self.objtype == 1:
keypoint_center_1 = keypoints_1[12]
keypoint_center_2 = keypoints_2[12]
center_valid_1 = valid_1[12]
center_valid_2 = valid_2[12]
else:
raise NotImplementedError
valid_idx_1 = tf.where(valid_1)[:, 0]
valid_idx_2 = tf.where(valid_2)[:, 0]
valid_keypoints_1 = tf.gather(keypoints_1, valid_idx_1, name='1_valid_keypoints')
valid_keypoints_2 = tf.gather(keypoints_2, valid_idx_2, name='2_valid_keypoints')
min_coord_1 = tf.reduce_min(valid_keypoints_1, 0, name='1_min_coord')
min_coord_2 = tf.reduce_min(valid_keypoints_2, 0, name='2_min_coord')
max_coord_1 = tf.reduce_max(valid_keypoints_1, 0, name='1_max_coord')
max_coord_2 = tf.reduce_max(valid_keypoints_2, 0, name='2_max_coord')
keypoint_center_1 = tf.cond(center_valid_1, lambda: keypoint_center_1, lambda: (min_coord_1 + max_coord_1) / 2)
keypoint_center_2 = tf.cond(center_valid_2, lambda: keypoint_center_2, lambda: (min_coord_2 + max_coord_2) / 2)
keypoint_center_1.set_shape((3,))
keypoint_center_2.set_shape((3,))
fit_size_1 = tf.reduce_max(tf.maximum(max_coord_1 - keypoint_center_1, keypoint_center_1 - min_coord_1))
fit_size_2 = tf.reduce_max(tf.maximum(max_coord_2 - keypoint_center_2, keypoint_center_2 - min_coord_2))
crop_scale_noise_1 = tf.cast(1.0, tf.float32)
if self.crop_noise:
crop_scale_noise_1 = tf.exp(tf.truncated_normal([], mean=0.0, stddev=self.crop_scale_noise_sigma))
crop_scale_noise_1 = tf.maximum(crop_scale_noise_1, tf.reciprocal(self.crop_size_zoom))
crop_scale_noise_2 = crop_scale_noise_1 + tf.truncated_normal([], mean=0.0, stddev=0.01)
crop_size_best_1 = tf.multiply(crop_scale_noise_1, 2 * fit_size_1 * self.crop_size_zoom, name='1_crop_size_best')
crop_size_best_2 = tf.multiply(crop_scale_noise_2, 2 * fit_size_2 * self.crop_size_zoom, name='2_crop_size_best')
crop_offset_noise_1 = tf.cast(0.0, tf.float32)
if self.crop_noise:
crop_offset_noise_1 = tf.truncated_normal([3], mean=0.0, stddev=self.crop_offset_noise_sigma) * fit_size_1 * tf.constant([1., 1., 0.], dtype=tf.float32)
crop_offset_noise_2 = tf.truncated_normal([3], mean=0.0, stddev=0.01) * fit_size_2 * tf.constant([1., 1., 0.], dtype=tf.float32) + crop_offset_noise_1
crop_offset_noise_1 = tf.maximum(crop_offset_noise_1, max_coord_1 + 1e-5 - crop_size_best_1 / 2 - keypoint_center_1)
crop_offset_noise_2 = tf.maximum(crop_offset_noise_2, max_coord_2 + 1e-5 - crop_size_best_2 / 2 - keypoint_center_2)
crop_offset_noise_1 = tf.minimum(crop_offset_noise_1, min_coord_1 - 1e-5 + crop_size_best_1 / 2 - keypoint_center_1, name='1_crop_offset_noise')
crop_offset_noise_2 = tf.minimum(crop_offset_noise_2, min_coord_2 - 1e-5 + crop_size_best_2 / 2 - keypoint_center_2, name='2_crop_offset_noise')
crop_center_1 = tf.add(keypoint_center_1, crop_offset_noise_1, name='1_crop_center')
crop_center_2 = tf.add(keypoint_center_2, crop_offset_noise_2, name='2_crop_center')
crop_box_bl_1 = tf.concat([crop_center_1[:2] - crop_size_best_1 / 2, crop_center_1[2:]], 0)
crop_box_bl_2 = tf.concat([crop_center_2[:2] - crop_size_best_2 / 2, crop_center_2[2:]], 0)
crop_box_ur_1 = tf.concat([crop_center_1[:2] + crop_size_best_1 / 2, crop_center_1[2:]], 0)
crop_box_ur_2 = tf.concat([crop_center_2[:2] + crop_size_best_2 / 2, crop_center_2[2:]], 0)
crop_box_1 = tf.stack([crop_box_bl_1, crop_box_ur_1], 0)
crop_box_2 = tf.stack([crop_box_bl_2, crop_box_ur_2], 0)
scale_1 = tf.cast(self.grid_size, tf.float32) / crop_size_best_1
scale_2 = tf.cast(self.grid_size, tf.float32) / crop_size_best_2
crop_box2d_1, _ = self.project_tf(crop_box_1, calibK_1, calibDistCoef=calibDC_1)
crop_box2d_2, _ = self.project_tf(crop_box_2, calibK_2, calibDistCoef=calibDC_2)
min_coord2d_1 = tf.reduce_min(crop_box2d_1, 0)
min_coord2d_2 = tf.reduce_min(crop_box2d_2, 0)
max_coord2d_1 = tf.reduce_max(crop_box2d_1, 0)
max_coord2d_2 = tf.reduce_max(crop_box2d_2, 0)
crop_size_best2d_1 = tf.reduce_max(max_coord2d_1 - min_coord2d_1)
crop_size_best2d_2 = tf.reduce_max(max_coord2d_2 - min_coord2d_2)
crop_center2d_1 = (min_coord2d_1 + max_coord2d_1) / 2
crop_center2d_2 = (min_coord2d_2 + max_coord2d_2) / 2
scale2d_1 = tf.cast(self.crop_size, tf.float32) / crop_size_best2d_1
scale2d_2 = tf.cast(self.crop_size, tf.float32) / crop_size_best2d_2
return crop_center_1, scale_1, crop_center2d_1, scale2d_1, crop_center_2, scale_2, crop_center2d_2, scale2d_2
@staticmethod
def convertToSingleFrameDataWithPrevGT(data_dict):
out_dict = {}
out_dict['scoremap2d'] = data_dict['2_scoremap2d']
if '2_hand_valid' in data_dict:
out_dict['hand_valid'] = data_dict['2_hand_valid']
elif '2_body_valid' in data_dict:
out_dict['body_valid'] = data_dict['2_body_valid']
out_dict['PAF'] = data_dict['2_PAF']
out_dict['PAF_type'] = data_dict['2_PAF_type']
out_dict['mask_crop'] = data_dict['2_mask_crop']
out_dict['image_crop'] = tf.concat([data_dict['2_image_crop'], data_dict['1_image_crop'], data_dict['1_scoremap2d'], data_dict['1_PAF']], axis=3)
return out_dict
@staticmethod
def convertToSingleFrameDataWithPrevOutput(data_dict):
out_dict = {}
out_dict['scoremap2d'] = data_dict['2_scoremap2d']
if '2_hand_valid' in data_dict:
out_dict['hand_valid'] = data_dict['2_hand_valid']
elif '2_body_valid' in data_dict:
out_dict['body_valid'] = data_dict['2_body_valid']
out_dict['PAF'] = data_dict['2_PAF']
out_dict['PAF_type'] = data_dict['2_PAF_type']
out_dict['mask_crop'] = data_dict['2_mask_crop']
out_dict['image_crop'] = tf.concat([data_dict['2_image_crop'], data_dict['1_image_crop']], axis=3)
out_dict['pre_input'] = data_dict['pre_input']
out_dict['temp_data'] = data_dict['temp_data']
return out_dict
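

# Illustrative sketch (not part of the reader above): convertToSingleFrameDataWithPrevGT
# feeds the network one tensor whose channels stack the current crop with the previous
# frame's crop, ground-truth score maps and PAFs (tf.concat along axis=3), while
# convertToSingleFrameDataWithPrevOutput stacks only the two image crops. A plain numpy
# version of that channel layout is below; the channel counts in the example are
# assumptions for the hand case (21 joints plus background, 20 limbs times 3 PAF
# channels), not values read from the data.
import numpy as np


def stack_temporal_input(cur_img, prev_img, prev_scoremap, prev_paf):
    # mirrors tf.concat([...], axis=3) on batched NHWC arrays
    return np.concatenate([cur_img, prev_img, prev_scoremap, prev_paf], axis=3)
# Example: with a 368x368 crop and the assumed hand channel counts, the stacked input has
# 3 + 3 + 22 + 60 = 88 channels.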
| body2hands-main | visualization/POF/data/TempConstReader.py |
# Run this script with OpenCV2
import cv2
import numpy as np
import os
import json
source_dir = '/media/posefs3b/Users/gines/mpii_mask'
target_dir = '/media/posefs3b/Users/donglaix/mpii_mask'
if __name__ == '__main__':
path_to_db = './MPII_collected.json'
with open(path_to_db) as f:
db_data = json.load(f)
total_num = len(db_data['img_paths'])
for i in range(total_num):
print ('processing image {} / {}'.format(i, total_num))
bbox = np.array(db_data['bbox'][i], dtype=np.float32)
bbox_other = np.array(db_data['bbox_other'][i], dtype=np.float32).reshape(-1, 4)
x = (bbox[0] + bbox[2]) / 2
y = (bbox[1] + bbox[3]) / 2
img_path = db_data['img_paths'][i]
        source_mask = os.path.join(source_dir, img_path)
mask = cv2.imread(source_mask)
mask = (mask[:, :, 0] >= 128).astype(np.uint8) # the stored data are 0 ~ 255, convert to bool
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
belong = []
for cnt in contours:
x1 = np.amin(cnt[:, 0, 0])
x2 = np.amax(cnt[:, 0, 0])
y1 = np.amin(cnt[:, 0, 1])
y2 = np.amax(cnt[:, 0, 1])
if x < x1 or x > x2 or y < y1 or y > y2:
belong.append(False)
continue
# the center is inside this contour, now check the all other bounding boxes
xo = (bbox_other[:, 0] + bbox_other[:, 2]) / 2
yo = (bbox_other[:, 1] + bbox_other[:, 3]) / 2
if ((xo >= x1) * (xo <= x2) * (yo >= y1) * (yo <= y2)).any(): # the center of any other bounding boxes fall inside
belong.append(False)
else:
belong.append(True) # the center of current bbox is in and others are not in.
assert len(belong) == len(contours)
new_mask = np.ones(mask.shape, dtype=np.uint8)
cv2.drawContours(new_mask, [cnt for TF, cnt in zip(belong, contours) if not TF], -1, 0, -1)
cv2.imwrite(os.path.join(target_dir, '{:05d}.png'.format(i)), new_mask)
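

# Illustrative sketch (not part of the script above): the loop decides whether a contour
# "belongs" to the target person by requiring that the target bbox center lies inside the
# contour's bounding box while no other person's bbox center does. The same test written
# as a standalone function over plain arrays; the argument names are illustrative only.
import numpy as np


def bbox_center_owns_contour(center_xy, other_centers_xy, contour_box_xyxy):
    x, y = center_xy
    x1, y1, x2, y2 = contour_box_xyxy
    if x < x1 or x > x2 or y < y1 or y > y2:
        return False  # the target center is outside this contour's bounding box
    if len(other_centers_xy) == 0:
        return True
    others = np.asarray(other_centers_xy, dtype=np.float32)
    inside = (others[:, 0] >= x1) & (others[:, 0] <= x2) & (others[:, 1] >= y1) & (others[:, 1] <= y2)
    return not bool(inside.any())  # owned only if no other person's center falls inside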
| body2hands-main | visualization/POF/data/process_MPII_mask.py |
import tensorflow as tf
import numpy as np
import utils.general
class BaseReader(object):
    # BaseReader is a virtual base class to be inherited by other data readers, which provide data by calling register_tensor
crop_size_zoom = 1.5
crop_size_zoom_2d = 1.8
crop_size = 368
grid_size = crop_size // 8
sigma = 7
sigma3d = 3
rotate_augmentation = False
blur_augmentation = False
crop_scale_noise_sigma = 0.1
crop_offset_noise_sigma = 0.1
crop_scale_noise_sigma_2d = 0.1
crop_offset_noise_sigma_2d = 0.1
def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
# objtype: 0 = body only, 1 = hand only, 2 = body and hands
assert objtype in (0, 1, 2)
self.objtype = objtype
self.shuffle = shuffle
self.batch_size = batch_size
self.crop_noise = crop_noise
def register_tensor(self, data_dict, order_dict):
l = [len(value) for value in data_dict.values() if len(value) > 0]
assert len(set(l)) == 1 # check the length of all data items to be consistent
print('loading dataset of size {}'.format(l[0]))
self.tensor_dict = {}
for key, value in data_dict.items():
if len(value) > 0:
value = np.array(value)
if key in order_dict:
self.tensor_dict[key] = self.switch_joint_order(value, order_dict[key])
else:
self.tensor_dict[key] = value
def get(self, withPAF=True, PAF_normalize3d=True, read_image=True, imw=1920, imh=1080, bbox2d=0):
        # bbox2d: 0: computed from the 3D bounding box, 1: computed from the OpenPose keypoints
assert bbox2d in (0, 1)
assert type(withPAF) == bool
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['img_dir'] = flow_dict['img_dirs']
data_dict['K'] = flow_dict['K']
# rotate and project to camera frame
if self.objtype == 0:
body2d, body3d = self.project_tf(flow_dict['body'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
body3d = tf.cast(body3d, tf.float32)
body2d = tf.cast(body2d, tf.float32)
data_dict['keypoint_xyz_origin'] = body3d
data_dict['keypoint_uv_origin'] = body2d
data_dict['body_valid'] = flow_dict['body_valid']
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand']) # in world coordinate
hand2d, hand3d = self.project_tf(hand3d, flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef']) # in camera coordinate
hand3d = tf.cast(hand3d, tf.float32)
hand2d = tf.cast(hand2d, tf.float32)
data_dict['keypoint_xyz_origin'] = hand3d
data_dict['keypoint_uv_origin'] = hand2d
data_dict['cond_left'] = cond_left
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
elif self.objtype == 2:
body2d, body3d = self.project_tf(flow_dict['body'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
lhand2d, lhand3d = self.project_tf(flow_dict['left_hand'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
rhand2d, rhand3d = self.project_tf(flow_dict['right_hand'], flow_dict['K'], flow_dict['R'], flow_dict['t'], flow_dict['distCoef'])
data_dict['body_xyz_origin'] = body3d
data_dict['body_uv_origin'] = body2d
data_dict['lhand_xyz_origin'] = lhand3d
data_dict['lhand_uv_origin'] = lhand2d
data_dict['rhand_xyz_origin'] = rhand3d
data_dict['rhand_uv_origin'] = rhand2d
data_dict['body_valid'] = flow_dict['body_valid']
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
# read image
if read_image:
img_file = tf.read_file(flow_dict['img_dirs'])
image = tf.image.decode_image(img_file, channels=3)
image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
image.set_shape((imh, imw, 3))
image = tf.cast(image, tf.float32) / 255.0 - 0.5
data_dict['image'] = image
if 'mask_dirs' in flow_dict:
mask_file = tf.read_file(flow_dict['mask_dirs'])
mask = tf.image.decode_image(mask_file, channels=3)
mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
mask.set_shape((imh, imw, 3))
mask = mask[:, :, 0]
mask = tf.cast(mask, tf.float32)
else:
mask = tf.ones((imh, imw), dtype=tf.float32)
data_dict['mask'] = mask
# calculate crop size
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints = body3d
valid = flow_dict['body_valid']
elif self.objtype == 1:
keypoints = hand3d
valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['hand_valid'] = valid
crop_center3d, scale3d, crop_center2d, scale2d = self.calc_crop_scale(keypoints, flow_dict['K'], flow_dict['distCoef'], valid)
data_dict['crop_center2d'], data_dict['scale2d'] = crop_center2d, scale2d
data_dict['crop_center3d'], data_dict['scale3d'] = crop_center3d, scale3d
# do cropping
if self.objtype == 1:
body2d = hand2d
body3d = hand3d
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0, tf.sin(rotate_angle), tf.cos(rotate_angle), 0, 0, 0, 1]), [3, 3])
body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
body3d = tf.matmul((body3d - crop_center3d), R3) + crop_center3d
data_dict['keypoint_xyz_origin'] = body3d # note that the projection of 3D might not be aligned with 2D any more after rotation
data_dict['keypoint_uv_origin'] = body2d
body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
data_dict['keypoint_uv_local'] = body2d_local
if read_image:
image_crop = self.crop_image(image, crop_center2d, scale2d)
data_dict['image_crop'] = image_crop
mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
data_dict['mask_crop'] = mask_crop[:, :, 0]
if self.rotate_augmentation:
data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True) # coord_hw, imsize_hw
data_dict['scoremap2d'] = scoremap2d
if withPAF:
from utils.PAF import createPAF
data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), PAF_normalize3d, valid_vec=valid)
data_dict['PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
# create 3D gaussian_map
body3d_local = self.update_keypoint3d(body3d, crop_center3d, scale3d)
data_dict['keypoint_xyz_local'] = body3d_local
# scoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d, valid_vec=valid, extra=True)
# data_dict['scoremap3d'] = scoremap3d
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
if withPAF:
data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
elif self.objtype == 2:
bcrop_center3d, bscale3d, bcrop_center2d, bscale2d = self.calc_crop_scale(body3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['body_valid'])
lcrop_center3d, lscale3d, lcrop_center2d, lscale2d = self.calc_crop_scale(lhand3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['left_hand_valid'])
rcrop_center3d, rscale3d, rcrop_center2d, rscale2d = self.calc_crop_scale(rhand3d, flow_dict['K'], flow_dict['distCoef'], flow_dict['right_hand_valid'])
body3d_local = self.update_keypoint3d(body3d, bcrop_center3d, bscale3d)
lhand3d_local = self.update_keypoint3d(lhand3d, lcrop_center3d, lscale3d)
rhand3d_local = self.update_keypoint3d(rhand3d, rcrop_center3d, rscale3d)
bscoremap3d = self.create_multiple_gaussian_map_3d(body3d_local, self.grid_size, self.sigma3d,
valid_vec=flow_dict['body_valid'], extra=True) # coord_hw, imsize_hw
lscoremap3d = self.create_multiple_gaussian_map_3d(lhand3d_local, self.grid_size, self.sigma3d,
valid_vec=flow_dict['left_hand_valid'], extra=True) # coord_hw, imsize_hw
rscoremap3d = self.create_multiple_gaussian_map_3d(rhand3d_local, self.grid_size, self.sigma3d,
valid_vec=flow_dict['right_hand_valid'], extra=True) # coord_hw, imsize_hw
data_dict['bscoremap3d'] = bscoremap3d
data_dict['lscoremap3d'] = lscoremap3d
data_dict['rscoremap3d'] = rscoremap3d
data_dict['body_xyz_local'] = body3d_local
data_dict['lhand_xyz_local'] = lhand3d_local
data_dict['rhand_xyz_local'] = rhand3d_local
# 2D keypoints and cropped images
if bbox2d == 1:
# crop the 2D bounding box from openpose data
body2d = flow_dict['openpose_body']
lhand2d = flow_dict['openpose_lhand']
rhand2d = flow_dict['openpose_rhand']
bvalid = tf.logical_and(tf.not_equal(body2d[:, 0], 0.0), tf.not_equal(body2d[:, 1], 0.0))
lvalid = tf.logical_and(tf.not_equal(lhand2d[:, 0], 0.0), tf.not_equal(lhand2d[:, 1], 0.0))
rvalid = tf.logical_and(tf.not_equal(rhand2d[:, 0], 0.0), tf.not_equal(rhand2d[:, 1], 0.0))
data_dict['body_valid'] = bvalid
data_dict['left_hand_valid'] = lvalid
data_dict['right_hand_valid'] = rvalid
if 'openpose_foot' in flow_dict:
data_dict['openpose_foot'] = flow_dict['openpose_foot']
bcrop_center2d, bscale2d = self.calc_crop_scale2d(body2d, bvalid)
lcrop_center2d, lscale2d = self.calc_crop_scale2d(lhand2d, lvalid)
rcrop_center2d, rscale2d = self.calc_crop_scale2d(rhand2d, rvalid)
body2d_local = self.update_keypoint2d(body2d, bcrop_center2d, bscale2d)
lhand2d_local = self.update_keypoint2d(lhand2d, lcrop_center2d, lscale2d)
rhand2d_local = self.update_keypoint2d(rhand2d, rcrop_center2d, rscale2d)
data_dict['body_uv_local'] = body2d_local
data_dict['lhand_uv_local'] = lhand2d_local
data_dict['rhand_uv_local'] = rhand2d_local
data_dict['bcrop_center2d'] = bcrop_center2d
data_dict['lcrop_center2d'] = lcrop_center2d
data_dict['rcrop_center2d'] = rcrop_center2d
data_dict['bscale2d'] = bscale2d
data_dict['lscale2d'] = lscale2d
data_dict['rscale2d'] = rscale2d
if read_image:
bimage_crop = self.crop_image(image, bcrop_center2d, bscale2d)
limage_crop = self.crop_image(image, lcrop_center2d, lscale2d)
rimage_crop = self.crop_image(image, rcrop_center2d, rscale2d)
data_dict['bimage_crop'] = bimage_crop
data_dict['limage_crop'] = limage_crop
data_dict['rimage_crop'] = rimage_crop
bscoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
valid_vec=flow_dict['body_valid'], extra=True) # coord_hw, imsize_hw
lscoremap2d = self.create_multiple_gaussian_map(lhand2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
valid_vec=flow_dict['left_hand_valid'], extra=True) # coord_hw, imsize_hw
rscoremap2d = self.create_multiple_gaussian_map(rhand2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma,
valid_vec=flow_dict['right_hand_valid'], extra=True) # coord_hw, imsize_hw
data_dict['bscoremap2d'] = bscoremap2d
data_dict['lscoremap2d'] = lscoremap2d
data_dict['rscoremap2d'] = rscoremap2d
# for openpose data
for key, val in flow_dict.items():
if 'openpose' not in key:
continue
data_dict[key] = val
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=20,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=20,
enqueue_many=False)
return dict(zip(names, tensors))
def calc_crop_scale(self, keypoints, calibK, calibDC, valid):
if self.objtype == 0:
keypoint_center = (keypoints[8] + keypoints[11]) / 2
center_valid = tf.logical_and(valid[8], valid[11])
elif self.objtype == 1:
keypoint_center = keypoints[12]
center_valid = valid[12]
else: # objtype == 2
assert self.objtype == 2 # conditioned by the shape of input
if keypoints.shape[0] == 18:
keypoint_center = (keypoints[8] + keypoints[11]) / 2
center_valid = tf.logical_and(valid[8], valid[11])
else:
keypoint_center = keypoints[12]
center_valid = valid[12]
valid_idx = tf.where(valid)[:, 0]
valid_keypoints = tf.gather(keypoints, valid_idx, name='valid_keypoints')
min_coord = tf.reduce_min(valid_keypoints, 0, name='min_coord')
max_coord = tf.reduce_max(valid_keypoints, 0, name='max_coord')
keypoint_center = tf.cond(center_valid, lambda: keypoint_center, lambda: (min_coord + max_coord) / 2)
keypoint_center.set_shape((3,))
fit_size = tf.reduce_max(tf.maximum(max_coord - keypoint_center, keypoint_center - min_coord))
crop_scale_noise = tf.cast(1.0, tf.float32)
if self.crop_noise:
crop_scale_noise = tf.exp(tf.truncated_normal([], mean=0.0, stddev=self.crop_scale_noise_sigma))
crop_scale_noise = tf.maximum(crop_scale_noise, tf.reciprocal(self.crop_size_zoom))
crop_size_best = tf.multiply(crop_scale_noise, 2 * fit_size * self.crop_size_zoom, name='crop_size_best')
crop_offset_noise = tf.cast(0.0, tf.float32)
if self.crop_noise:
crop_offset_noise = tf.truncated_normal([3], mean=0.0, stddev=self.crop_offset_noise_sigma) * fit_size * tf.constant([1., 1., 0.], dtype=tf.float32)
crop_offset_noise = tf.maximum(crop_offset_noise, max_coord + 1e-5 - crop_size_best / 2 - keypoint_center)
crop_offset_noise = tf.minimum(crop_offset_noise, min_coord - 1e-5 + crop_size_best / 2 - keypoint_center, name='crop_offset_noise')
crop_center = tf.add(keypoint_center, crop_offset_noise, name='crop_center')
crop_box_bl = tf.concat([crop_center[:2] - crop_size_best / 2, crop_center[2:]], 0)
crop_box_ur = tf.concat([crop_center[:2] + crop_size_best / 2, crop_center[2:]], 0)
crop_box = tf.stack([crop_box_bl, crop_box_ur], 0)
scale = tf.cast(self.grid_size, tf.float32) / crop_size_best
crop_box2d, _ = self.project_tf(crop_box, calibK, calibDistCoef=calibDC)
min_coord2d = tf.reduce_min(crop_box2d, 0)
max_coord2d = tf.reduce_max(crop_box2d, 0)
crop_size_best2d = tf.reduce_max(max_coord2d - min_coord2d)
crop_center2d = (min_coord2d + max_coord2d) / 2
scale2d = tf.cast(self.crop_size, tf.float32) / crop_size_best2d
return crop_center, scale, crop_center2d, scale2d
def calc_crop_scale2d(self, keypoints, valid):
# assert self.objtype == 2
if keypoints.shape[0] == 19 or keypoints.shape[0] == 20:
keypoint_center = (keypoints[8] + keypoints[11]) / 2
center_valid = tf.logical_and(valid[8], valid[11])
else:
keypoint_center = keypoints[12]
center_valid = valid[12]
valid_idx = tf.where(valid)[:, 0]
valid_keypoints = tf.gather(keypoints, valid_idx)
min_coord = tf.reduce_min(valid_keypoints, 0)
max_coord = tf.reduce_max(valid_keypoints, 0)
keypoint_center = tf.cond(center_valid, lambda: keypoint_center, lambda: (min_coord + max_coord) / 2)
keypoint_center.set_shape((2,))
fit_size = tf.reduce_max(tf.maximum(max_coord - keypoint_center, keypoint_center - min_coord))
crop_scale_noise = tf.cast(1.0, tf.float32)
if self.crop_noise:
crop_scale_noise = tf.exp(tf.truncated_normal([], mean=0.0, stddev=self.crop_scale_noise_sigma_2d))
crop_size_best = 2 * fit_size * self.crop_size_zoom_2d * crop_scale_noise
crop_offset_noise = tf.cast(0.0, tf.float32)
if self.crop_noise:
crop_offset_noise = tf.truncated_normal([2], mean=0.0, stddev=self.crop_offset_noise_sigma_2d) * fit_size
crop_offset_noise = tf.maximum(crop_offset_noise, keypoint_center - crop_size_best / 2 - min_coord + 1)
crop_offset_noise = tf.minimum(crop_offset_noise, keypoint_center + crop_size_best / 2 - max_coord - 1)
crop_center = keypoint_center + crop_offset_noise
scale2d = tf.cast(self.crop_size, tf.float32) / crop_size_best
return crop_center, scale2d
def crop_image(self, image, crop_center2d, scale2d):
image_crop = utils.general.crop_image_from_xy(tf.expand_dims(image, 0), crop_center2d[::-1], self.crop_size, scale2d) # crop_center_hw
image_crop = tf.squeeze(image_crop)
return image_crop
def update_keypoint2d(self, keypoint2d, crop_center2d, scale2d):
keypoint_x = (keypoint2d[:, 0] - crop_center2d[0]) * scale2d + self.crop_size // 2
keypoint_y = (keypoint2d[:, 1] - crop_center2d[1]) * scale2d + self.crop_size // 2
keypoint2d_local = tf.stack([keypoint_x, keypoint_y], 1)
return keypoint2d_local
def update_keypoint3d(self, keypoint3d, crop_center3d, scale3d):
keypoint_x = (keypoint3d[:, 0] - crop_center3d[0]) * scale3d + self.grid_size // 2
keypoint_y = (keypoint3d[:, 1] - crop_center3d[1]) * scale3d + self.grid_size // 2
keypoint_z = (keypoint3d[:, 2] - crop_center3d[2]) * scale3d + self.grid_size // 2
keypoint3d_local = tf.stack([keypoint_x, keypoint_y, keypoint_z], 1)
return keypoint3d_local
@staticmethod
def project_tf(joint3d, calibK, calibR=None, calibt=None, calibDistCoef=None):
""" This function projects the 3D hand to 2D using camera parameters
"""
with tf.name_scope('project_tf'):
x = joint3d
if calibR is not None:
x = tf.matmul(joint3d, calibR, transpose_b=True)
if calibt is not None:
x = x + calibt
xi = tf.divide(x[:, 0], x[:, 2])
yi = tf.divide(x[:, 1], x[:, 2])
if calibDistCoef is not None:
X2 = xi * xi
Y2 = yi * yi
                XY = xi * yi  # cross term x*y used by the tangential distortion terms below
R2 = X2 + Y2
R4 = R2 * R2
R6 = R4 * R2
dc = calibDistCoef
radial = 1.0 + dc[0] * R2 + dc[1] * R4 + dc[4] * R6
tan_x = 2.0 * dc[2] * XY + dc[3] * (R2 + 2.0 * X2)
tan_y = 2.0 * dc[3] * XY + dc[2] * (R2 + 2.0 * Y2)
xi = radial * xi + tan_x
yi = radial * yi + tan_y
xp = tf.transpose(tf.stack([xi, yi], axis=0))
pt = tf.matmul(xp, calibK[:2, :2], transpose_b=True) + calibK[:2, 2]
return pt, x
@staticmethod
def switch_joint_order(keypoint, order):
# reorder the joints to the order used in our network
assert len(order.shape) == 1, 'order must be 1-dim'
# axis 0: sample, axis 1: keypoint order, axis 2: xyz
return keypoint[:, order, ...]
@staticmethod
def create_multiple_gaussian_map(coords_wh, output_size, sigma, valid_vec=None, extra=False):
""" Creates a map of size (output_shape[0], output_shape[1]) at (center[0], center[1])
with variance sigma for multiple coordinates."""
with tf.name_scope('create_multiple_gaussian_map'):
sigma = tf.cast(sigma, tf.float32)
assert len(output_size) == 2
s = coords_wh.get_shape().as_list()
coords_wh = tf.cast(coords_wh, tf.int32)
if valid_vec is not None:
valid_vec = tf.cast(valid_vec, tf.float32)
valid_vec = tf.squeeze(valid_vec)
cond_val = tf.greater(valid_vec, 0.5)
else:
cond_val = tf.ones_like(coords_wh[:, 0], dtype=tf.float32)
cond_val = tf.greater(cond_val, 0.5)
cond_1_in = tf.logical_and(tf.less(coords_wh[:, 0], output_size[0] - 1), tf.greater(coords_wh[:, 0], 0))
cond_2_in = tf.logical_and(tf.less(coords_wh[:, 1], output_size[1] - 1), tf.greater(coords_wh[:, 1], 0))
cond_in = tf.logical_and(cond_1_in, cond_2_in)
cond = tf.logical_and(cond_val, cond_in)
coords_wh = tf.cast(coords_wh, tf.float32)
# create meshgrid
x_range = tf.expand_dims(tf.range(output_size[0]), 1)
y_range = tf.expand_dims(tf.range(output_size[1]), 0)
X = tf.cast(tf.tile(x_range, [1, output_size[1]]), tf.float32)
Y = tf.cast(tf.tile(y_range, [output_size[0], 1]), tf.float32)
X.set_shape((output_size[0], output_size[1]))
Y.set_shape((output_size[0], output_size[1]))
X = tf.expand_dims(X, -1)
Y = tf.expand_dims(Y, -1)
X_b = tf.tile(X, [1, 1, s[0]])
Y_b = tf.tile(Y, [1, 1, s[0]])
X_b -= coords_wh[:, 0]
Y_b -= coords_wh[:, 1]
dist = tf.square(X_b) + tf.square(Y_b)
scoremap = tf.exp(-dist / (2 * tf.square(sigma))) * tf.cast(cond, tf.float32)
if extra:
negative = 1 - tf.reduce_sum(scoremap, axis=2, keep_dims=True)
negative = tf.minimum(tf.maximum(negative, 0.0), 1.0)
scoremap = tf.concat([scoremap, negative], axis=2)
return scoremap
@staticmethod
def create_multiple_gaussian_map_3d(keypoint_3d, output_size, sigma3d, valid_vec=None, extra=False):
""" Creates a 3D heatmap for the hand skeleton
"""
with tf.name_scope('create_multiple_gaussian_map_3d'):
if valid_vec is not None:
valid_vec = tf.cast(valid_vec, tf.float32)
valid_vec = tf.squeeze(valid_vec)
cond_val = tf.greater(valid_vec, 0.5)
else:
cond_val = tf.ones_like(keypoint_3d[:, 0], dtype=tf.float32)
cond_val = tf.greater(cond_val, 0.5)
sigma3d = tf.cast(sigma3d, tf.float32)
# reverse the order of axis: tensorflow uses NDHWC
reverse = keypoint_3d[:, ::-1]
z_range = tf.expand_dims(tf.expand_dims(tf.range(output_size, dtype=tf.float32), 1), 2)
y_range = tf.expand_dims(tf.expand_dims(tf.range(output_size, dtype=tf.float32), 0), 2)
x_range = tf.expand_dims(tf.expand_dims(tf.range(output_size, dtype=tf.float32), 0), 1)
Z = tf.tile(z_range, [1, output_size, output_size])
Y = tf.tile(y_range, [output_size, 1, output_size])
X = tf.tile(x_range, [output_size, output_size, 1])
Z = tf.expand_dims(Z, -1)
Y = tf.expand_dims(Y, -1)
X = tf.expand_dims(X, -1)
s = reverse.get_shape().as_list()
Z_b = tf.tile(Z, [1, 1, 1, s[0]])
Y_b = tf.tile(Y, [1, 1, 1, s[0]])
X_b = tf.tile(X, [1, 1, 1, s[0]])
Z_b -= reverse[:, 0]
Y_b -= reverse[:, 1]
X_b -= reverse[:, 2]
dist = tf.square(X_b) + tf.square(Y_b) + tf.square(Z_b)
scoremap_3d = tf.exp(-dist / (2 * tf.square(sigma3d))) * tf.cast(cond_val, tf.float32)
if extra:
negative = 1 - tf.reduce_sum(scoremap_3d, axis=3, keep_dims=True)
negative = tf.minimum(tf.maximum(negative, 0.0), 1.0)
scoremap_3d = tf.concat([scoremap_3d, negative], axis=3)
return scoremap_3d
def start_from(self, idx):
for key, value in self.tensor_dict.items():
if value.size > 0:
self.tensor_dict[key] = value[idx:]
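

# Illustrative sketch (not part of the reader class above): a plain numpy reference of
# project_tf and create_multiple_gaussian_map, convenient for checking the math outside a
# TF graph. distCoef follows the (k1, k2, p1, p2, k3) layout implied by the indices used
# in project_tf; validity masking, boundary checks and the background channel are omitted.
import numpy as np


def project_np(joint3d, K, R=None, t=None, distCoef=None):
    x = joint3d if R is None else joint3d.dot(R.T)
    if t is not None:
        x = x + t
    xi, yi = x[:, 0] / x[:, 2], x[:, 1] / x[:, 2]
    if distCoef is not None:
        k1, k2, p1, p2, k3 = distCoef
        r2 = xi * xi + yi * yi
        radial = 1.0 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3
        xy = xi * yi
        xi, yi = (radial * xi + 2.0 * p1 * xy + p2 * (r2 + 2.0 * xi * xi),
                  radial * yi + 2.0 * p2 * xy + p1 * (r2 + 2.0 * yi * yi))
    uv = np.stack([xi, yi], axis=1).dot(K[:2, :2].T) + K[:2, 2]
    return uv, x


def gaussian_scoremap_np(coords_hw, size_hw, sigma):
    # one Gaussian channel per (row, col) keypoint, matching the 2D score map construction
    rows, cols = np.mgrid[0:size_hw[0], 0:size_hw[1]].astype(np.float32)
    maps = [np.exp(-((rows - r) ** 2 + (cols - c) ** 2) / (2.0 * sigma ** 2)) for r, c in coords_hw]
    return np.stack(maps, axis=2)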
| body2hands-main | visualization/POF/data/BaseReader.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
import h5py
from utils.keypoint_conversion import human36m_to_main, mpi3d_to_main, SMPL_to_main
import pickle
import os
class HumanReader(BaseReader):
def __init__(self, name='Human3.6M', mode='training', objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(HumanReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert objtype == 0 # this dataset reader only supports human body data
assert mode in ('training', 'evaluation')
self.mode = mode
assert name in ('Human3.6M', 'MPI_INF_3DHP', 'UP', 'SURREAL')
self.name = name
if name == 'Human3.6M':
self.image_root = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/images/'
if self.mode == 'training':
image_list_file = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/train_images.txt'
path_to_db = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/train.h5'
else:
image_list_file = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/valid_images.txt'
path_to_db = '/media/posefs1b/Users/donglaix/c2f-vol-train/data/h36m/annot/valid.h5'
path_to_calib = '/media/posefs3b/Users/donglaix/h36m/cameras.h5'
with open(image_list_file) as f:
img_list = [_.strip() for _ in f]
fannot = h5py.File(path_to_db, 'r')
annot3d = fannot['S'][:]
annot2d = fannot['part'][:]
fannot.close()
fcalib = h5py.File(path_to_calib, 'r')
calib_data = {}
map_camera = {'54138969': 'camera1', '55011271': 'camera2', '58860488': 'camera3', '60457274': 'camera4'}
for pid in fcalib.keys():
if pid == '3dtest':
continue
person_cam_data = {}
for camera in map_camera.values():
cam_data = {_: fcalib[pid][camera][_][:] for _ in fcalib[pid][camera].keys()}
person_cam_data[camera] = cam_data
calib_data[pid] = person_cam_data
fcalib.close()
human3d = {'body': [], 'left_hand': [], 'right_hand': [], 'gt_body': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
img_dirs = []
for img_idx, img_name in enumerate(img_list):
img_dir = os.path.join(self.image_root, img_name)
body2d = annot2d[img_idx].astype(np.float32)
                if (mode == 'training' and (body2d >= 1000).any()) or (body2d <= 0).any():
continue
body3d = annot3d[img_idx].astype(np.float32)
human3d['gt_body'].append(body3d)
                body3d = np.concatenate((body3d, np.ones((1, 3), dtype=np.float32)), axis=0)  # append a dummy joint so the order_dict conversion has a row for joints H36M lacks
person = img_name.split('_')[0].replace('S', 'subject')
camera = img_name.split('.')[1].split('_')[0]
camera_name = map_camera[camera]
cam_param = calib_data[person][camera_name]
K = np.eye(3)
K[0, 0] = cam_param['f'][0, 0]
K[1, 1] = cam_param['f'][1, 0]
K[0, 2] = cam_param['c'][0, 0]
K[1, 2] = cam_param['c'][1, 0]
dc = np.zeros((5,))
dc[:3] = cam_param['k'][:, 0]
dc[3:] = cam_param['p'][:, 0]
human3d['body'].append(body3d)
img_dirs.append(img_dir)
calib['K'].append(K.astype(np.float32))
calib['R'].append(np.eye(3, dtype=np.float32))
calib['t'].append(np.zeros((3,), dtype=np.float32))
calib['distCoef'].append(dc.astype(np.float32))
self.num_samples = len(img_dirs)
human3d.update(calib)
human3d['img_dirs'] = img_dirs
body_valid = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
human3d['body_valid'] = np.tile(body_valid, (self.num_samples, 1))
order_dict = human36m_to_main
elif name == 'MPI_INF_3DHP':
self.image_root = '/media/posefs1b/Users/donglaix/mpi_inf_3dhp/'
assert mode == 'training'
self.path_to_db = self.image_root + 'mpi3d.pickle'
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f)
img_dirs, body, K = db_data
self.num_samples = img_dirs.shape[0]
img_dirs = np.core.defchararray.add(np.array([self.image_root]), img_dirs)
body = body.astype(np.float32)
body = np.concatenate([body, np.ones((self.num_samples, 1, 3))], axis=1).astype(np.float32)
K = K.astype(np.float32)
body_valid = np.ones((self.num_samples, 19), dtype=bool)
body_valid[:, 0] = False
body_valid[:, 14:18] = False
R = np.tile(np.expand_dims(np.eye(3, dtype=np.float32), axis=0), (self.num_samples, 1, 1))
t = np.tile(np.zeros((1, 3), dtype=np.float32), (self.num_samples, 1))
dc = np.tile(np.zeros((1, 5), dtype=np.float32), (self.num_samples, 1))
human3d = {'img_dirs': img_dirs, 'body': body, 'K': K, 'body_valid': body_valid, 'R': R, 't': t, 'distCoef': dc}
order_dict = mpi3d_to_main
elif name == 'UP':
self.image_root = '/media/posefs3b/Users/donglaix/UP/'
            assert mode == 'training'
self.path_to_db = './data/UP_collected.pkl'
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f, encoding='latin')
human3d = {'body': [], 'img_dirs': [], 'body_valid': [], 'mask_dirs': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
for data in db_data:
calib['K'].append(data['K'].astype(np.float32))
calib['R'].append(data['R'].astype(np.float32))
calib['t'].append(data['t'].astype(np.float32))
calib['distCoef'].append(np.zeros([5], dtype=np.float32))
human3d['body'].append(data['J'].astype(np.float32))
body_valid = np.ones([19], dtype=bool)
# body_valid[0] = False
# body_valid[14:] = False
human3d['body_valid'].append(body_valid)
human3d['img_dirs'].append(os.path.join(self.image_root, data['img_dir']))
human3d['mask_dirs'].append(os.path.join(self.image_root, data['mask']))
human3d.update(calib)
order_dict = SMPL_to_main
self.num_samples = len(human3d['img_dirs'])
elif name == 'SURREAL':
self.image_root = '/media/posefs3b/Users/donglaix/surreal/surreal/SURREAL/'
            assert mode == 'training'
self.path_to_db = os.path.join(self.image_root, 'surreal_collected.pkl')
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f, encoding='latin')
human3d = {'body': [], 'img_dirs': [], 'body_valid': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
for data in db_data:
calib['K'].append(data['K'].astype(np.float32))
calib['R'].append(data['R'].astype(np.float32))
calib['t'].append(np.ravel(data['t']).astype(np.float32))
calib['distCoef'].append(np.zeros([5], dtype=np.float32))
human3d['body'].append(data['J'].astype(np.float32))
body_valid = np.ones([19], dtype=bool)
body_valid[0] = False
body_valid[14:] = False
human3d['body_valid'].append(body_valid)
human3d['img_dirs'].append(os.path.join(self.image_root, data['img_dir']))
human3d.update(calib)
order_dict = SMPL_to_main
self.num_samples = len(human3d['img_dirs'])
else:
raise NotImplementedError
self.register_tensor(human3d, order_dict)
def get(self):
if self.name == 'Human3.6M':
d = super(HumanReader, self).get(withPAF=True, imw=1000, imh=1002)
elif self.name == 'MPI_INF_3DHP':
d = super(HumanReader, self).get(withPAF=True, imw=2048, imh=2048)
elif self.name == 'UP':
d = super(HumanReader, self).get(withPAF=True, imw=1920, imh=1080)
elif self.name == 'SURREAL':
d = super(HumanReader, self).get(withPAF=True, imw=320, imh=240)
else:
raise NotImplementedError
return d
if __name__ == '__main__':
d = HumanReader(mode='evaluation', name='Human3.6M', shuffle=False, objtype=0, crop_noise=False)
d.start_from(77095)
# d.crop_size_zoom = 1.5
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_all_PAF
# from utils.vis_heatmap3d import vis_heatmap3d
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
bimage_crop, img_dir, body2d, body_valid, body2d_heatmap, body3d, PAF, mask_crop = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['body_valid'], data_dict['scoremap2d'],
data_dict['keypoint_xyz_local'], data_dict['PAF'], data_dict['mask_crop']])
image_name = img_dir[0].decode()
print(image_name)
image_v = ((bimage_crop[0] + 0.5) * 255).astype(np.uint8)
body2d = np.squeeze(body2d)
body_valid = np.squeeze(body_valid)
body2d_heatmap = np.squeeze(body2d_heatmap)
body3d = np.squeeze(body3d)
mask_crop = np.squeeze(mask_crop).astype(bool)
PAF = np.squeeze(PAF)
body2d_detected = utils.general.detect_keypoints2d(body2d_heatmap)[:19, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(161)
plt.imshow(image_v)
utils.general.plot2d(ax1, body2d, valid_idx=body_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, body2d_detected, valid_idx=body_valid, color=np.array([0.0, 0.0, 1.0]))
for i in range(19):
plt.text(int(body2d[i, 0]), int(body2d[i, 1]), str(i))
ax2 = fig.add_subplot(162, projection='3d')
utils.general.plot3d(ax2, body3d, valid_idx=body_valid, color=np.array([1.0, 0.0, 0.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
ax2.set_xlim(0, 47)
ax2.set_ylim(0, 47)
ax2.set_zlim(0, 47)
ax3 = fig.add_subplot(163)
plt.imshow(mask_crop)
ax4 = fig.add_subplot(164)
mask_3c = np.stack([mask_crop] * 3, axis=2)
masked = mask_3c * image_v
plt.imshow(masked)
xy, z = plot_all_PAF(PAF, 3)
ax5 = fig.add_subplot(165)
ax5.imshow(xy)
ax6 = fig.add_subplot(166)
ax6.imshow(z)
plt.show()
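

# Illustrative sketch (not part of the reader above): the Human3.6M branch recovers the
# subject id and the camera id from the image file name and then looks the camera up in
# the calibration dictionary via map_camera. The file name in the example is an assumed
# one that follows the split()/replace() calls used in __init__.
def parse_h36m_image_name(img_name):
    person = img_name.split('_')[0].replace('S', 'subject')  # e.g. 'S9' -> 'subject9'
    camera = img_name.split('.')[1].split('_')[0]  # e.g. '60457274'
    return person, camera
# Example: parse_h36m_image_name('S9_Posing.60457274_000321.jpg') == ('subject9', '60457274')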
| body2hands-main | visualization/POF/data/HumanReader.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class Base2DReader(BaseReader):
# inherit from BaseReader, implement different 2D cropping (cropping from 2D)
def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(Base2DReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
assert type(withPAF) == bool
assert self.objtype in (0, 1)
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['img_dir'] = flow_dict['img_dirs']
PAF_given = False
if self.objtype == 0:
body2d = flow_dict['body']
data_dict['body_valid'] = flow_dict['body_valid']
data_dict['keypoint_uv_origin'] = body2d
if 'body_3d' in flow_dict:
data_dict['keypoint_xyz_origin'] = flow_dict['body_3d']
data_dict['keypoint_xyz_local'] = flow_dict['body_3d']
PAF_given = True
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand2d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand']) # in world coordinate
hand2d = tf.cast(hand2d, tf.float32)
data_dict['keypoint_uv_origin'] = hand2d
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
if 'left_hand_3d' in flow_dict and 'right_hand_3d' in flow_dict:
hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand_3d'], lambda: flow_dict['right_hand_3d'])
data_dict['keypoint_xyz_origin'] = hand3d
data_dict['keypoint_xyz_local'] = hand3d
PAF_given = True
# read image
if read_image:
img_file = tf.read_file(flow_dict['img_dirs'])
image = tf.image.decode_image(img_file, channels=3)
image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
image.set_shape((imh, imw, 3))
image = tf.cast(image, tf.float32) / 255.0 - 0.5
data_dict['image'] = image
if 'mask_dirs' in flow_dict:
mask_file = tf.read_file(flow_dict['mask_dirs'])
mask = tf.image.decode_image(mask_file, channels=3)
mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
mask.set_shape((imh, imw, 3))
mask = mask[:, :, 0]
mask = tf.cast(mask, tf.float32)
else:
mask = tf.ones((imh, imw), dtype=tf.float32)
if 'other_bbox' in flow_dict:
ob = flow_dict['other_bbox']
Xindmap = tf.tile(tf.expand_dims(tf.range(imw, dtype=tf.int32), 0), [imh, 1])
Xindmap = tf.tile(tf.expand_dims(Xindmap, 2), [1, 1, 20])
Yindmap = tf.tile(tf.expand_dims(tf.range(imh, dtype=tf.int32), 1), [1, imw])
Yindmap = tf.tile(tf.expand_dims(Yindmap, 2), [1, 1, 20])
x_out = tf.logical_or(tf.less(Xindmap, ob[:, 0]), tf.greater_equal(Xindmap, ob[:, 2]))
y_out = tf.logical_or(tf.less(Yindmap, ob[:, 1]), tf.greater_equal(Yindmap, ob[:, 3]))
out = tf.cast(tf.logical_or(x_out, y_out), tf.float32)
out = tf.reduce_min(out, axis=2)
mask = tf.minimum(mask, out)
data_dict['mask'] = mask
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints = body2d
valid = flow_dict['body_valid']
elif self.objtype == 1:
keypoints = hand2d
body2d = hand2d
valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['hand_valid'] = valid
if PAF_given:
body3d = hand3d
crop_center2d, scale2d = self.calc_crop_scale2d(keypoints, valid)
data_dict['crop_center2d'] = crop_center2d
data_dict['scale2d'] = scale2d
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
data_dict['keypoint_uv_origin'] = body2d
if PAF_given:
R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0., tf.sin(rotate_angle), tf.cos(rotate_angle), 0., 0., 0., 1.]), [3, 3])
body3d = tf.matmul(body3d, R3)
data_dict['keypoint_xyz_origin'] = body3d
data_dict['keypoint_xyz_local'] = body3d
body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
data_dict['keypoint_uv_local'] = body2d_local
if read_image:
image_crop = self.crop_image(image, crop_center2d, scale2d)
data_dict['image_crop'] = image_crop
mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
data_dict['mask_crop'] = mask_crop[:, :, 0]
if self.rotate_augmentation:
data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True) # coord_hw, imsize_hw
data_dict['scoremap2d'] = scoremap2d
if withPAF:
from utils.PAF import createPAF
num_keypoint = body2d_local.get_shape().as_list()[0]
zeros = tf.zeros([num_keypoint, 1], dtype=tf.float32)
if PAF_given:
data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), normalize_3d=True, valid_vec=valid)
data_dict['PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
else:
data_dict['PAF'] = createPAF(body2d_local, tf.concat([body2d, zeros], axis=1), self.objtype, (self.crop_size, self.crop_size), normalize_3d=False, valid_vec=valid)
data_dict['PAF_type'] = tf.zeros([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
if withPAF:
data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=50,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
enqueue_many=False)
return dict(zip(names, tensors))
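

# Illustrative sketch (not part of the reader above): a noise-free numpy version of the 2D
# cropping this reader relies on (BaseReader.calc_crop_scale2d with its bounding-box-center
# fallback, followed by update_keypoint2d). crop_size=368 and zoom=1.8 mirror the class
# constants crop_size and crop_size_zoom_2d.
import numpy as np


def crop_params_2d(keypoints_uv, valid, crop_size=368, zoom=1.8):
    kp = keypoints_uv[valid]
    lo, hi = kp.min(axis=0), kp.max(axis=0)
    center = (lo + hi) / 2.0  # bounding-box center of the valid keypoints
    fit_size = np.max(np.maximum(hi - center, center - lo))
    scale2d = crop_size / (2.0 * fit_size * zoom)  # network-crop pixels per input pixel
    return center, scale2d


def to_crop_coords(keypoints_uv, center, scale2d, crop_size=368):
    # same mapping as update_keypoint2d: shift to the crop center, rescale, recenter
    return (keypoints_uv - center) * scale2d + crop_size // 2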
| body2hands-main | visualization/POF/data/Base2DReader.py |
import tensorflow as tf
from data.TempConstReader import TempConstReader
import numpy as np
import numpy.linalg as nl
import pickle
from utils.keypoint_conversion import a4_to_main as order_dict
import json
import os
class DomeReaderTempConst(TempConstReader):
def __init__(self, mode='training', objtype=0, shuffle=False, batch_size=1, crop_noise=False, full_only=True, head_top=True):
super(DomeReaderTempConst, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
assert objtype in (0, 1)
self.image_root = '/media/posefs0c/panopticdb/'
# read data from a4plus with consecutive frames
path_to_db = './data/a4plus_collected.pkl'
path_to_calib = './data/camera_data_a4.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
with open('./data/a4_hands_annotated.txt') as f:
hand_annots = {}
for line in f:
strs = line.split()
hand_annots[tuple(strs[:3])] = eval(strs[3])
if mode == 'training':
mode_data = db_data['training_data']
else:
mode_data = db_data['testing_data']
with open(path_to_calib, 'rb') as f:
calib_data = pickle.load(f)
human3d = {'1_body': [], '1_left_hand': [], '1_right_hand': [], '1_body_valid': [], 'left_hand_valid': [], 'right_hand_valid': [],
'2_body': [], '2_left_hand': [], '2_right_hand': [], '2_body_valid': []}
calib = {'1_K': [], '1_R': [], '1_t': [], '1_distCoef': [],
'2_K': [], '2_R': [], '2_t': [], '2_distCoef': []}
img_dirs_1 = []
img_dirs_2 = []
map_next = {}
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
frame = int(frame_str)
            if frame % 5:  # a4plus is sampled every 5 frames; frames ending in 0/5 serve as first frames and 1/6 as their consecutive second frames
continue
map_next[(seqName, frame_str)] = None
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
frame = int(frame_str)
if frame % 5 != 1:
continue
prev_key = (seqName, '{:08d}'.format(frame - 1))
if prev_key not in map_next:
continue
map_next[prev_key] = data3d
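        # At this point map_next links every sampled first frame (frame index ending in
        # 0 or 5) to the annotation of the consecutive frame (ending in 1 or 6), or to
        # None when that neighbour is missing; the loop below builds the actual pairs.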
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
frame = int(frame_str)
if frame % 5:
continue
# check for manual annotation, remove the annotation if the hand is annotated as incorrect.
if 'left_hand' in data3d and not hand_annots[(seqName, frame_str, 'left')]:
del data3d['left_hand']
if 'right_hand' in data3d and not hand_annots[(seqName, frame_str, 'righ')]:
del data3d['right_hand']
next_data = map_next[(seqName, frame_str)]
if next_data is None:
continue
if objtype == 0:
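                # Rough geometric estimates appended to the raw landmarks: a head-top
                # point extrapolated from the neck along the unit normal n of the
                # nose/ear plane (by 1.5x the nose height d along n), and a chest point
                # as a weighted mean of the neck and the two hips.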
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
head_top_kp = body3d[0] + 1.5 * d * n
if head_top:
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d_1 = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
body3d = np.array(next_data['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
head_top_kp = body3d[0] + 1.5 * d * n
if head_top:
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d_2 = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
elif objtype == 1:
# left hand or right hand must be valid
if 'left_hand' in data3d:
left_hand3d_1 = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'left_hand' in next_data:
left_hand3d_2 = np.array(next_data['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in data3d:
right_hand3d_1 = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in next_data:
right_hand3d_2 = np.array(next_data['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if ('left_hand' not in data3d or 'left_hand' not in next_data) and ('right_hand' not in data3d or 'right_hand' not in next_data):
# one hand must be valid for both frames
continue
if objtype == 0:
for camIdx, camDict in data3d['body']['2D'].items():
if camIdx not in next_data['body']['2D']: # no data from this camera in the next frame
continue
if full_only:
cond_inside_1 = all(camDict['insideImg'])
                        cond_inside_2 = all(next_data['body']['2D'][camIdx]['insideImg'])
                    else:  # if not full_only, keep the view when at least 10% of the keypoints project inside the image
inside_ratio_1 = np.float(np.sum(camDict['insideImg'])) / len(camDict['insideImg'])
inside_ratio_2 = np.float(np.sum(next_data['body']['2D'][camIdx]['insideImg'])) / len(next_data['body']['2D'][camIdx]['insideImg'])
cond_inside_1 = (inside_ratio_1 > 0.1)
cond_inside_2 = (inside_ratio_2 > 0.1)
if any(camDict['occluded']) or any(next_data['body']['2D'][camIdx]['occluded']) or not cond_inside_1 or not cond_inside_2:
continue
human3d['1_body'].append(body3d_1)
human3d['2_body'].append(body3d_2)
human3d['1_body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
human3d['2_body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
calib['1_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['2_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['1_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['2_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['1_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['2_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['1_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
calib['2_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs_1.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
img_dirs_2.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, next_data['frame_str'], camIdx, next_data['frame_str']))
elif objtype == 1:
if 'left_hand' in data3d and 'left_hand' in next_data:
for camIdx, camDict in data3d['left_hand']['2D'].items():
if camIdx not in next_data['left_hand']['2D']:
continue
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']) or data3d['left_hand']['2D'][camIdx]['overlap']:
continue
if any(next_data['left_hand']['2D'][camIdx]['occluded']) or not all(next_data['left_hand']['2D'][camIdx]['insideImg']) or next_data['left_hand']['2D'][camIdx]['overlap']:
continue
human3d['1_left_hand'].append(left_hand3d_1)
human3d['2_left_hand'].append(left_hand3d_2)
human3d['1_right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['2_right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
calib['1_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['2_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['1_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['2_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['1_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['2_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['1_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
calib['2_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs_1.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
img_dirs_2.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, next_data['frame_str'], camIdx, next_data['frame_str']))
if 'right_hand' in data3d and 'right_hand' in next_data:
for camIdx, camDict in data3d['right_hand']['2D'].items():
if camIdx not in next_data['right_hand']['2D']:
continue
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']) or data3d['right_hand']['2D'][camIdx]['overlap']:
continue
if any(next_data['right_hand']['2D'][camIdx]['occluded']) or not all(next_data['right_hand']['2D'][camIdx]['insideImg']) or next_data['right_hand']['2D'][camIdx]['overlap']:
continue
human3d['1_right_hand'].append(right_hand3d_1)
human3d['2_right_hand'].append(right_hand3d_2)
human3d['1_left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['2_left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['1_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['2_K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['1_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['2_R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['1_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['2_t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['1_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
calib['2_distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs_1.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
img_dirs_2.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, next_data['frame_str'], camIdx, next_data['frame_str']))
human3d.update(calib)
human3d['1_img_dirs'] = img_dirs_1
human3d['2_img_dirs'] = img_dirs_2
self.register_tensor(human3d, order_dict)
self.num_samples = len(self.tensor_dict['1_img_dirs'])
def get(self, withPAF=True):
d = super(DomeReaderTempConst, self).get(withPAF=withPAF)
return d
if __name__ == '__main__':
# d = DomeReaderTempConst(mode='training', shuffle=True, objtype=0, crop_noise=True, full_only=False)
# data_dict = d.get()
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# sess.run(tf.global_variables_initializer())
# tf.train.start_queue_runners(sess=sess)
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# import utils.general
# from utils.vis_heatmap3d import vis_heatmap3d
# from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
# validation_images = []
# for i in range(d.num_samples):
# print('{}/{}'.format(i + 1, d.num_samples))
# values = \
# sess.run([data_dict['1_image_crop'], data_dict['1_img_dir'], data_dict['1_keypoint_uv_local'], data_dict['1_body_valid'], data_dict['1_scoremap2d'],
# data_dict['1_PAF'], data_dict['1_mask_crop'], data_dict['1_keypoint_xyz_local'],
# data_dict['2_image_crop'], data_dict['2_img_dir'], data_dict['2_keypoint_uv_local'], data_dict['2_body_valid'], data_dict['2_scoremap2d'],
# data_dict['2_PAF'], data_dict['2_mask_crop'], data_dict['2_keypoint_xyz_local']
# ])
# image_crop_1, img_dir_1, body2d_1, body_valid_1, body2d_heatmap_1, PAF_1, mask_crop_1, body3d_1, \
# image_crop_2, img_dir_2, body2d_2, body_valid_2, body2d_heatmap_2, PAF_2, mask_crop_2, body3d_2 = [np.squeeze(_) for _ in values]
# image_name_1 = img_dir_1.item().decode()
# image_name_2 = img_dir_2.item().decode()
# image_v_1 = ((image_crop_1 + 0.5) * 255).astype(np.uint8)
# image_v_2 = ((image_crop_2 + 0.5) * 255).astype(np.uint8)
# body2d_detected_1, bscore_1 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_1, PAF_1)
# body2d_detected_2, bscore_2 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_2, PAF_2)
# # body2d_detected = utils.general.detect_keypoints2d(body2d_heatmap)[:20, :]
# body3d_detected_1, _ = PAF_to_3D(body2d_detected_1, PAF_1, objtype=0)
# body3d_detected_2, _ = PAF_to_3D(body2d_detected_2, PAF_2, objtype=0)
# # body3d_detected = body3d_detected[:21, :]
# fig = plt.figure(1)
# ax1 = fig.add_subplot(241)
# plt.imshow(image_v_1)
# utils.general.plot2d(ax1, body2d_1, type_str='body', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
# utils.general.plot2d(ax1, body2d_detected_1, type_str='body', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
# ax2 = fig.add_subplot(242)
# plt.imshow(image_v_2)
# utils.general.plot2d(ax2, body2d_2, type_str='body', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
# utils.general.plot2d(ax2, body2d_detected_2, type_str='body', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
# ax3 = fig.add_subplot(243, projection='3d')
# utils.general.plot3d(ax3, body3d_detected_1, type_str='body', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
# utils.general.plot3d(ax3, body3d_detected_2, type_str='body', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
# ax3.set_xlabel('X Label')
# ax3.set_ylabel('Y Label')
# ax3.set_zlabel('Z Label')
# plt.axis('equal')
# ax4 = fig.add_subplot(244, projection='3d')
# utils.general.plot3d(ax4, body3d_1, type_str='body', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
# utils.general.plot3d(ax4, body3d_2, type_str='body', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
# ax4.set_xlabel('X Label')
# ax4.set_ylabel('Y Label')
# ax4.set_zlabel('Z Label')
# plt.axis('equal')
# xy, z = plot_all_PAF(PAF_1, 3)
# ax5 = fig.add_subplot(245)
# ax5.imshow(xy)
# ax6 = fig.add_subplot(246)
# ax6.imshow(z)
# xy, z = plot_all_PAF(PAF_2, 3)
# ax7 = fig.add_subplot(247)
# ax7.imshow(xy)
# ax8 = fig.add_subplot(248)
# ax8.imshow(z)
# plt.show()
d = DomeReaderTempConst(mode='training', shuffle=True, objtype=1, crop_noise=True, full_only=False)
d.crop_scale_noise_sigma = 0.4
d.crop_offset_noise_sigma = 0.2
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['1_image_crop'], data_dict['1_img_dir'], data_dict['1_keypoint_uv_local'], data_dict['1_hand_valid'], data_dict['1_scoremap2d'],
data_dict['1_PAF'], data_dict['1_mask_crop'], data_dict['1_keypoint_xyz_local'],
data_dict['2_image_crop'], data_dict['2_img_dir'], data_dict['2_keypoint_uv_local'], data_dict['2_hand_valid'], data_dict['2_scoremap2d'],
data_dict['2_PAF'], data_dict['2_mask_crop'], data_dict['2_keypoint_xyz_local']
])
image_crop_1, img_dir_1, body2d_1, body_valid_1, body2d_heatmap_1, PAF_1, mask_crop_1, body3d_1, \
image_crop_2, img_dir_2, body2d_2, body_valid_2, body2d_heatmap_2, PAF_2, mask_crop_2, body3d_2 = [np.squeeze(_) for _ in values]
image_name_1 = img_dir_1.item().decode()
image_name_2 = img_dir_2.item().decode()
image_v_1 = ((image_crop_1 + 0.5) * 255).astype(np.uint8)
image_v_2 = ((image_crop_2 + 0.5) * 255).astype(np.uint8)
body2d_detected_1, bscore_1 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_1, PAF_1, objtype=1)
body2d_detected_2, bscore_2 = utils.PAF.detect_keypoints2d_PAF(body2d_heatmap_2, PAF_2, objtype=1)
# body2d_detected = utils.general.detect_keypoints2d(body2d_heatmap)[:20, :]
body3d_detected_1, _ = PAF_to_3D(body2d_detected_1, PAF_1, objtype=1)
body3d_detected_2, _ = PAF_to_3D(body2d_detected_2, PAF_2, objtype=1)
body3d_detected_1 = body3d_detected_1[:21, :]
body3d_detected_2 = body3d_detected_2[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(241)
plt.imshow(image_v_1)
utils.general.plot2d(ax1, body2d_1, type_str='hand', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, body2d_detected_1, type_str='hand', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(242)
plt.imshow(image_v_2)
utils.general.plot2d(ax2, body2d_2, type_str='hand', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax2, body2d_detected_2, type_str='hand', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
ax3 = fig.add_subplot(243, projection='3d')
utils.general.plot3d(ax3, body3d_detected_1, type_str='hand', valid_idx=body_valid_1, color=np.array([0.0, 0.0, 1.0]))
utils.general.plot3d(ax3, body3d_detected_2, type_str='hand', valid_idx=body_valid_2, color=np.array([0.0, 0.0, 1.0]))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
plt.axis('equal')
ax4 = fig.add_subplot(244, projection='3d')
utils.general.plot3d(ax4, body3d_1, type_str='hand', valid_idx=body_valid_1, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot3d(ax4, body3d_2, type_str='hand', valid_idx=body_valid_2, color=np.array([1.0, 0.0, 0.0]))
ax4.set_xlabel('X Label')
ax4.set_ylabel('Y Label')
ax4.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF_1, 3)
ax5 = fig.add_subplot(245)
ax5.imshow(xy)
ax6 = fig.add_subplot(246)
ax6.imshow(z)
xy, z = plot_all_PAF(PAF_2, 3)
ax7 = fig.add_subplot(247)
ax7.imshow(xy)
ax8 = fig.add_subplot(248)
ax8.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/DomeReaderTempConst.py |
import os
import numpy as np
import numpy.linalg as nl
import json
import pickle
map_body25_to_body19 = list(range(8)) + list(range(9, 25)) # total of 24
seqName = 'Dexter_Grasp2'
# root = '/home/donglaix/Documents/Experiments/{}'.format(seqName)
root = '/media/posefs1b/Users/donglaix/siggasia018/{}/'.format(seqName)
calib_file = os.path.join(root, 'calib.json')
with open(calib_file) as f:
calib_data = json.load(f)
start = 0
end = 648
frameRange = range(start, end)
person_idx = -1
# -1 for most obvious person, -2 for second obvious person
bs = []
ls = []
rs = []
fs = []
img_dirs = []
for i in frameRange:
# img_file = os.path.join('openpose_image', '{}_{:012d}.jpg'.format(seqName, i)) if os.path.exists(os.path.join(root, 'openpose_image', '{}_{:012d}.jpg'.format(seqName, i))) \
# else os.path.join('openpose_image', '{}_{:012d}.png'.format(seqName, i)) # Openpose run on images
img_file = os.path.join('openpose_image', '{}_{:012d}_rendered.png'.format(seqName, i)) # Openpose run on video
assert os.path.exists(os.path.join(root, img_file))
annot_2d = os.path.join(root, 'openpose_result', '{}_{:012d}_keypoints.json'.format(seqName, i))
assert os.path.exists(annot_2d)
with open(annot_2d) as f:
data = json.load(f)
scores = []
areas = []
for ip in range(len(data["people"])):
joint2d = np.array(data["people"][ip]["pose_keypoints_2d"]).reshape(-1, 3)
left_hand2d = np.array(data["people"][ip]["hand_left_keypoints_2d"]).reshape(-1, 3)
right_hand2d = np.array(data["people"][ip]["hand_right_keypoints_2d"]).reshape(-1, 3)
face2d = np.array(data["people"][ip]["face_keypoints_2d"]).reshape(-1, 3)
score = np.sum(joint2d[:, 2]) + np.sum(left_hand2d[:, 2]) + np.sum(right_hand2d[:, 2]) + np.sum(face2d[:, 2])
scores.append(score)
joint_valid = (joint2d[:, 0] > 0.0) * (joint2d[:, 1] > 0.0)
joint_nonzero = joint2d[joint_valid, :][:, :2]
mx, my = joint_nonzero.min(axis=0)
Mx, My = joint_nonzero.max(axis=0)
areas.append((Mx - mx) * (My - my))
scores = np.array(scores)
areas = np.array(areas)
idx = np.argsort(scores)
# idx = np.argsort(areas)
ip = idx[person_idx]
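    # np.argsort ranks detections by total OpenPose confidence in ascending order, so
    # person_idx = -1 keeps the most confident person in the frame.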
joint2d = np.array(data["people"][ip]["pose_keypoints_2d"]).reshape(-1, 3)
left_hand2d = np.array(data["people"][ip]["hand_left_keypoints_2d"]).reshape(-1, 3)
right_hand2d = np.array(data["people"][ip]["hand_right_keypoints_2d"]).reshape(-1, 3)
face2d = np.array(data["people"][ip]["face_keypoints_2d"]).reshape(-1, 3)
final_body = joint2d[map_body25_to_body19]
final_left = left_hand2d
final_right = right_hand2d
final_face = face2d
bs.append(final_body)
fs.append(final_face)
ls.append(final_left)
rs.append(final_right)
img_dirs.append(img_file)
img_dirs = np.array(img_dirs)
bs = np.array(bs)
ls = np.array(ls)
rs = np.array(rs)
fs = np.array(fs)
print((len(ls), len(rs), len(fs), len(bs), len(img_dirs)))
with open('{}.pkl'.format(seqName), 'wb') as f:
pickle.dump((bs, ls, rs, fs, img_dirs, calib_data), f)
| body2hands-main | visualization/POF/data/collect_openpose.py |
import tensorflow as tf
class MultiDataset(object):
    """Combine the outputs of several dataset readers into one batched input."""
def __init__(self, db_list):
assert type(db_list) == list and len(db_list) >= 1
self.db_list = db_list
def get(self, name_wanted):
data_list = []
for i, db in enumerate(self.db_list):
data = db.get()
data_list.append(data)
ret_data = {}
for name in name_wanted:
ret_data[name] = tf.concat([d[name] for d in data_list], axis=0)
return ret_data
def combineMultiDataset(data_list, name_wanted):
# data_list is a list of data_dict
ret_data = {}
for name in name_wanted:
ret_data[name] = tf.concat([d[name] for d in data_list], axis=0)
return ret_data
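# Minimal usage sketch (hypothetical reader objects; assumes each reader's get()
# returns a dict of batched tensors sharing the requested keys):
#   multi = MultiDataset([reader_a, reader_b])
#   data = multi.get(['image_crop', 'scoremap2d', 'PAF'])
#   # every entry of `data` concatenates the per-reader batches along axis 0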
if __name__ == '__main__':
pass
| body2hands-main | visualization/POF/data/MultiDataset.py |
import tensorflow as tf
import os
import numpy as np
from data.BaseReader import BaseReader
import pickle
from data.collect_stb import PATH_TO_DATASET, K, Rl, Rr, tl, tr, TRAIN_SEQS, TEST_SEQS
from utils.keypoint_conversion import STB_to_main
class STBReader(BaseReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
assert objtype == 1
super(STBReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
self.name = 'STB'
self.image_root = PATH_TO_DATASET
path_to_db = './data/stb_collected.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
if mode == 'training':
mode_data = db_data[0]
SEQS = TRAIN_SEQS
else:
mode_data = db_data[1]
SEQS = TEST_SEQS
assert mode_data.shape[0] == len(SEQS) * 1500
hand3d = np.tile(mode_data, [2, 1, 1]).astype(np.float32)
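        # STB annotates joint 0 at the palm center rather than the wrist; the line
        # below approximates the wrist by reflecting joint 9 about the palm center
        # (wrist ~= 2 * joint0 - joint9).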
hand3d[:, 0] = 2 * hand3d[:, 0] - hand3d[:, 9]
self.num_samples = hand3d.shape[0]
Ks = np.array([K] * self.num_samples, dtype=np.float32)
Rs = np.array([Rl] * mode_data.shape[0] + [Rr] * mode_data.shape[0], dtype=np.float32)
ts = np.array([tl] * mode_data.shape[0] + [tr] * mode_data.shape[0], dtype=np.float32)
distCoef = np.zeros([self.num_samples, 5], dtype=np.float32)
left_hand_valid = np.ones([self.num_samples, 21], dtype=bool)
        img_dirs = [os.path.join(self.image_root, seq, 'BB_left_{}.png'.format(i)) for seq in SEQS for i in range(1500)] + \
            [os.path.join(self.image_root, seq, 'BB_right_{}.png'.format(i)) for seq in SEQS for i in range(1500)]
human3d = {'K': Ks, 'R': Rs, 't': ts, 'distCoef': distCoef,
'left_hand': hand3d, 'left_hand_valid': left_hand_valid, 'img_dirs': img_dirs,
'right_hand': np.zeros([self.num_samples, 21, 3], dtype=np.float32), 'right_hand_valid': np.zeros([self.num_samples, 21], dtype=bool)}
self.register_tensor(human3d, STB_to_main)
def get(self):
d = super(STBReader, self).get(imw=640, imh=480)
return d
if __name__ == '__main__':
d = STBReader(mode='evaluation', shuffle=True, objtype=1, crop_noise=True)
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['left_hand_valid'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_local'], data_dict['keypoint_uv_origin'], data_dict['image']])
image_crop, img_dir, hand2d, hand_valid, hand2d_heatmap, PAF, mask_crop, hand3d, hand2d_origin, image_full = [np.squeeze(_) for _ in values]
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
image_full_v = ((image_full + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
for j in range(21):
plt.text(hand2d[j, 0], hand2d[j, 1], str(j))
ax2 = fig.add_subplot(232, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
ax3 = fig.add_subplot(233, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(234)
ax4.imshow(xy)
ax5 = fig.add_subplot(235)
ax5.imshow(z)
ax6 = fig.add_subplot(236)
ax6.imshow(image_full_v)
utils.general.plot2d(ax6, hand2d_origin, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
plt.show()
| body2hands-main | visualization/POF/data/STBReader.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
import numpy.linalg as nl
import pickle
from utils.keypoint_conversion import a4_to_main as order_dict
import json
import os
class DomeReader(BaseReader):
def __init__(self, mode='training', objtype=0, shuffle=False, batch_size=1, crop_noise=False, full_only=True, head_top=True):
super(DomeReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
self.image_root = '/media/posefs0c/panopticdb/'
# read data from a4
path_to_db = './data/a4_collected.pkl'
path_to_calib = './data/camera_data_a4.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
with open('./data/a4_hands_annotated.txt') as f:
hand_annots = {}
for line in f:
strs = line.split()
hand_annots[tuple(strs[:3])] = eval(strs[3])
if mode == 'training':
mode_data = db_data['training_data']
else:
mode_data = db_data['testing_data']
with open(path_to_calib, 'rb') as f:
calib_data = pickle.load(f)
human3d = {'body': [], 'left_hand': [], 'right_hand': [], 'body_valid': [], 'left_hand_valid': [], 'right_hand_valid': []}
calib = {'K': [], 'R': [], 't': [], 'distCoef': []}
img_dirs = []
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
# check for manual annotation, remove the annotation if the hand is annotated as incorrect.
if 'left_hand' in data3d and not hand_annots[(seqName, frame_str, 'left')]:
del data3d['left_hand']
if 'right_hand' in data3d and not hand_annots[(seqName, frame_str, 'righ')]:
del data3d['right_hand']
if objtype == 0:
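                # Two synthetic keypoints are added below: a head top extrapolated from
                # the neck along the normal of the nose/ear plane (1.5x the nose height),
                # and a chest point as a weighted mean of the neck and the two hips.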
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
head_top_kp = body3d[0] + 1.5 * d * n
if head_top:
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
elif objtype == 1:
# left hand or right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if ('left_hand' not in data3d) and ('right_hand' not in data3d):
continue
else:
assert objtype == 2
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
# both left and right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
# discard the sample if hand is wanted but there is no left hand.
else:
continue
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
else:
continue
if objtype == 0:
for camIdx, camDict in data3d['body']['2D'].items():
if full_only:
cond_inside = all(camDict['insideImg'])
else: # if not full_only, use the image if at least half keypoints are visible
inside_ratio = np.float(np.sum(camDict['insideImg'])) / len(camDict['insideImg'])
cond_inside = (inside_ratio > 0.5)
if any(camDict['occluded']) or not cond_inside:
continue
human3d['body'].append(body3d)
human3d['body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
elif objtype == 1:
if 'left_hand' in data3d:
for camIdx, camDict in data3d['left_hand']['2D'].items():
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']) or data3d['left_hand']['2D'][camIdx]['overlap']:
continue
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
if 'right_hand' in data3d:
for camIdx, camDict in data3d['right_hand']['2D'].items():
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']) or data3d['right_hand']['2D'][camIdx]['overlap']:
continue
human3d['right_hand'].append(right_hand3d)
human3d['left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
else:
assert objtype == 2
for camIdx, camDict in data3d['body']['2D'].items():
if any(camDict['occluded']) or not all(camDict['insideImg']):
continue
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']):
continue
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']):
continue
                    # If this line is reached, the sample and cam view are valid.
human3d['body'].append(body3d)
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(right_hand3d)
human3d['body_valid'].append(np.ones((18,), dtype=bool))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a4', seqName, frame_str, camIdx, frame_str))
if mode == 'evaluation':
if objtype == 2:
openpose_output_file = '/home/donglaix/Documents/Experiments/dome_valid/a4_openpose.json'
assert os.path.exists(openpose_output_file)
with open(openpose_output_file) as f:
openpose_data = json.load(f)
openpose_data = np.array(openpose_data, dtype=np.float32).reshape(-1, 70, 3)
openpose_valid = (openpose_data[:, :, 2] >= 0.5)
openpose_data[:, :, 0] *= openpose_valid
openpose_data[:, :, 1] *= openpose_valid
openpose_face = openpose_data[:, :, :2]
human3d['openpose_face'] = openpose_face
# read data from a5
path_to_db = './data/a5_collected.pkl'
path_to_calib = './data/camera_data_a5.pkl'
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
if mode == 'training':
mode_data = db_data['training_data']
else:
mode_data = db_data['testing_data']
with open(path_to_calib, 'rb') as f:
calib_data = pickle.load(f)
for data3d in mode_data:
seqName = data3d['seqName']
frame_str = data3d['frame_str']
if objtype == 0:
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
nose_lear = body3d[16] - body3d[1]
nose_rear = body3d[18] - body3d[1]
neck_nose = body3d[1] - body3d[0]
n = np.cross(nose_lear, nose_rear)
n = n / nl.norm(n)
d = np.dot(neck_nose, n)
assert d > 0
if head_top:
head_top_kp = body3d[0] + 1.5 * d * n
body3d = np.concatenate((body3d, head_top_kp[np.newaxis, :]), axis=0)
chest = 0.5 * body3d[0] + 0.25 * (body3d[6] + body3d[12])
body3d = np.concatenate((body3d, chest[np.newaxis, :]), axis=0)
elif objtype == 1:
# left hand or right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
if ('left_hand' not in data3d) and ('right_hand' not in data3d):
continue
else:
assert objtype == 2
body3d = np.array(data3d['body']['landmarks'], dtype=np.float32).reshape(-1, 3)
# both left and right hand must be valid
if 'left_hand' in data3d:
left_hand3d = np.array(data3d['left_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
# discard the sample if hand is wanted but there is no left hand.
else:
continue
if 'right_hand' in data3d:
right_hand3d = np.array(data3d['right_hand']['landmarks'], dtype=np.float32).reshape(-1, 3)
else:
continue
if objtype == 0:
for camIdx, camDict in data3d['body']['2D'].items():
if full_only:
cond_inside = all(camDict['insideImg'])
else: # if not full_only, use the image if at least half keypoints are visible
inside_ratio = np.float(np.sum(camDict['insideImg'])) / len(camDict['insideImg'])
cond_inside = (inside_ratio > 0.5)
if any(camDict['occluded']) or not cond_inside:
continue
human3d['body'].append(body3d)
human3d['body_valid'].append(np.ones((20 if head_top else 19,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
elif objtype == 1:
if 'left_hand' in data3d:
for camIdx, camDict in data3d['left_hand']['2D'].items():
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']) or data3d['left_hand']['2D'][camIdx]['overlap']:
continue
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
if 'right_hand' in data3d:
for camIdx, camDict in data3d['right_hand']['2D'].items():
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']) or data3d['right_hand']['2D'][camIdx]['overlap']:
continue
human3d['right_hand'].append(right_hand3d)
human3d['left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
else:
assert objtype == 2
for camIdx, camDict in data3d['body']['2D'].items():
if any(camDict['occluded']) or not all(camDict['insideImg']):
continue
if any(data3d['left_hand']['2D'][camIdx]['occluded']) or not all(data3d['left_hand']['2D'][camIdx]['insideImg']):
continue
if any(data3d['right_hand']['2D'][camIdx]['occluded']) or not all(data3d['right_hand']['2D'][camIdx]['insideImg']):
continue
                    # If this line is reached, the sample and cam view are valid.
human3d['body'].append(body3d)
human3d['left_hand'].append(left_hand3d)
human3d['right_hand'].append(right_hand3d)
human3d['body_valid'].append(np.ones((18,), dtype=bool))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
calib['K'].append(calib_data[seqName][camIdx]['K'].astype(np.float32))
calib['R'].append(calib_data[seqName][camIdx]['R'].astype(np.float32))
calib['t'].append(calib_data[seqName][camIdx]['t'][:, 0].astype(np.float32))
calib['distCoef'].append(calib_data[seqName][camIdx]['distCoef'].astype(np.float32))
img_dirs.append('{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(self.image_root, 'a5', seqName, frame_str, camIdx, frame_str))
human3d.update(calib)
human3d['img_dirs'] = img_dirs
# import cv2
# for img_dir in img_dirs:
# if cv2.imread(img_dir) is None:
# print(img_dir)
self.register_tensor(human3d, order_dict)
self.num_samples = len(self.tensor_dict['img_dirs'])
def get(self, withPAF=True):
d = super(DomeReader, self).get(withPAF=withPAF)
return d
if __name__ == '__main__':
d = DomeReader(mode='training', shuffle=True, objtype=1, crop_noise=True, full_only=False)
# d.rotate_augmentation = True
# d.blur_augmentation = True
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['hand_valid'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_local']])
image_crop, img_dir, hand2d, hand_valid, hand2d_heatmap, PAF, mask_crop, hand3d = [np.squeeze(_) for _ in values]
image_name = img_dir.item().decode()
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
hand2d_detected, bscore = utils.PAF.detect_keypoints2d_PAF(hand2d_heatmap, PAF, objtype=1)
# hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:20, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(232, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
ax3 = fig.add_subplot(233, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
        ax3.set_xlabel('X Label')
        ax3.set_ylabel('Y Label')
        ax3.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(234)
ax4.imshow(xy)
ax5 = fig.add_subplot(235)
ax5.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/DomeReader.py |
import tensorflow as tf
import os
import numpy as np
import json
from data.Base2DReader import Base2DReader
from utils.keypoint_conversion import COCO_to_main, MPII_to_main
class COCOReader(Base2DReader):
def __init__(self, name='COCO', mode='training', objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(COCOReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
self.name = name
assert name in ('COCO', 'MPII')
assert mode in ('training', 'evaluation')
if name == 'COCO':
self.image_root = '/media/posefs3b/Users/gines/openpose_train/dataset/COCO/cocoapi/images/train2017/'
self.mask_root = '/media/posefs3b/Users/gines/openpose_train/dataset/COCO/cocoapi/images/mask2017/train2017/'
assert mode == 'training'
path_to_db = '/media/posefs3b/Users/gines/openpose_train/dataset/COCO/json/COCO.json'
with open(path_to_db) as f:
db_data = json.load(f)
img_dirs = []
mask_dirs = []
human = {'body': [], 'body_valid': [], 'other_bbox': []}
for i, image_data in enumerate(db_data['root']):
                # bounding box test: the overlap-based filtering further below is
                # disabled (commented out); instead, up to 20 bounding boxes of other
                # people are recorded alongside each sample.
bbox = np.array(image_data['bbox'], dtype=np.float32)
bbox[2:] += bbox[:2]
if type(image_data['bbox_other']) != dict and len(image_data['bbox_other']) > 0:
bbox_other = np.array(image_data['bbox_other'], dtype=np.float32).reshape(-1, 4)
bbox_other[:, 2:] += bbox_other[:, :2]
# xmin = np.maximum(bbox_other[:, 0], bbox[0])
# ymin = np.maximum(bbox_other[:, 1], bbox[1])
# xmax = np.minimum(bbox_other[:, 2], bbox[2])
# ymax = np.minimum(bbox_other[:, 3], bbox[3])
# overlap_cond = np.logical_and(xmin < xmax, ymin < ymax).any()
# if overlap_cond:
# continue
zero_left = np.zeros([20 - bbox_other.shape[0], 4])
bbox_other = np.concatenate([bbox_other, zero_left], axis=0).astype(np.int32)
else:
bbox_other = np.zeros([20, 4], dtype=np.int32)
body = np.array(image_data['joint_self'], dtype=int)
if np.sum(body[:, 2] == 1) <= 3:
continue
img_dirs.append(os.path.join(self.image_root, image_data['img_paths']))
mask_dirs.append(os.path.join(self.mask_root, image_data['img_paths'][:-3] + 'png'))
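                # COCO provides no neck, head-top or chest annotations: synthesize the
                # neck as the shoulder midpoint, the chest as the mean of shoulders and
                # hips, and keep the head top as an invalid placeholder.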
neck = (body[5:6, :2] + body[6:7, :2]) / 2
heattop = np.zeros((1, 2), dtype=int)
chest = 0.25 * (body[5:6, :2] + body[6:7, :2] + body[11:12, :2] + body[12:13, :2])
neck_valid = np.logical_and(body[5:6, 2] == 1, body[6:7, 2] == 1)
heattop_valid = np.zeros((1,), dtype=bool)
chest_valid = np.logical_and(body[5:6, 2] == 1, body[6:7, 2] == 1) * np.logical_and(body[11:12, 2] == 1, body[12:13, 2] == 1)
body2d = np.concatenate([body[:, :2], neck, heattop, chest], axis=0)
valid = np.concatenate([body[:, 2] == 1, neck_valid, heattop_valid, chest_valid])
human['body'].append(body2d.astype(np.float32))
human['body_valid'].append(valid.astype(bool))
human['other_bbox'].append(bbox_other)
human['img_dirs'] = img_dirs
human['mask_dirs'] = mask_dirs
order_dict = COCO_to_main
elif name == 'MPII':
self.image_root = '/media/posefs3b/Datasets/MPI/images/'
self.mask_root = '/media/posefs3b/Users/donglaix/mpii_mask/'
path_to_db = 'data/MPII_collected.json'
with open(path_to_db) as f:
db_data = json.load(f)
total_num = len(db_data['img_paths'])
human = {'body': [], 'body_valid': [], 'other_bbox': []}
img_dirs = []
mask_dirs = []
for i in range(total_num):
                if (mode == 'training' and not db_data['is_train'][i]) or (mode == 'evaluation' and db_data['is_train'][i]):
continue
body = np.array(db_data['joint_self'][i], dtype=int)
if np.sum(body[:, 2] == 1) <= 3:
continue
img_dirs.append(os.path.join(self.image_root, db_data['img_paths'][i]))
mask_dirs.append(os.path.join(self.mask_root, '{:05d}.png'.format(i)))
body = np.concatenate([body, np.zeros([1, 3], dtype=int)], axis=0)
human['body'].append(body[:, :2].astype(np.float32))
human['body_valid'].append(body[:, 2].astype(bool))
human['img_dirs'] = img_dirs
human['mask_dirs'] = mask_dirs
order_dict = MPII_to_main
else:
raise NotImplementedError
self.register_tensor(human, order_dict)
self.num_samples = len(self.tensor_dict['img_dirs'])
def get(self):
if self.name == 'COCO':
d = super(COCOReader, self).get(withPAF=True, read_image=True, imw=640, imh=640)
elif self.name == 'MPII':
d = super(COCOReader, self).get(withPAF=True, read_image=True, imw=1920, imh=1080)
else:
raise NotImplementedError
return d
if __name__ == '__main__':
dataset = COCOReader(name='COCO', mode='training', shuffle=False, objtype=0, crop_noise=False)
data_dict = dataset.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
import utils.general
from utils.PAF import plot_all_PAF, plot_PAF
for i in range(dataset.num_samples):
image_crop, image, body2d, body_valid, img_dir, mask, mask_crop, PAF, PAF_type = \
sess.run([data_dict['image_crop'], data_dict['image'], data_dict['keypoint_uv_local'], data_dict['body_valid'], data_dict['img_dir'], data_dict['mask'],
data_dict['mask_crop'], data_dict['PAF'], data_dict['PAF_type']])
print ('{}: {}'.format(i, img_dir[0].decode()))
body2d = np.squeeze(body2d)
body_valid = np.squeeze(body_valid)
image_crop = np.squeeze((image_crop + 0.5) * 255).astype(np.uint8)
image = np.squeeze((image + 0.5) * 255).astype(np.uint8)
mask = np.squeeze(mask)
mask_crop = np.squeeze(mask_crop)
PAF = np.squeeze(PAF)
mask_image = np.stack([mask] * 3, axis=2)
mask_crop_image = np.stack([mask_crop] * 3, axis=2)
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_crop)
utils.general.plot2d(ax1, body2d, valid_idx=body_valid)
ax2 = fig.add_subplot(232)
plt.imshow(image)
ax3 = fig.add_subplot(233)
plt.gray()
plt.imshow(mask_image)
ax4 = fig.add_subplot(234)
plt.gray()
plt.imshow(mask_crop_image)
ax5 = fig.add_subplot(235)
PAF_img, img_z = plot_all_PAF(PAF, 3)
ax5.imshow(PAF_img)
ax6 = fig.add_subplot(236)
ax6.imshow(img_z)
plt.show()
| body2hands-main | visualization/POF/data/COCOReader.py |
import pickle
from scipy.io import loadmat
import os
import numpy as np
PATH_TO_DATASET = '/media/posefs0c/Users/donglaix/Experiments/StereoHandTracking/'
TEST_SEQS = ['B1Counting', 'B1Random']
TRAIN_SEQS = ['B2Counting', 'B2Random', 'B3Counting', 'B3Random', 'B4Counting', 'B4Random', 'B5Counting', 'B5Random', 'B6Counting', 'B6Random']
K = np.diag([822.79041, 822.79041, 1.0]).astype(np.float32)
K[0, 2] = 318.47345
K[1, 2] = 250.31296
base = 120.054
Rl = np.eye(3, dtype=np.float32)
Rr = np.eye(3, dtype=np.float32)
tl = np.zeros((3,), dtype=np.float32)
tr = np.array([-base, 0, 0], dtype=np.float32)
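# Stereo rig of the STB dataset: both cameras share the intrinsics K, the left camera
# sits at the origin with identity rotation, and the right camera is offset by the
# baseline (120.054, in the dataset's units) along -x.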
if __name__ == '__main__':
assert os.path.isdir(PATH_TO_DATASET)
# collect the testing sequences
all_test_data = np.zeros((0, 21, 3), dtype=np.float32)
for test_seq in TEST_SEQS:
mat_path = os.path.join(PATH_TO_DATASET, 'labels', test_seq + '_BB.mat')
mat_data = loadmat(mat_path)
mat_data = np.transpose(mat_data['handPara'], (2, 1, 0))
all_test_data = np.concatenate((all_test_data, mat_data), axis=0)
all_train_data = np.zeros((0, 21, 3), dtype=np.float32)
for train_seq in TRAIN_SEQS:
mat_path = os.path.join(PATH_TO_DATASET, 'labels', train_seq + '_BB.mat')
mat_data = loadmat(mat_path)
mat_data = np.transpose(mat_data['handPara'], (2, 1, 0))
all_train_data = np.concatenate((all_train_data, mat_data), axis=0)
with open('stb_collected.pkl', 'wb') as f:
pickle.dump((all_train_data, all_test_data), f)
| body2hands-main | visualization/POF/data/collect_stb.py |
import tensorflow as tf
from data.Base2DReader import Base2DReader
import os
import pickle
import numpy as np
from utils.keypoint_conversion import GAnerated_to_main as order_dict
class GAneratedReader(Base2DReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
super(GAneratedReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode == 'training'
assert objtype == 1
self.name = 'GAnerated'
self.image_root = '/media/posefs1b/Users/donglaix/hand_datasets/GANeratedHands_Release/data/' # GANerated
self.path_to_db = '/media/posefs1b/Users/donglaix/hand_datasets/GANeratedHands_Release/data/collected_data.pkl'
human2d = {'left_hand': [], 'right_hand': [], 'left_hand_valid': [], 'right_hand_valid': []}
with open(self.path_to_db, 'rb') as f:
db_data = pickle.load(f)
# load a tuple of 3 elements: list of img dirs, array of 2D joint, array of 3D joint
img_dirs = [os.path.join(self.image_root, _) for _ in db_data[0]]
human2d['right_hand'] = np.zeros((len(img_dirs), 21, 2), dtype=np.float32)
human2d['right_hand_valid'] = np.zeros((len(img_dirs), 21), dtype=bool)
human2d['right_hand_3d'] = np.zeros((len(img_dirs), 21, 3), dtype=np.float32)
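        # This reader stores every GANerated annotation as a left hand; the right-hand
        # entries above are zero placeholders flagged invalid.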
human2d['left_hand'] = db_data[1].astype(np.float32)
human2d['left_hand_valid'] = np.ones((len(img_dirs), 21), dtype=bool)
human2d['left_hand_3d'] = db_data[2].astype(np.float32)
human2d['img_dirs'] = img_dirs
self.num_samples = len(img_dirs)
self.register_tensor(human2d, order_dict)
def get(self):
d = super(GAneratedReader, self).get(imw=256, imh=256)
return d
if __name__ == '__main__':
d = GAneratedReader()
d.rotate_augmentation = True
d.blur_augmentation = True
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['hand_valid'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_origin']])
image_crop, img_dir, hand2d, hand_valid, hand2d_heatmap, PAF, mask_crop, hand3d = [np.squeeze(_) for _ in values]
image_name = img_dir.item().decode()
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2 = fig.add_subplot(232, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
max_range = 0.5 * (np.amax(hand3d_detected, axis=0) - np.amin(hand3d_detected, axis=0)).max()
center = 0.5 * (np.amax(hand3d_detected, axis=0) + np.amin(hand3d_detected, axis=0))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
ax2.set_xlim(center[0] - max_range, center[0] + max_range)
ax2.set_ylim(center[1] - max_range, center[1] + max_range)
ax2.set_zlim(center[2] - max_range, center[2] + max_range)
ax3 = fig.add_subplot(233, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
max_range = 0.5 * (np.amax(hand3d, axis=0) - np.amin(hand3d, axis=0)).max()
center = 0.5 * (np.amax(hand3d, axis=0) + np.amin(hand3d, axis=0))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
ax3.set_xlim(center[0] - max_range, center[0] + max_range)
ax3.set_ylim(center[1] - max_range, center[1] + max_range)
ax3.set_zlim(center[2] - max_range, center[2] + max_range)
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(234)
ax4.imshow(xy)
ax5 = fig.add_subplot(235)
ax5.imshow(z)
plt.show()
| body2hands-main | visualization/POF/data/GAneratedReader.py |
import pickle
import os
import numpy as np
from utils.general import plot2d_cv2
import cv2
map_index = np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=int)
def project(joints, K, R=None, t=None, distCoef=None):
""" Perform Projection.
joints: N * 3
"""
x = joints.T
if R is not None:
x = np.dot(R, x)
if t is not None:
x = x + t.reshape(3, 1)
xp = x[:2, :] / x[2, :]
if distCoef is not None:
X2 = xp[0, :] * xp[0, :]
Y2 = xp[1, :] * xp[1, :]
        XY = xp[0, :] * xp[1, :]  # x*y cross term used by the tangential distortion
R2 = X2 + Y2
R4 = R2 * R2
R6 = R4 * R2
dc = distCoef
radial = 1.0 + dc[0] * R2 + dc[1] * R4 + dc[4] * R6
tan_x = 2.0 * dc[2] * XY + dc[3] * (R2 + 2.0 * X2)
tan_y = 2.0 * dc[3] * XY + dc[2] * (R2 + 2.0 * Y2)
xp[0, :] = radial * xp[0, :] + tan_x
xp[1, :] = radial * xp[1, :] + tan_y
pt = np.dot(K[:2, :2], xp) + K[:2, 2].reshape((2, 1))
return pt.T, x.T
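# Example usage (hypothetical values; `cam` stands for one entry of the calibration
# dict loaded in the main block below):
#   uv, x_cam = project(joints3d, cam['K'], cam['R'], cam['t'], cam['distCoef'])
#   # uv: (N, 2) pixel coordinates, x_cam: (N, 3) points in the camera frame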
if __name__ == '__main__':
image_root = '/media/posefs0c/panopticdb/'
save_root = '/media/posefs1b/Users/donglaix/clean_a4_hand/crop_hand_new/'
with open('./data/a4_collected.pkl', 'rb') as f:
data = pickle.load(f)
with open('./data/camera_data_a4.pkl', 'rb') as f:
cam_data = pickle.load(f)
for set_name, set_data in data.items():
for i, sample_data in enumerate(set_data):
print ('processing {} {} / {}'.format(set_name, i, len(set_data)))
seqName = sample_data['seqName']
frame_str = sample_data['frame_str']
if 'left_hand' in sample_data:
joints = np.array(sample_data['left_hand']['landmarks']).reshape(-1, 3)
joints = joints[map_index]
count_img = 0
for c in np.random.permutation(31):
if count_img == 3: # enough
break
if c not in sample_data['left_hand']['2D']:
continue
if sum(sample_data['left_hand']['2D'][c]['insideImg']) < 15 or \
sum(sample_data['left_hand']['2D'][c]['occluded']) > 5 or (sample_data['left_hand']['2D'][c]['occluded'] == 1):
continue
count_img += 1
joint2d, _ = project(joints, cam_data[seqName][c]['K'], cam_data[seqName][c]['R'], cam_data[seqName][c]['t'], cam_data[seqName][c]['distCoef'])
img_name = '{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(image_root, 'a4', seqName, frame_str, c, frame_str)
img = cv2.imread(img_name)
assert img is not None
x1 = np.amin(joint2d[:, 0])
x2 = np.amax(joint2d[:, 0])
y1 = np.amin(joint2d[:, 1])
y2 = np.amax(joint2d[:, 1])
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
size = max(x2 - x1, y2 - y1)
scale = 200 / (1.5 * size)
M = np.array([[scale, 0, (100 - scale * cx)],
[0, scale, (100 - scale * cy)]], dtype=float)
target_img = cv2.warpAffine(img, M, (200, 200))
tjoint2d = (joint2d - np.array([cx, cy])) * scale + 100
plot2d_cv2(target_img, tjoint2d, 'hand', s=3, use_color=True)
filename = '{}#{}#left#{:02d}.png'.format(seqName, frame_str, c)
cv2.imwrite(os.path.join(save_root, filename), target_img)
if 'right_hand' in sample_data:
joints = np.array(sample_data['right_hand']['landmarks']).reshape(-1, 3)
joints = joints[map_index]
count_img = 0
for c in np.random.permutation(31):
if count_img == 3: # enough
break
if c not in sample_data['right_hand']['2D']:
continue
if sum(sample_data['right_hand']['2D'][c]['insideImg']) < 15 or \
sum(sample_data['right_hand']['2D'][c]['occluded']) > 5 or (sample_data['right_hand']['2D'][c]['occluded'] == 1):
continue
count_img += 1
joint2d, _ = project(joints, cam_data[seqName][c]['K'], cam_data[seqName][c]['R'], cam_data[seqName][c]['t'], cam_data[seqName][c]['distCoef'])
img_name = '{}/{}/hdImgs/{}/{}/00_{:02d}_{}.jpg'.format(image_root, 'a4', seqName, frame_str, c, frame_str)
img = cv2.imread(img_name)
assert img is not None
x1 = np.amin(joint2d[:, 0])
x2 = np.amax(joint2d[:, 0])
y1 = np.amin(joint2d[:, 1])
y2 = np.amax(joint2d[:, 1])
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
size = max(x2 - x1, y2 - y1)
scale = 200 / (1.5 * size)
M = np.array([[scale, 0, (100 - scale * cx)],
[0, scale, (100 - scale * cy)]], dtype=float)
target_img = cv2.warpAffine(img, M, (200, 200))
tjoint2d = (joint2d - np.array([cx, cy])) * scale + 100
plot2d_cv2(target_img, tjoint2d, 'hand', s=3, use_color=True)
filename = '{}#{}#righ#{:02d}.png'.format(seqName, frame_str, c)
cv2.imwrite(os.path.join(save_root, filename), target_img)
| body2hands-main | visualization/POF/data/collect_crop_hand.py |
import tensorflow as tf
import numpy as np
import json
from data.Base2DReader import Base2DReader
import os
from utils.keypoint_conversion import tsimon_to_main as order_dict
class TsimonDBReader(Base2DReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
super(TsimonDBReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode == 'training'
assert objtype == 1
self.name = 'Tsimon'
self.image_root = '/media/posefs0c/Users/donglaix/tsimon/'
self.path_to_db = ['/media/posefs0c/Users/donglaix/tsimon/hands_v12.json', '/media/posefs0c/Users/donglaix/tsimon/hands_v13.json', '/media/posefs0c/Users/donglaix/tsimon/hands_v143.json']
human2d = {'left_hand': [], 'right_hand': [], 'left_hand_valid': [], 'right_hand_valid': []}
img_dirs = []
for filename in self.path_to_db:
with open(filename) as f:
filedata = json.load(f)
for ihand, hand_data in enumerate(filedata['root']):
joint2d = np.array(hand_data['joint_self'])
human2d['right_hand'].append(joint2d[:, :2].astype(np.float32))
human2d['right_hand_valid'].append(joint2d[:, 2].astype(bool))
human2d['left_hand'].append(np.zeros((21, 2), dtype=np.float32))
human2d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
img_dir = os.path.join(self.image_root, '/'.join(hand_data['img_paths'].split('/')[5:]))
img_dirs.append(img_dir)
human2d['img_dirs'] = img_dirs
self.num_samples = len(img_dirs)
self.register_tensor(human2d, order_dict)
def get(self):
d = super(TsimonDBReader, self).get(imw=1920, imh=1080)
return d
if __name__ == '__main__':
dataset = TsimonDBReader(mode='training', shuffle=True, objtype=1, crop_noise=True)
data_dict = dataset.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
for i in range(dataset.num_samples):
print('{}/{}'.format(i + 1, dataset.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_uv_origin'], data_dict['image'],
data_dict['left_hand_valid'], data_dict['right_hand_valid']])
image_crop, img_dir, hand2d, hand2d_heatmap, PAF, mask_crop, hand2d_origin, image_full, left_hand_valid, right_hand_valid \
= [np.squeeze(_) for _ in values]
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
image_full_v = ((image_full + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand_valid = right_hand_valid
fig = plt.figure(1)
ax1 = fig.add_subplot(241)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
for j in range(21):
plt.text(hand2d[j, 0], hand2d[j, 1], str(j))
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(244)
ax4.imshow(xy)
ax6 = fig.add_subplot(246)
ax6.imshow(image_full_v)
utils.general.plot2d(ax6, hand2d_origin, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax7 = fig.add_subplot(247)
mask_3c = np.stack([mask_crop] * 3, axis=2)
ax7.imshow(mask_3c)
ax8 = fig.add_subplot(248)
ax8.imshow((mask_3c * image_v).astype(np.uint8))
plt.show()
| body2hands-main | visualization/POF/data/TsimonDBReader.py |
import tensorflow as tf
import pickle
from data.BaseReader import BaseReader
import os
import numpy as np
class RHDReader(BaseReader):
def __init__(self, mode='training', objtype=1, shuffle=False, batch_size=1, crop_noise=False):
assert objtype == 1
super(RHDReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode in ('training', 'evaluation')
self.name = 'RHD'
self.image_root = '/media/posefs0c/Users/donglaix/Experiments/RHD_published_v2/{}/'.format(mode)
path_to_db = os.path.join(self.image_root, 'anno_{}.pickle'.format(mode))
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
human3d = {'K': [], 'R': [], 't': [], 'distCoef': [], 'left_hand': [], 'left_hand_valid': [], 'right_hand': [], 'right_hand_valid': []}
img_dirs = []
mask_dirs = []
for i, data in db_data.items():
img_dir = os.path.join(self.image_root, 'color', '{:05d}.png'.format(i))
if data['uv_vis'][:21, 2].all():
# add the left hand
img_dirs.append(img_dir)
human3d['R'].append(np.eye(3, dtype=np.float32))
human3d['t'].append(np.zeros((3,), dtype=np.float32))
human3d['distCoef'].append(np.zeros((5,), dtype=np.float32))
human3d['K'].append(data['K'].astype(np.float32))
human3d['left_hand'].append(data['xyz'][:21, :].astype(np.float32))
human3d['right_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.ones((21,), dtype=bool))
human3d['right_hand_valid'].append(np.zeros((21,), dtype=bool))
mask_dir = os.path.join(self.image_root, 'mask_sep', 'left_{:05d}.png'.format(i))
mask_dirs.append(mask_dir)
if data['uv_vis'][21:, 2].all():
# add the right hand
img_dirs.append(img_dir)
human3d['R'].append(np.eye(3, dtype=np.float32))
human3d['t'].append(np.zeros((3,), dtype=np.float32))
human3d['distCoef'].append(np.zeros((5,), dtype=np.float32))
human3d['K'].append(data['K'].astype(np.float32))
human3d['right_hand'].append(data['xyz'][21:, :].astype(np.float32))
human3d['left_hand'].append(np.zeros((21, 3), dtype=np.float32))
human3d['left_hand_valid'].append(np.zeros((21,), dtype=bool))
human3d['right_hand_valid'].append(np.ones((21,), dtype=bool))
mask_dir = os.path.join(self.image_root, 'mask_sep', 'right_{:05d}.png'.format(i))
mask_dirs.append(mask_dir)
human3d['img_dirs'] = img_dirs
# human3d['mask_dirs'] = mask_dirs
self.register_tensor(human3d, {}) # pass in an empty dict because no order needs to be changed
self.num_samples = len(img_dirs)
def get(self):
d = super(RHDReader, self).get(imw=320, imh=320)
return d
if __name__ == '__main__':
d = RHDReader(mode='training', shuffle=True, objtype=1, crop_noise=True)
d.rotate_augmentation = True
d.blur_augmentation = True
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.05)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.PAF import plot_PAF, PAF_to_3D, plot_all_PAF
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
values = \
sess.run([data_dict['image_crop'], data_dict['img_dir'], data_dict['keypoint_uv_local'], data_dict['scoremap2d'],
data_dict['PAF'], data_dict['mask_crop'], data_dict['keypoint_xyz_local'], data_dict['keypoint_uv_origin'], data_dict['image']])
image_crop, img_dir, hand2d, hand2d_heatmap, PAF, mask_crop, hand3d, hand2d_origin, image_full = [np.squeeze(_) for _ in values]
image_v = ((image_crop + 0.5) * 255).astype(np.uint8)
image_full_v = ((image_full + 0.5) * 255).astype(np.uint8)
hand2d_detected = utils.general.detect_keypoints2d(hand2d_heatmap)[:21, :]
hand3d_detected, _ = PAF_to_3D(hand2d_detected, PAF, objtype=1)
hand3d_detected = hand3d_detected[:21, :]
hand_valid = np.ones((21,), dtype=bool)
fig = plt.figure(1)
ax1 = fig.add_subplot(241)
plt.imshow(image_v)
utils.general.plot2d(ax1, hand2d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
utils.general.plot2d(ax1, hand2d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
for j in range(21):
plt.text(hand2d[j, 0], hand2d[j, 1], str(j))
ax2 = fig.add_subplot(242, projection='3d')
utils.general.plot3d(ax2, hand3d_detected, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax2.set_xlabel('X Label')
ax2.set_ylabel('Y Label')
ax2.set_zlabel('Z Label')
plt.axis('equal')
ax3 = fig.add_subplot(243, projection='3d')
utils.general.plot3d(ax3, hand3d, type_str='hand', valid_idx=hand_valid, color=np.array([1.0, 0.0, 0.0]))
ax3.set_xlabel('X Label')
ax3.set_ylabel('Y Label')
ax3.set_zlabel('Z Label')
plt.axis('equal')
xy, z = plot_all_PAF(PAF, 3)
ax4 = fig.add_subplot(244)
ax4.imshow(xy)
ax5 = fig.add_subplot(245)
ax5.imshow(z)
ax6 = fig.add_subplot(246)
ax6.imshow(image_full_v)
utils.general.plot2d(ax6, hand2d_origin, type_str='hand', valid_idx=hand_valid, color=np.array([0.0, 0.0, 1.0]))
ax7 = fig.add_subplot(247)
mask_3c = np.stack([mask_crop] * 3, axis=2)
ax7.imshow(mask_3c)
ax8 = fig.add_subplot(248)
ax8.imshow((mask_3c * image_v).astype(np.uint8))
plt.show()
| body2hands-main | visualization/POF/data/RHDReader.py |
import os
import pickle
import json
import numpy as np
def load_calib_file(calib_file):
assert os.path.isfile(calib_file)
with open(calib_file) as f:
calib = json.load(f)
for key in calib:
if type(calib[key]) == list:
calib[key] = np.array(calib[key])
return calib
"""
#################################################################
Panoptic A4
#################################################################
"""
# run in Python 3
root = '/media/posefs0c/panopticdb/a4/'
sample_list = os.path.join(root, 'sample_list.pkl')
with open(sample_list, 'rb') as f:
df = pickle.load(f)
# collect hand data
if os.path.isfile('./a4_collected.pkl'):
print('A4 collection file exists.')
else:
training_data = []
testing_data = []
for seqName, seq_samples in df.items():
i = 0
for hvframe, frame_dict in seq_samples.items():
i += 1
hv, frame_str = hvframe
print('collecting data: {} {}/{}'.format(seqName, i, len(seq_samples)))
person3df = os.path.join(root, 'annot_{}_3d'.format(hv), seqName, 'Recon3D_{0}{1}.json'.format(hv, frame_str))
with open(person3df) as f:
print(person3df)
person3d = json.load(f)
map_id = {}
for person_data in person3d:
pid = person_data['id']
if pid == -1:
continue
person_dict = {'seqName': seqName, 'frame_str': frame_str, 'id': pid}
body_dict = {'landmarks': person_data['body']['landmarks'], '2D': {}}
person_dict['body'] = body_dict
if 'subjectsWithValidLHand' in frame_dict and pid in frame_dict['subjectsWithValidLHand']:
left_hand_dict = {'landmarks': person_data['left_hand']['landmarks'], '2D': {}}
person_dict['left_hand'] = left_hand_dict
if 'subjectsWithValidRHand' in frame_dict and pid in frame_dict['subjectsWithValidRHand']:
right_hand_dict = {'landmarks': person_data['right_hand']['landmarks'], '2D': {}}
person_dict['right_hand'] = right_hand_dict
map_id[pid] = person_dict
for panelIdx, camIdx in frame_dict['camIdxArray']:
person2df = os.path.join(root, 'annot_{}_2d'.format(hv), seqName, frame_str, 'Recon2D_00_{0:02d}_{1}.json'.format(camIdx, frame_str))
with open(person2df) as f:
person2d = json.load(f)
for person_data in person2d:
pid = person_data['id']
if pid == -1:
continue
person_dict = map_id[pid]
person_dict['body']['2D'][camIdx] = {'insideImg': person_data['body']['insideImg'], 'occluded': person_data['body']['occluded']}
if 'left_hand' in person_dict:
person_dict['left_hand']['2D'][camIdx] = {'insideImg': person_data['left_hand']['insideImg'], 'occluded': person_data['left_hand']['self_occluded'],
'overlap': person_data['left_hand']['overlap']}
if 'right_hand' in person_dict:
person_dict['right_hand']['2D'][camIdx] = {'insideImg': person_data['right_hand']['insideImg'], 'occluded': person_data['right_hand']['self_occluded'],
'overlap': person_data['right_hand']['overlap']}
for _, value in map_id.items():
if seqName == '171204_pose5' or seqName == '171204_pose6':
testing_data.append(value)
else:
training_data.append(value)
with open('./a4_collected.pkl', 'wb') as f:
pickle.dump({'training_data': training_data, 'testing_data': testing_data}, f)
# collect camera calibration data
if os.path.isfile('./camera_data_a4.pkl'):
    print('Camera file exists.')
else:
seqs = df.keys()
calib_dict = {}
for seqName in seqs:
cam_dict = {}
for camIdx in range(31):
annot_dir = os.path.join(root, 'annot_calib', seqName)
calib_file = os.path.join(annot_dir, 'calib_00_{:02d}.json'.format(camIdx))
calib = load_calib_file(calib_file)
cam_dict[camIdx] = calib
calib_dict[seqName] = cam_dict
with open('./camera_data_a4.pkl', 'wb') as f:
pickle.dump(calib_dict, f)
"""
#################################################################
Panoptic A5
#################################################################
"""
# run in Python 3
root = '/media/posefs0c/panopticdb/a5/'
sample_list = os.path.join(root, 'sample_list.pkl')
with open(sample_list, 'rb') as f:
df = pickle.load(f)
# collect hand data
if os.path.isfile('./a5_collected.pkl'):
print('A5 collection file exists.')
else:
training_data = []
testing_data = []
for seqName, seq_samples in df.items():
i = 0
for hvframe, frame_dict in seq_samples.items():
i += 1
hv, frame_str = hvframe
print('collecting data: {} {}/{}'.format(seqName, i, len(seq_samples)))
person3df = os.path.join(root, 'annot_{}_3d'.format(hv), seqName, 'Recon3D_{0}{1}.json'.format(hv, frame_str))
with open(person3df) as f:
print(person3df)
person3d = json.load(f)
map_id = {}
for person_data in person3d:
pid = person_data['id']
if pid == -1:
continue
person_dict = {'seqName': seqName, 'frame_str': frame_str, 'id': pid}
body_dict = {'landmarks': person_data['body']['landmarks'], '2D': {}}
person_dict['body'] = body_dict
if 'subjectsWithValidLHand' in frame_dict and pid in frame_dict['subjectsWithValidLHand']:
left_hand_dict = {'landmarks': person_data['left_hand']['landmarks'], '2D': {}}
person_dict['left_hand'] = left_hand_dict
if 'subjectsWithValidRHand' in frame_dict and pid in frame_dict['subjectsWithValidRHand']:
right_hand_dict = {'landmarks': person_data['right_hand']['landmarks'], '2D': {}}
person_dict['right_hand'] = right_hand_dict
map_id[pid] = person_dict
for panelIdx, camIdx in frame_dict['camIdxArray']:
person2df = os.path.join(root, 'annot_{}_2d'.format(hv), seqName, frame_str, 'Recon2D_00_{0:02d}_{1}.json'.format(camIdx, frame_str))
with open(person2df) as f:
person2d = json.load(f)
for person_data in person2d:
pid = person_data['id']
if pid == -1:
continue
person_dict = map_id[pid]
person_dict['body']['2D'][camIdx] = {'insideImg': person_data['body']['insideImg'], 'occluded': person_data['body']['occluded']}
if 'left_hand' in person_dict:
person_dict['left_hand']['2D'][camIdx] = {'insideImg': person_data['left_hand']['insideImg'], 'occluded': person_data['left_hand']['self_occluded'],
'overlap': person_data['left_hand']['overlap']}
if 'right_hand' in person_dict:
person_dict['right_hand']['2D'][camIdx] = {'insideImg': person_data['right_hand']['insideImg'], 'occluded': person_data['right_hand']['self_occluded'],
'overlap': person_data['right_hand']['overlap']}
for _, value in map_id.items():
training_data.append(value)
with open('./a5_collected.pkl', 'wb') as f:
pickle.dump({'training_data': training_data, 'testing_data': testing_data}, f)
# collect camera calibration data
if os.path.isfile('./camera_data_a5.pkl'):
    print('Camera file exists.')
else:
seqs = df.keys()
calib_dict = {}
for seqName in seqs:
cam_dict = {}
for camIdx in range(31):
annot_dir = os.path.join(root, 'annot_calib', seqName)
calib_file = os.path.join(annot_dir, 'calib_00_{:02d}.json'.format(camIdx))
calib = load_calib_file(calib_file)
cam_dict[camIdx] = calib
calib_dict[seqName] = cam_dict
with open('./camera_data_a5.pkl', 'wb') as f:
pickle.dump(calib_dict, f)
"""
#################################################################
Panoptic A4Plus
#################################################################
"""
# run in Python 3
root = '/media/posefs0c/panopticdb/a4/'
sample_list = os.path.join(root, 'sample_list.pkl')
with open(sample_list, 'rb') as f:
df = pickle.load(f)
# collect hand data
if os.path.isfile('./a4plus_collected.pkl'):
    print('A4Plus collection file exists.')
else:
training_data = []
testing_data = []
for seqName, seq_samples in df.items():
i = 0
for hvframe, frame_dict in seq_samples.items():
i += 1
hv, frame_str = hvframe
print('collecting data: {} {}/{}'.format(seqName, i, len(seq_samples)))
person3df = os.path.join(root, 'annot_{}_3d'.format(hv), seqName, 'Recon3D_{0}{1}.json'.format(hv, frame_str))
with open(person3df) as f:
print(person3df)
person3d = json.load(f)
map_id = {}
for person_data in person3d:
pid = person_data['id']
if pid == -1:
continue
person_dict = {'seqName': seqName, 'frame_str': frame_str, 'id': pid}
body_dict = {'landmarks': person_data['body']['landmarks'], '2D': {}}
person_dict['body'] = body_dict
if 'subjectsWithValidLHand' in frame_dict and pid in frame_dict['subjectsWithValidLHand']:
left_hand_dict = {'landmarks': person_data['left_hand']['landmarks'], '2D': {}}
person_dict['left_hand'] = left_hand_dict
if 'subjectsWithValidRHand' in frame_dict and pid in frame_dict['subjectsWithValidRHand']:
right_hand_dict = {'landmarks': person_data['right_hand']['landmarks'], '2D': {}}
person_dict['right_hand'] = right_hand_dict
map_id[pid] = person_dict
for panelIdx, camIdx in frame_dict['camIdxArray']:
person2df = os.path.join(root, 'annot_{}_2d'.format(hv), seqName, frame_str, 'Recon2D_00_{0:02d}_{1}.json'.format(camIdx, frame_str))
with open(person2df) as f:
person2d = json.load(f)
for person_data in person2d:
pid = person_data['id']
if pid == -1:
continue
person_dict = map_id[pid]
person_dict['body']['2D'][camIdx] = {'insideImg': person_data['body']['insideImg'], 'occluded': person_data['body']['occluded']}
if 'left_hand' in person_dict:
person_dict['left_hand']['2D'][camIdx] = {'insideImg': person_data['left_hand']['insideImg'], 'occluded': person_data['left_hand']['self_occluded'],
'overlap': person_data['left_hand']['overlap']}
if 'right_hand' in person_dict:
person_dict['right_hand']['2D'][camIdx] = {'insideImg': person_data['right_hand']['insideImg'], 'occluded': person_data['right_hand']['self_occluded'],
'overlap': person_data['right_hand']['overlap']}
for _, value in map_id.items():
if seqName == '171204_pose5' or seqName == '171204_pose6':
testing_data.append(value)
else:
training_data.append(value)
with open('./a4plus_collected.pkl', 'wb') as f:
pickle.dump({'training_data': training_data, 'testing_data': testing_data}, f)
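# Note: each *_collected.pkl written above stores {'training_data': [...], 'testing_data': [...]},
# where every entry is a per-person dict with 'seqName', 'frame_str', 'id', a 'body' block and
# optional 'left_hand' / 'right_hand' blocks, each holding the 3D 'landmarks' plus a per-camera
# '2D' dict of insideImg / occluded / overlap flags. collect_crop_hand.py consumes these pickles.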
| body2hands-main | visualization/POF/data/collect_a4.py |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
import pickle
from utils.keypoint_conversion import a4_to_main as order_dict
import json
import os
class OpenposeReader(BaseReader):
def __init__(self, seqName, mode='evaluation', objtype=0, shuffle=False, batch_size=1, crop_noise=False):
super(OpenposeReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
assert mode == 'evaluation'
assert objtype == 2
self.image_root = '/media/posefs1b/Users/donglaix/siggasia018/{}/'.format(seqName)
assert os.path.isdir(self.image_root)
path_to_db = './data/{}.pkl'.format(seqName)
with open(path_to_db, 'rb') as f:
db_data = pickle.load(f)
human3d = {}
num_samples = len(db_data[0])
K = np.array(db_data[5]['K'], dtype=np.float32)
K = np.expand_dims(K, axis=0)
K = np.tile(K, (num_samples, 1, 1))
human3d['K'] = K
human3d['openpose_body'] = db_data[0].astype(np.float32)[:, :18, :]
# duplicate the neck for head top and chest
human3d['openpose_body'] = np.concatenate((human3d['openpose_body'], human3d['openpose_body'][:, 1:2, :], human3d['openpose_body'][:, 1:2, :]), axis=1)
human3d['openpose_body_score'] = db_data[0][:, :18, 2].astype(np.float32)
# duplicate the neck for head top and chest
human3d['openpose_body_score'] = np.concatenate((human3d['openpose_body_score'], human3d['openpose_body_score'][:, 1:2], human3d['openpose_body_score'][:, 1:2]), axis=1)
human3d['openpose_lhand'] = db_data[1].astype(np.float32)
human3d['openpose_lhand_score'] = db_data[1][:, :, 2].astype(np.float32)
human3d['openpose_rhand'] = db_data[2].astype(np.float32)
human3d['openpose_rhand_score'] = db_data[2][:, :, 2].astype(np.float32)
human3d['openpose_face'] = db_data[3].astype(np.float32)
human3d['openpose_face_score'] = db_data[3][:, :, 2].astype(np.float32)
human3d['openpose_foot'] = db_data[0].astype(np.float32)[:, 18:, :]
human3d['openpose_foot_score'] = db_data[0].astype(np.float32)[:, 18:, 2]
human3d['img_dirs'] = np.core.defchararray.add(np.array([self.image_root]), db_data[4])
human3d['body_valid'] = np.ones((num_samples, 20), dtype=bool)
human3d['left_hand_valid'] = np.ones((num_samples, 21), dtype=bool)
human3d['right_hand_valid'] = np.ones((num_samples, 21), dtype=bool)
# dummy values
R = np.eye(3, dtype=np.float32)
R = np.expand_dims(R, axis=0)
R = np.tile(R, (num_samples, 1, 1))
human3d['R'] = R
t = np.ones((3,), dtype=np.float32)
t = np.expand_dims(t, axis=0)
t = np.tile(t, (num_samples, 1))
human3d['t'] = t
dc = np.zeros((5,), dtype=np.float32)
dc = np.expand_dims(dc, axis=0)
dc = np.tile(dc, (num_samples, 1))
human3d['distCoef'] = dc
human3d['body'] = np.zeros((num_samples, 21, 3), dtype=np.float32)
human3d['left_hand'] = np.zeros((num_samples, 21, 3), dtype=np.float32)
human3d['right_hand'] = np.zeros((num_samples, 21, 3), dtype=np.float32)
for key, val in human3d.items():
if 'openpose' in key and 'score' not in key:
# valid = val[:, :, 2] > 0.05
valid = val[:, :, 2] > 0.0
val[:, :, 0] *= valid
val[:, :, 1] *= valid
human3d[key] = val[:, :, :2]
self.register_tensor(human3d, order_dict)
self.num_samples = len(self.tensor_dict['img_dirs'])
def get(self, imw=1920, imh=1080):
d = super(OpenposeReader, self).get(withPAF=False, bbox2d=1, imw=imw, imh=imh)
return d
if __name__ == '__main__':
d = OpenposeReader(mode='evaluation', seqName='test3', shuffle=False, objtype=2, crop_noise=False)
data_dict = d.get()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import utils.general
from utils.vis_heatmap3d import vis_heatmap3d
validation_images = []
for i in range(d.num_samples):
print('{}/{}'.format(i + 1, d.num_samples))
bimage_crop, image, body2d, body2d_local, foot2d = sess.run([data_dict['bimage_crop'], data_dict['image'], data_dict['openpose_body'], data_dict['body_uv_local'], data_dict['openpose_foot']])
foot2d = np.squeeze(foot2d)
image_v = ((image[0] + 0.5) * 255).astype(np.uint8)
image_crop_v = ((bimage_crop[0] + 0.5) * 255).astype(np.uint8)
fig = plt.figure()
ax1 = fig.add_subplot(121)
plt.imshow(image_v)
plt.scatter(foot2d[:, 0], foot2d[:, 1])
        for k in range(4):
            plt.text(int(foot2d[k, 0]), int(foot2d[k, 1]), str(k))
utils.general.plot2d(ax1, body2d[0])
ax2 = fig.add_subplot(122)
plt.imshow(image_crop_v)
utils.general.plot2d(ax2, body2d_local[0])
plt.show()
| body2hands-main | visualization/POF/data/OpenposeReader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
import numpy as np
import scipy.io as io
rng = np.random.RandomState(23456)
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import os
from PIL import Image,ImageDraw
class regressor_fcn_bn_32(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_image=False, default_size=256):
self.require_image = require_image
self.default_size = default_size
self.use_resnet = True
embed_size = default_size
if self.require_image:
embed_size += default_size
if self.use_resnet:
self.image_resnet_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(512*2, default_size),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(default_size, momentum=0.01),
)
self.image_reduce = nn.Sequential(
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,256,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(256),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv8 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv9 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv10 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip1 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip2 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create image embedding
def process_image(self, image_):
B, T, _ = image_.shape
image_ = image_.view(-1, 512*2)
feat = self.image_resnet_postprocess(image_)
feat = feat.view(B, T, self.default_size)
feat = feat.permute(0, 2, 1).contiguous()
feat = self.image_reduce(feat)
return feat
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, image_=None):
B, T = input_.shape[0], input_.shape[2]
fourth_block = self.encoder(input_)
if self.require_image:
feat = self.process_image(image_)
fourth_block = torch.cat((fourth_block, feat), dim=1)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
eighth_block = self.conv8(seventh_block)
ninth_block = self.conv9(eighth_block)
tenth_block = self.conv10(ninth_block)
ninth_block = tenth_block + ninth_block
ninth_block = self.skip1(ninth_block)
eighth_block = ninth_block + eighth_block
eighth_block = self.skip2(eighth_block)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip4(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip5(fifth_block)
output = self.decoder(fifth_block)
return output
class regressor_fcn_bn_discriminator(nn.Module):
def __init__(self):
super(regressor_fcn_bn_discriminator, self).__init__()
def build_net(self, feature_in_dim):
self.convs = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,64,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(64),
## 64
nn.Dropout(0.5),
nn.Conv1d(64,64,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(64),
## 32
nn.Dropout(0.5),
nn.Conv1d(64,32,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(32),
## 16
nn.Dropout(0.5),
nn.Conv1d(32,32,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(32),
## 8
nn.Dropout(0.5),
nn.Conv1d(32,16,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(16),
## 4
nn.Dropout(0.5),
nn.Conv1d(16,16,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(16),
## 2
nn.Dropout(0.5),
nn.Conv1d(16,8,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(8),
## 1
nn.Dropout(0.5),
nn.Conv1d(8,1,3,padding=1),
)
def forward(self, input_):
outputs = self.convs(input_)
return outputs
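if __name__ == '__main__':
    ## Illustrative shape check, added as a sketch (not part of the original training code).
    ## It assumes the arm2wh setting used elsewhere in this repo: 6 arm joints x 6D rotation
    ## in (36 channels), 42 hand joints x 6D rotation out (252 channels), over a window of
    ## T frames laid out as (batch, channels, time) for the 1D convolutions.
    B, T = 2, 64
    feature_in_dim, feature_out_dim = 6 * 6, 42 * 6
    generator = regressor_fcn_bn_32()
    generator.build_net(feature_in_dim, feature_out_dim)
    generator.eval()  # eval mode so untrained BatchNorm/Dropout layers behave deterministically
    discriminator = regressor_fcn_bn_discriminator()
    discriminator.build_net(feature_out_dim)
    discriminator.eval()
    with torch.no_grad():
        dummy_arms = torch.randn(B, feature_in_dim, T)
        pred_hands = generator(dummy_arms)
        score = discriminator(pred_hands)
    print('generator output:', pred_hands.shape)    # expected (B, 252, T)
    print('discriminator output:', score.shape)     # (B, 1, 1) for T = 64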
| body2hands-main | utils/modelZoo.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import os, sys
import scipy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from scipy.spatial.transform import Rotation as R
from shutil import copyfile
from PIL import Image,ImageDraw
from torchvision import transforms
import torch
FEATURE_MAP = {
    'arm2wh': ((6 * 6), 42 * 6),  # 6 arm joints x 6D rotation (36 values) in -> 42 hand joints (21 per hand) x 6D rotation (252 values) out
}
ARMS_ONLY = [12, 13, 14, 15, 16, 17]  # arm joint indices in the MTC pose vector
EPSILON = 1e-10  # added to std to avoid division by zero during standardization
## helper for calculating mean and standard dev
def mean_std(feat, data, rot_idx):
if feat == 'wh':
mean = data.mean(axis=2).mean(axis=0)[np.newaxis,:, np.newaxis]
std = data.std(axis=2).std(axis=0)[np.newaxis,:, np.newaxis]
std += EPSILON
else:
mean = data.mean(axis=2).mean(axis=0)[np.newaxis,:, np.newaxis]
std = np.array([[[data.std()]]]).repeat(data.shape[1], axis=1)
return mean, std
## helper for calculating standardization stats
def calc_standard(train_X, train_Y, pipeline):
rot_idx = -6
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
body_mean_X, body_std_X = mean_std(in_feat, train_X, rot_idx)
if in_feat == out_feat:
body_mean_Y = body_mean_X
body_std_Y = body_std_X
else:
body_mean_Y, body_std_Y = mean_std(out_feat, train_Y, rot_idx)
return body_mean_X, body_std_X, body_mean_Y, body_std_Y
## utility check if object is float
def is_float(n):
try:
float(n)
return True
except:
return False
## utility function to convert from r6d space to axis angle
def rot6d_to_aa(r6ds):
res = np.zeros((r6ds.shape[0], 3))
for i,row in enumerate(r6ds):
np_r6d = np.expand_dims(row, axis=0)
np_mat = np.reshape(np_rot6d_to_mat(np_r6d)[0], (3,3))
np_mat = R.from_matrix(np_mat)
aa = np_mat.as_rotvec()
res[i,:] = aa
return res
def np_mat_to_rot6d(np_mat):
""" Get 6D rotation representation for rotation matrix.
Implementation base on
https://arxiv.org/abs/1812.07035
[Inputs]
flattened rotation matrix (last dimension is 9)
[Returns]
6D rotation representation (last dimension is 6)
"""
shape = np_mat.shape
if not ((shape[-1] == 3 and shape[-2] == 3) or (shape[-1] == 9)):
        raise AttributeError("The inputs in np_mat_to_rot6d should be [...,9] or [...,3,3], \
but found tensor with shape {}".format(shape[-1]))
np_mat = np.reshape(np_mat, [-1, 3, 3])
np_r6d = np.concatenate([np_mat[...,0], np_mat[...,1]], axis=-1)
if len(shape) == 1:
np_r6d = np.reshape(np_r6d, [6])
return np_r6d
## utility function to convert from axis angle to r6d space
def aa_to_rot6d(vecs):
res = np.zeros((vecs.shape[0], 6))
for i,row in enumerate(vecs):
np_mat = R.from_rotvec(row)
        np_mat = np_mat.as_matrix()  # as_dcm() was removed from newer SciPy; as_matrix() matches R.from_matrix used above
np_mat = np.expand_dims(np_mat, axis=0) #e.g. batch 1
np_r6d = np_mat_to_rot6d(np_mat)[0]
res[i,:] = np_r6d
return res
## utility function to convert from r6d space to rotation matrix
def np_rot6d_to_mat(np_r6d):
shape = np_r6d.shape
np_r6d = np.reshape(np_r6d, [-1,6])
x_raw = np_r6d[:,0:3]
y_raw = np_r6d[:,3:6]
x = x_raw / np.linalg.norm(x_raw, ord=2, axis=-1)
z = np.cross(x, y_raw)
z = z / np.linalg.norm(z, ord=2, axis=-1)
y = np.cross(z, x)
x = np.reshape(x, [-1,3,1])
y = np.reshape(y, [-1,3,1])
z = np.reshape(z, [-1,3,1])
np_matrix = np.concatenate([x,y,z], axis=-1)
if len(shape) == 1:
np_matrix = np.reshape(np_matrix, [9])
else:
output_shape = shape[:-1] + (9,)
np_matrix = np.reshape(np_matrix, output_shape)
return np_matrix
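## Added sketch (not part of the original pipeline): quick sanity check that the 6D rotation
## representation above round-trips through axis-angle, i.e. rot6d_to_aa(aa_to_rot6d(v)) == v
## up to floating point error, provided the rotation angle stays below pi so the axis-angle
## vector is already in its canonical range. Call it manually, e.g. assert _check_rot6d_round_trip().
def _check_rot6d_round_trip(num_vecs=8, seed=0, tol=1e-6):
    rng_local = np.random.RandomState(seed)
    axes = rng_local.normal(size=(num_vecs, 3))
    axes /= np.linalg.norm(axes, axis=1, keepdims=True)  # random unit rotation axes
    angles = rng_local.uniform(0.0, 0.9 * np.pi, size=(num_vecs, 1))  # angles strictly below pi
    vecs = axes * angles
    recovered = rot6d_to_aa(aa_to_rot6d(vecs))
    return np.allclose(vecs, recovered, atol=tol)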
## utility to load windows from outside files
def load_windows(data_dir, pipeline, num_samples=None, use_euler=False, require_image=False, require_audio=False, hand3d_image=False, use_lazy=False, test_smpl=False, temporal=False):
preload_path = os.path.join(data_dir, 'filepaths.npy')
if os.path.exists(preload_path):
filepaths = np.load(preload_path, allow_pickle=True)
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
p0_size, p1_size = FEATURE_MAP[pipeline]
if os.path.exists(os.path.join(data_dir, 'full_bodies2.npy')):
print('using super quick load', data_dir)
p1_windows = np.load(os.path.join(data_dir, 'full_hands2.npy'), allow_pickle=True)
p0_windows = np.load(os.path.join(data_dir, 'full_bodies2.npy'), allow_pickle=True)
B,T = p0_windows.shape[0], p0_windows.shape[1]
if in_feat == 'arm':
p0_windows = np.reshape(p0_windows, (B,T,-1,6))
p0_windows = p0_windows[:,:,ARMS_ONLY,:]
p0_windows = np.reshape(p0_windows, (B,T,-1))
if require_image:
image_windows = np.load(os.path.join(data_dir, 'full_resnet.npy'), allow_pickle=True)
if require_image:
p0_windows = (p0_windows, image_windows)
return p0_windows, p1_windows, filepaths, None
## utility to save results
def save_results(paths, output, pipeline, base_path, tag=''):
feats = pipeline.split('2')
out_feat = feats[1]
paths = np.array(paths)
for i in range(paths.shape[0]):
print('working on', paths[i,0,0])
for j in range(paths.shape[1]):
vid_path, pnum, frame_idx = paths[i][j]
vid_path = os.path.join(base_path, vid_path)
if not os.path.exists(os.path.join(vid_path, 'results/')):
os.makedirs(os.path.join(vid_path, 'results/'))
if out_feat == 'wh':
pred_dir = os.path.join(vid_path, 'results/{}predicted_body_3d_frontal/'.format(tag))
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
pred_path = os.path.join(pred_dir, '{:04d}.txt'.format(int(frame_idx)))
## set the ground truth estimated full body pose parameters for viewing
gt_path = os.path.join(vid_path, 'body_3d_frontal/{:04d}.txt'.format(int(frame_idx)))
with open(gt_path) as f:
lines = f.readlines()
cam = lines[0]
cam = [float(n) for n in cam.split(' ') if is_float(n)]
pose = lines[1]
pose = [float(n) for n in pose.split(' ') if is_float(n)]
shape = lines[2]
shape = [float(n) for n in shape.split(' ') if is_float(n)]
idk = lines[3]
idk = [float(n) for n in idk.split(' ') if is_float(n)]
## DONE set the ground truth estimated full body pose parameters for viewing
## fill in the predicted hands to the full body pose
pose = np.reshape(pose, (62,3))
if out_feat == 'wh':
hands_r6d = np.reshape(output[i][j],(42,6))
hands = rot6d_to_aa(hands_r6d)
pose[-42:,:] = hands
pose = np.reshape(pose, (-1))
## DONE fill in the predicted hands to the full body pose
            ## writing prediction to file
with open(pred_path, 'w') as f:
for item in cam:
f.write("%s "%item)
f.write("\n")
for item in pose:
f.write("%s "%item)
f.write("\n")
for item in shape:
f.write("%s "%item)
f.write("\n")
for item in idk:
f.write("%s "%item)
            ## DONE writing prediction to file
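## Note on the format written above: each predicted_body_3d_frontal/XXXX.txt mirrors the MTC
## body_3d_frontal files it was derived from -- four lines holding the camera parameters, a 62x3
## axis-angle pose flattened to one row (with the final 42 joints being the two hands replaced by
## the network prediction), the shape coefficients, and the remaining coefficient vector copied
## through unchanged.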
| body2hands-main | utils/load_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import json
import numpy as np
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
import pickle
import utils.modelZoo as modelZoo
from utils.load_utils import *
ARMS_ONLY = [13,14,16,17,18,19] #arms for smpl
N = 4  # number of frankmocap prediction frames (starting at index 0) that this demo loads and converts
## main function demo script to run body2hands on frankmocap (smplx) predictions
def main(args):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
rng = np.random.RandomState(23456)
torch.manual_seed(23456)
torch.cuda.manual_seed(23456)
print('> checkpoint', args.checkpoint)
pipeline = args.pipeline
feature_in_dim, feature_out_dim = FEATURE_MAP[pipeline]
pretrain_model = args.checkpoint
tag = args.tag
######################################
# Setup model
feats = pipeline.split('2')
in_feat, out_feat = feats[0], feats[1]
checkpoint_dir = os.path.split(args.checkpoint)[0]
model_tag = os.path.basename(args.checkpoint).split(args.pipeline)[0]
preprocess = np.load(os.path.join(checkpoint_dir,'{}{}_preprocess_core.npz'.format(model_tag, args.pipeline)))
args.model = 'regressor_fcn_bn_32'
model = getattr(modelZoo,args.model)()
model.build_net(feature_in_dim, feature_out_dim)
model.cuda()
# Create model
loaded_state = torch.load(pretrain_model, map_location=lambda storage, loc: storage)
model.load_state_dict(loaded_state['state_dict'], strict=False)
model.eval()
test_X, total_body, total_cam = load_smplx(args.data_dir)
###### swap axis ######
print("seq len", test_X.shape)
test_X = np.swapaxes(test_X, 1, 2).astype(np.float32)
###### standardize ######
body_mean_X = preprocess['body_mean_X']
body_std_X = preprocess['body_std_X']
body_mean_Y = preprocess['body_mean_Y']
body_std_Y = preprocess['body_std_Y']
test_X = (test_X - body_mean_X) / body_std_X
##### convert to tensor ######
inputData = Variable(torch.from_numpy(test_X)).cuda()
# ===================forward=====================
output = model(inputData)
# De-standardaize
output_np = output.data.cpu().numpy()
output_np = output_np * body_std_Y + body_mean_Y
output_np = np.swapaxes(output_np, 1, 2).astype(np.float32)
### saving as output in MTC format
save_output(output_np, total_body, total_cam, 'models/', args.pipeline, tag=args.tag)
## process to save smplx based prediction to mtc format
def save_output(output, total_body, total_cam, model_path, pipeline, tag):
feats = pipeline.split('2')
out_feat = feats[1]
start = 0
for j in range(N):
frame_idx = start+j
save_dir = os.path.join(args.data_dir, 'results/{}predicted_body_3d_frontal'.format(tag))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, '{:04d}.txt'.format(int(frame_idx)))
## note camera differences for visualization between MTC and frankmocap,
## so we just use a frontal default camera.
cam = [-12.9248, 51.8431, 209.5]
shape = np.zeros(30)
idk = np.zeros(200)
## load output from smpl body pose
pose = np.zeros((62,3))
pose[:20,:] = np.reshape(total_body[0][j], (-1,3))[:20,:]
## load predicted hands (convert from 6d to 3d)
hands_r6d = np.reshape(output[0][j],(42,6))
hands = rot6d_to_aa(hands_r6d)
pose[-42:,:] = hands
pose = np.reshape(pose, (-1))
## save in MTC format
with open(save_path, 'w') as f:
for item in cam:
f.write("%s "%item)
f.write("\n")
for item in pose:
f.write("%s "%item)
f.write("\n")
for item in shape:
f.write("%s "%item)
f.write("\n")
for item in idk:
f.write("%s "%item)
## function to load smplx data from frankmocap plugin
def load_smplx(data_dir):
result = np.zeros((N,36))
body_result = np.zeros((N,72))
cam_result = np.zeros((N,3))
start = 0
## body_result contains original full body smpl (in original aa)
## result contains arms only smpl (in r6d)
for i in range(N):
file_path = os.path.join(args.data_dir, '{:05d}_prediction_result.pkl'.format(i+start))
with open(file_path, 'rb') as f:
data = pickle.load(f)
cam = data['pred_output_list'][0]['pred_camera']
cam_result[i,:] = cam
body = data['pred_output_list'][0]['pred_body_pose']
body *= -1
body_result[i,:] = body
# convert aa to r6d
body = np.reshape(body, (-1, 3))
body = aa_to_rot6d(body)
body = np.reshape(body[ARMS_ONLY,:], (-1))
result[i,:] = body
## apply additional smoothing to original smpl for nice visualization
body_result = body_result[np.newaxis,:,:]
outputs_smoothed = np.copy(body_result)
cam_result = cam_result[np.newaxis,:,:]
cam_smoothed = np.copy(cam_result)
for i in range(2, body_result.shape[1]-2):
outputs_smoothed[:,i,:] = body_result[:,i-2,:]*0.1 + body_result[:,i-1,:]*0.2 + body_result[:,i,:]*0.4 + body_result[:,i+1,:]*0.2 + body_result[:,i+2,:]*0.1
cam_smoothed[:,i,:] = cam_result[:,i-2,:]*0.1 + cam_result[:,i-1,:]*0.2 + cam_result[:,i,:]*0.4 + cam_result[:,i+1,:]*0.2 + cam_result[:,i+2,:]*0.1
return result[np.newaxis,:,:], outputs_smoothed, cam_smoothed
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, required=True, help='path to pretrained model')
parser.add_argument('--data_dir', type=str, required=True, help='input data directory with frankmocap output')
parser.add_argument('--pipeline', type=str, default='arm2wh', help='pipeline to run')
parser.add_argument('--tag', type=str, default='mocap_')
args = parser.parse_args()
print(args)
main(args)
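## Example invocation (paths are illustrative placeholders, not files shipped with the repo):
##   python demo.py --checkpoint <path/to/model>arm2wh_checkpoint.pth --data_dir <frankmocap_output_dir> --tag mocap_
## The checkpoint directory is also expected to contain the matching
## <model_tag>arm2wh_preprocess_core.npz standardization file that is loaded above.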
| body2hands-main | smplx_plugin/demo.py |