# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfdbg CLI as SessionRunHook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.training import session_run_hook
class LocalCLIDebugHook(session_run_hook.SessionRunHook):
"""Command-line-interface debugger hook.
Can be used as a hook for `tf.compat.v1.train.MonitoredSession`s and
`tf.estimator.Estimator`s. Provides a substitute for
`tfdbg.LocalCLIDebugWrapperSession` in cases where the session is not directly
available.
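A minimal usage sketch (assuming the conventional `tf_debug` alias for
`tensorflow.python.debug`; `my_model_fn` and `my_input_fn` are hypothetical
placeholders):

  from tensorflow.python import debug as tf_debug

  hook = tf_debug.LocalCLIDebugHook(ui_type="curses")
  estimator = tf.estimator.Estimator(model_fn=my_model_fn)
  estimator.train(input_fn=my_input_fn, hooks=[hook])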
"""
def __init__(self, ui_type="curses", dump_root=None, thread_name_filter=None):
"""Create a local debugger command-line interface (CLI) hook.
Args:
ui_type: (`str`) requested user-interface type. Currently supported:
(curses | readline).
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards.
thread_name_filter: Regular-expression whitelist for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
"""
self._ui_type = ui_type
self._dump_root = dump_root
self._thread_name_filter = thread_name_filter
self._session_wrapper = None
self._pending_tensor_filters = {}
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
Override default behavior to accommodate the possibility of this method
being called prior to the initialization of the underlying
`LocalCLIDebugWrapperSession` object.
Args:
filter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`
for details.
tensor_filter: See doc of
`LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
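For example, the stock `has_inf_or_nan` filter can be registered before the
underlying wrapper session exists (a sketch; `tf_debug` is the conventional
alias for `tensorflow.python.debug`):

  hook = tf_debug.LocalCLIDebugHook()
  hook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)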
"""
if self._session_wrapper:
self._session_wrapper.add_tensor_filter(filter_name, tensor_filter)
else:
self._pending_tensor_filters[filter_name] = tensor_filter
def begin(self):
pass
def before_run(self, run_context):
if not self._session_wrapper:
self._session_wrapper = local_cli_wrapper.LocalCLIDebugWrapperSession(
run_context.session,
ui_type=self._ui_type,
dump_root=self._dump_root,
thread_name_filter=self._thread_name_filter)
# Actually register tensor filters registered prior to the construction
# of the underlying LocalCLIDebugWrapperSession object.
for filter_name in self._pending_tensor_filters:
self._session_wrapper.add_tensor_filter(
filter_name, self._pending_tensor_filters[filter_name])
# Increment run call counter.
self._session_wrapper.increment_run_call_count()
# Adapt run_context to an instance of OnRunStartRequest for invoking the
# wrapper session's on_run_start().
on_run_start_request = framework.OnRunStartRequest(
run_context.original_args.fetches, run_context.original_args.feed_dict,
None, None, self._session_wrapper.run_call_count)
on_run_start_response = self._session_wrapper.on_run_start(
on_run_start_request)
self._performed_action = on_run_start_response.action
run_args = session_run_hook.SessionRunArgs(
None, feed_dict=None, options=config_pb2.RunOptions())
if self._performed_action == framework.OnRunStartAction.DEBUG_RUN:
# pylint: disable=protected-access
self._session_wrapper._decorate_run_options_for_debug(
run_args.options,
on_run_start_response.debug_urls,
debug_ops=on_run_start_response.debug_ops,
node_name_regex_whitelist=(
on_run_start_response.node_name_regex_whitelist),
op_type_regex_whitelist=(
on_run_start_response.op_type_regex_whitelist),
tensor_dtype_regex_whitelist=(
on_run_start_response.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
on_run_start_response.tolerate_debug_op_creation_failures))
# pylint: enable=protected-access
elif self._performed_action == framework.OnRunStartAction.PROFILE_RUN:
# pylint: disable=protected-access
self._session_wrapper._decorate_run_options_for_profile(run_args.options)
# pylint: enable=protected-access
return run_args
def after_run(self, run_context, run_values):
# Adapt run_context and run_values to an OnRunEndRequest and invoke the
# wrapper session's on_run_end().
on_run_end_request = framework.OnRunEndRequest(self._performed_action,
run_values.run_metadata)
self._session_wrapper.on_run_end(on_run_end_request)
class DumpingDebugHook(session_run_hook.SessionRunHook):
"""A debugger hook that dumps debug data to filesystem.
Can be used as a hook for `tf.compat.v1.train.MonitoredSession`s and
`tf.estimator.Estimator`s.
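A minimal usage sketch (the dump directory path is illustrative):

  hook = DumpingDebugHook("/tmp/tfdbg_dumps")
  estimator.train(input_fn=my_input_fn, hooks=[hook])

The dumped data can later be inspected offline, e.g., by loading the run
directories with `debug_data.DebugDumpDir`.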
"""
def __init__(self,
session_root,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Create a local debugger command-line interface (CLI) hook.
Args:
session_root: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
watch_fn: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
thread_name_filter: Regular-expression whitelist for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (bool) Whether usage is to be logged.
"""
self._session_root = session_root
self._watch_fn = watch_fn
self._thread_name_filter = thread_name_filter
self._log_usage = log_usage
self._session_wrapper = None
def begin(self):
pass
def before_run(self, run_context):
reset_disk_byte_usage = False
if not self._session_wrapper:
self._session_wrapper = dumping_wrapper.DumpingDebugWrapperSession(
run_context.session,
self._session_root,
watch_fn=self._watch_fn,
thread_name_filter=self._thread_name_filter,
log_usage=self._log_usage)
reset_disk_byte_usage = True
self._session_wrapper.increment_run_call_count()
# pylint: disable=protected-access
debug_urls, watch_options = self._session_wrapper._prepare_run_watch_config(
run_context.original_args.fetches, run_context.original_args.feed_dict)
# pylint: enable=protected-access
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
run_context.session.graph,
debug_urls=debug_urls,
debug_ops=watch_options.debug_ops,
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures),
reset_disk_byte_usage=reset_disk_byte_usage)
run_args = session_run_hook.SessionRunArgs(
None, feed_dict=None, options=run_options)
return run_args
def after_run(self, run_context, run_values):
pass
class GrpcDebugHook(session_run_hook.SessionRunHook):
"""A hook that streams debugger-related events to any grpc_debug_server.
For example, the debugger data server is a grpc_debug_server. The debugger
data server writes debugger-related events it receives via gRPC to logdir.
This enables debugging features in TensorBoard such as health pills.
When the arguments of debug_utils.watch_graph change, strongly consider
changing the arguments here too so that these features remain available to
tf.learn users.
Can be used as a hook for `tf.compat.v1.train.MonitoredSession`s and
`tf.estimator.Estimator`s.
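A minimal usage sketch (the server address is illustrative and assumes a
grpc_debug_server is already listening there):

  hook = GrpcDebugHook(["localhost:6064"])
  estimator.train(input_fn=my_input_fn, hooks=[hook])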
"""
def __init__(self,
grpc_debug_server_addresses,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Constructs a GrpcDebugHook.
Args:
grpc_debug_server_addresses: (`list` of `str`) A list of the gRPC debug
server addresses, in the format of <host:port>, with or without the
"grpc://" prefix. For example: ["localhost:7000", "192.168.0.2:8000"]
watch_fn: A function that allows for customizing which ops to watch at
which specific steps. See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__` for details.
thread_name_filter: Regular-expression whitelist for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (bool) Whether usage is to be logged.
"""
self._grpc_debug_wrapper_session = None
self._thread_name_filter = thread_name_filter
self._grpc_debug_server_addresses = (
grpc_debug_server_addresses
if isinstance(grpc_debug_server_addresses, list) else
[grpc_debug_server_addresses])
self._watch_fn = watch_fn
self._log_usage = log_usage
def before_run(self, run_context):
"""Called right before a session is run.
Args:
run_context: A session_run_hook.SessionRunContext. Encapsulates
information on the run.
Returns:
A session_run_hook.SessionRunArgs object.
"""
if not self._grpc_debug_wrapper_session:
self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(
run_context.session,
self._grpc_debug_server_addresses,
watch_fn=self._watch_fn,
thread_name_filter=self._thread_name_filter,
log_usage=self._log_usage)
fetches = run_context.original_args.fetches
feed_dict = run_context.original_args.feed_dict
watch_options = self._watch_fn(fetches, feed_dict)
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
run_context.session.graph,
debug_urls=self._grpc_debug_wrapper_session.prepare_run_debug_urls(
fetches, feed_dict),
debug_ops=watch_options.debug_ops,
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
return session_run_hook.SessionRunArgs(
None, feed_dict=None, options=run_options)
class TensorBoardDebugHook(GrpcDebugHook):
"""A tfdbg hook that can be used with TensorBoard Debugger Plugin.
This hook is the same as `GrpcDebugHook`, except that it uses a predefined
`watch_fn` that
1) uses `DebugIdentity` debug ops with the `gated_grpc` attribute set to
`True`, to allow the interactive enabling and disabling of tensor
breakpoints.
2) watches all tensors in the graph.
This saves the need for the user to define a `watch_fn`.
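A minimal usage sketch (assumes TensorBoard was started with its debugger
plugin enabled, e.g., via `--debugger_port 6064`; the address is
illustrative):

  hook = TensorBoardDebugHook("localhost:6064")
  estimator.train(input_fn=my_input_fn, hooks=[hook])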
"""
def __init__(self,
grpc_debug_server_addresses,
thread_name_filter=None,
send_traceback_and_source_code=True,
log_usage=True):
"""Constructor of TensorBoardDebugHook.
Args:
grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a
`str` or a `list` of `str`s. E.g., "localhost:2333",
"grpc://localhost:2333", ["192.168.0.7:2333", "192.168.0.8:2333"].
thread_name_filter: Optional filter for thread names.
send_traceback_and_source_code: Whether traceback of graph elements and
the source code are to be sent to the debug server(s).
log_usage: Whether the usage of this class is to be logged (if
applicable).
"""
def _gated_grpc_watch_fn(fetches, feeds):
del fetches, feeds # Unused.
return framework.WatchOptions(
debug_ops=["DebugIdentity(gated_grpc=true)"])
super(TensorBoardDebugHook, self).__init__(
grpc_debug_server_addresses,
watch_fn=_gated_grpc_watch_fn,
thread_name_filter=thread_name_filter,
log_usage=log_usage)
self._grpc_debug_server_addresses = grpc_debug_server_addresses
self._send_traceback_and_source_code = send_traceback_and_source_code
self._sent_graph_version = -1
grpc_wrapper.register_signal_handler()
def before_run(self, run_context):
if self._send_traceback_and_source_code:
self._sent_graph_version = grpc_wrapper.publish_traceback(
self._grpc_debug_server_addresses, run_context.session.graph,
run_context.original_args.feed_dict,
run_context.original_args.fetches, self._sent_graph_version)
return super(TensorBoardDebugHook, self).before_run(run_context)
# ==============================================================================
# End of tensorflow/python/debug/wrappers/hooks.py (repo: tensorflow-master)
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
from tensorflow.python.util import tf_inspect
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer, thread_name_filter=None):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
bad_run_start_action: (str) bad action value to be returned during the
on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
@test_util.run_deprecated_v1
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session(config=self._no_rewrite_session_config())
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclasses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegexp(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegexp(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
def testUsingWrappedSessionShouldWorkAsContextManager(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper as sess:
self.assertAllClose([[3.0], [4.0]], self._s.eval())
self.assertEqual(1, self._observer["on_run_start_count"])
self.assertEqual(self._s, self._observer["run_fetches"])
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertAllClose(
[[11.0], [-1.0]],
sess.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(2, self._observer["on_run_start_count"])
self.assertEqual(self._q, self._observer["run_fetches"])
self.assertEqual(2, self._observer["on_run_end_count"])
def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, self.evaluate(foo))
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
wrapper.close()
def testWrapperThreadNameFilterMainThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter="MainThread")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterChildThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=r"Child.*")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterBothThreads(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=None)
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertEqual(2, dump.size)
self.assertItemsEqual(
["a_init", "b_init"],
[datum.node_name for datum in dump.dumped_tensor_data])
def _is_public_method_name(method_name):
return (method_name.startswith("__") and method_name.endswith("__")
or not method_name.startswith("_"))
class SessionWrapperPublicMethodParityTest(test_util.TensorFlowTestCase):
def testWrapperHasAllPublicMethodsOfSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(session.Session, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
def testWrapperHasAllPublicMethodsOfMonitoredSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(monitored_session.MonitoredSession,
predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
if __name__ == "__main__":
googletest.main()
# ==============================================================================
# End of tensorflow/python/debug/wrappers/framework_test.py (repo: tensorflow-master)
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger wrapper session that dumps debug data to file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
# Google-internal import(s).
from tensorflow.core.util import event_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.platform import gfile
class DumpingDebugWrapperSession(framework.NonInteractiveDebugWrapperSession):
"""Debug Session wrapper that dumps debug data to filesystem."""
def __init__(self,
sess,
session_root,
watch_fn=None,
thread_name_filter=None,
pass_through_operrors=None,
log_usage=True):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
session_root: (`str`) Path to the session root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`tf.Session.run`
calls.
As the `run()` calls occur, subdirectories will be added to
`session_root`. The subdirectories' names have the following pattern:
run_<epoch_time_stamp>_<zero_based_run_counter>
E.g., run_1480734393835964_0
watch_fn: (`Callable`) A Callable that can be used to define per-run
debug ops and watched tensors. See the doc of
`NonInteractiveDebugWrapperSession.__init__()` for details.
thread_name_filter: Regular-expression whitelist for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
pass_through_operrors: If True, all captured OpErrors will be
propagated; by default (False), OpErrors are caught by the wrapper.
log_usage: (`bool`) whether the usage of this class is to be logged.
Raises:
ValueError: If `session_root` is an existing and non-empty directory or
if `session_root` is a file.
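A minimal usage sketch (assumes the usual `tf` import; the dump path is
illustrative):

  sess = tf.Session()
  sess = DumpingDebugWrapperSession(sess, "/tmp/tfdbg_dumps")
  sess.run(fetches)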
"""
if log_usage:
pass # No logging for open-source.
framework.NonInteractiveDebugWrapperSession.__init__(
self, sess, watch_fn=watch_fn, thread_name_filter=thread_name_filter,
pass_through_operrors=pass_through_operrors)
session_root = os.path.expanduser(session_root)
if gfile.Exists(session_root):
if not gfile.IsDirectory(session_root):
raise ValueError(
"session_root path points to a file: %s" % session_root)
elif gfile.ListDirectory(session_root):
raise ValueError(
"session_root path points to a non-empty directory: %s" %
session_root)
else:
gfile.MakeDirs(session_root)
self._session_root = session_root
self._run_counter = 0
self._run_counter_lock = threading.Lock()
def prepare_run_debug_urls(self, fetches, feed_dict):
"""Implementation of abstrat method in superclass.
See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`
for details. This implementation creates a run-specific subdirectory under
self._session_root and stores information regarding run `fetches` and
`feed_dict.keys()` in the subdirectory.
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in
this `Session.run()` call.
"""
# Use a microsecond timestamp plus a lock-guarded counter to disambiguate
# possibly concurrent run() calls.
self._run_counter_lock.acquire()
run_dir = os.path.join(self._session_root, "run_%d_%d" %
(int(time.time() * 1e6), self._run_counter))
self._run_counter += 1
self._run_counter_lock.release()
gfile.MkDir(run_dir)
fetches_event = event_pb2.Event()
fetches_event.log_message.message = repr(fetches)
fetches_path = os.path.join(
run_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)
with gfile.Open(fetches_path, "wb") as f:
f.write(fetches_event.SerializeToString())
feed_keys_event = event_pb2.Event()
feed_keys_event.log_message.message = (repr(feed_dict.keys()) if feed_dict
else repr(feed_dict))
feed_keys_path = os.path.join(
run_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)
with gfile.Open(feed_keys_path, "wb") as f:
f.write(feed_keys_event.SerializeToString())
return ["file://" + run_dir]
# ==============================================================================
# End of tensorflow/python/debug/wrappers/dumping_wrapper.py (repo: tensorflow-master)
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
invoked, with a OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns a OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constructor
returns. Control is released back to the caller of the constructor, which can
invoke the run() method of the wrapper session with the same syntax as a
non-wrapped session, e.g.:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
this run call, along with a count of how many run calls have occurred
on this wrapper session. The callback then returns an OnRunStartResponse
object, whose action field directs what the wrapper session will actually
do for the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
including the actual action performed in the wrapper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implement the instruction loop in B1 - B3.
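A minimal sketch of a concrete subclass (illustrative only; the dump path is
a placeholder):

  class SimpleFileDumpingWrapper(BaseDebugWrapperSession):

    def on_session_init(self, request):
      return OnSessionInitResponse(OnSessionInitAction.PROCEED)

    def on_run_start(self, request):
      return OnRunStartResponse(
          OnRunStartAction.DEBUG_RUN, ["file:///tmp/tfdbg_dumps"])

    def on_run_end(self, request):
      return OnRunEndResponse()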
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import re
import threading
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import monitored_session
from tensorflow.python.util import nest
# Helper function.
def _check_type(obj, expected_types):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_types: (`type` or an iterable of `type`s) The expected `type`(s)
of obj.
Raises:
TypeError: If obj is not an instance of any of the expected_types.
"""
if not isinstance(obj, expected_types):
raise TypeError("Expected type %s; got type %s" %
(expected_types, type(obj)))
class OnSessionInitRequest(object):
"""Request to an on-session-init callback.
This callback is invoked during the __init__ call to a debug-wrapper session.
"""
def __init__(self, sess):
"""Constructor.
Args:
sess: A TensorFlow Session object.
"""
_check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))
self.session = sess
class OnSessionInitAction(object):
"""Enum-like values for possible action to take on session init."""
# Proceed, without special actions, in the wrapper session initialization.
# What action the wrapper session performs next is determined by the caller
# of the wrapper session. E.g., it can call run().
PROCEED = "proceed"
# Instead of letting the caller of the wrapper session determine what actions
# the wrapper session will perform next, enter a loop to receive instructions
# from a remote client.
# For example, TensorBoard visual debugger can use this action so that it can
# launch session.run() calls remotely.
REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
"""Response from an on-session-init callback."""
def __init__(self, action):
"""Constructor.
Args:
action: (`OnSessionInitAction`) Debugger action to take on session init.
"""
_check_type(action, str)
self.action = action
class OnRunStartRequest(object):
"""Request to an on-run-start callback.
This callback is invoked during a run() call of the debug-wrapper
session, immediately after the run() call counter is incremented.
"""
def __init__(self, fetches, feed_dict, run_options, run_metadata,
run_call_count, is_callable_runner=False):
"""Constructor of `OnRunStartRequest`.
Args:
fetches: Fetch targets of the run() call.
feed_dict: The feed dictionary to the run() call.
run_options: RunOptions input to the run() call.
run_metadata: RunMetadata input to the run() call.
The above four arguments are identical to the input arguments to the
run() method of a non-wrapped TensorFlow session.
run_call_count: 1-based count of how many run calls (including this one)
have been invoked.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
"""
self.fetches = fetches
self.feed_dict = feed_dict
self.run_options = run_options
self.run_metadata = run_metadata
self.run_call_count = run_call_count
self.is_callable_runner = is_callable_runner
class OnRunStartAction(object):
"""Enum-like values for possible action to take on start of a run() call."""
# Run once with debug tensor-watching.
DEBUG_RUN = "debug_run"
# Run once with profiler.
PROFILE_RUN = "profile_run"
# Run without debug tensor-watching.
NON_DEBUG_RUN = "non_debug_run"
class OnRunStartResponse(object):
"""Request from an on-run-start callback.
The caller of the callback can use this response object to specify what
action the debug-wrapper session actually takes on the run() call.
"""
def __init__(self,
action,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of `OnRunStartResponse`.
Args:
action: (`OnRunStartAction`) the action actually taken by the wrapped
session for the run() call.
debug_urls: (`list` of `str`) debug_urls used in watching the tensors
during the run() call.
debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the
debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
_check_type(action, str)
self.action = action
_check_type(debug_urls, list)
self.debug_urls = debug_urls
self.debug_ops = debug_ops
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
class OnRunEndRequest(object):
"""Request to an on-run-end callback.
The callback is invoked immediately before the wrapped run() call ends.
"""
def __init__(self,
performed_action,
run_metadata=None,
client_graph_def=None,
tf_error=None):
"""Constructor for `OnRunEndRequest`.
Args:
performed_action: (`OnRunStartAction`) Actually-performed action by the
debug-wrapper session.
run_metadata: run_metadata output from the run() call (if any).
client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
the python front end of TensorFlow. Can be obtained with
session.graph.as_graph_def().
tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
during the run (if any).
"""
_check_type(performed_action, str)
self.performed_action = performed_action
if run_metadata is not None:
_check_type(run_metadata, config_pb2.RunMetadata)
self.run_metadata = run_metadata
self.client_graph_def = client_graph_def
self.tf_error = tf_error
class OnRunEndResponse(object):
"""Response from an on-run-end callback."""
def __init__(self):
# Currently only a placeholder.
pass
@six.add_metaclass(abc.ABCMeta)
class BaseDebugWrapperSession(session.SessionInterface):
"""Base class of debug-wrapper session classes.
Concrete classes that inherit from this class need to implement the abstract
methods such as on_session_init, on_run_start and on_run_end.
"""
def __init__(self, sess, thread_name_filter=None,
pass_through_operrors=False):
"""Constructor of `BaseDebugWrapperSession`.
Args:
sess: An (unwrapped) TensorFlow session instance. It should be a subtype
of `BaseSession` or `tf.MonitoredSession`.
thread_name_filter: Regular-expression filter (whitelist) for name(s) of
thread(s) on which the wrapper session will be active. This regular
expression is used in a start-anchored fashion on the thread name, i.e.,
by applying the `match` method of the compiled pattern. The default
`None` means that the wrapper session will be active on all threads.
E.g., r"MainThread$", r"QueueRunnerThread.*".
pass_through_operrors: If True, all captured OpErrors will be
propagated; by default (False), OpErrors are caught by the wrapper.
Raises:
ValueError: On invalid `OnSessionInitAction` value.
NotImplementedError: If a non-DirectSession sess object is received.
"""
_check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))
# The session being wrapped.
self._sess = sess
self._thread_name_filter_pattern = (re.compile(thread_name_filter)
if thread_name_filter else None)
# TODO(cais/kstevens): Unittest this pass through feature.
self._pass_through_operrors = pass_through_operrors
# Keeps track of number of run calls that have been performed on this
# debug-wrapper session. The count can be used for purposes such as
# displaying the state of the Session in a UI and determining a run
# number-dependent debug URL.
self._run_call_count = 0
# Invoke on-session-init callback.
response = self.on_session_init(OnSessionInitRequest(self._sess))
_check_type(response, OnSessionInitResponse)
if response.action == OnSessionInitAction.PROCEED:
pass
elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
# TODO(cais): Implement REMOTE_INSTR_LOOP
raise NotImplementedError(
"OnSessionInitAction REMOTE_INSTR_LOOP has not been "
"implemented.")
else:
raise ValueError(
"Invalid OnSessionInitAction value: %s" % response.action)
self._default_session_context_manager = None
# A cache for callables created from CallableOptions.
self._cached_callables_from_options = {}
@property
def graph(self):
return self._sess.graph
@property
def graph_def(self):
return self._sess.graph_def
@property
def sess_str(self):
return self._sess.sess_str
@property
def session(self):
return self._sess
def run(self,
fetches,
feed_dict=None,
options=None,
run_metadata=None,
callable_runner=None,
callable_runner_args=None,
callable_options=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
fetches: Same as the `fetches` arg to regular `Session.run()`.
feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.
options: Same as the `options` arg to regular `Session.run()`.
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
callable_runner: A `callable` returned by `Session.make_callable()`.
If not `None`, `fetches` and `feed_dict` must both be `None`.
Mutually exclusive with `callable_options`.
callable_runner_args: An optional list of arguments to `callable_runner`
or for `callable_options`.
callable_options: An instance of `config_pb2.CallableOptions`, to be
used with `Session._make_callable_from_options()`. Mutually exclusive
with `callable_runner`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
Raises:
ValueError: On invalid `OnRunStartAction` value. Or if `callable_runner`
is not `None` while either or both of `fetches` and `feed_dict` are also
specified.
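For example (a sketch), a callable created via `make_callable()` is routed
back through this method:

  runner = wrapper.make_callable(fetches)
  result = runner()  # Internally calls wrapper.run(None, callable_runner=...)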
"""
if callable_runner and callable_options:
raise ValueError(
"callable_runner and callable_options are mutually exclusive, but "
"are both specified in this call to BaseDebugWrapperSession.run().")
if callable_runner and (fetches or feed_dict):
raise ValueError(
"callable_runner and fetches/feed_dict are mutually exclusive, "
"but are used simultaneously.")
elif callable_options and (fetches or feed_dict):
raise ValueError(
"callable_options and fetches/feed_dict are mutually exclusive, "
"but are used simultaneously.")
self.increment_run_call_count()
def is_empty(x):
"""Check whether a possibly nested structure is empty."""
if not nest.is_nested(x):
return False
if isinstance(x, collections.abc.Mapping):
return is_empty(list(x.values()))
for item in x:
if not is_empty(item):
return False
return True
empty_fetches = is_empty(fetches)
if empty_fetches:
tf_logging.info(
"Due to empty fetches, tfdbg Session wrapper is letting a "
"Session.run pass through without any debugging actions.")
if self._is_disabled_thread() or empty_fetches:
if callable_runner:
return callable_runner(*callable_runner_args)
elif callable_options:
# pylint:disable=protected-access
return self._sess._make_callable_from_options(
callable_options)(*callable_runner_args)
# pylint:enable=protected-access
else:
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Invoke on-run-start callback and obtain response.
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count,
is_callable_runner=bool(callable_runner)))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
retvals, run_end_req = self._run_with_debugging(
run_start_resp, fetches, feed_dict, options, run_metadata,
callable_runner, callable_runner_args, callable_options)
elif run_start_resp.action == OnRunStartAction.PROFILE_RUN:
retvals, run_end_req = self._run_with_profiling(
run_start_resp, fetches, feed_dict, options, run_metadata,
callable_runner, callable_runner_args, callable_options)
elif run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN:
# Invoke run() method of the wrapped session.
if callable_runner:
retvals = callable_runner(*callable_runner_args)
elif callable_options:
# pylint:disable=protected-access
callable_object = self._sess._make_callable_from_options(
callable_options)
# pylint:enable=protected-access
retvals = callable_object(*callable_runner_args)
else:
retvals = self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Prepare arg for the on-run-end callback.
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
# Invoke on-run-end callback and obtain response.
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
# Currently run_end_resp is only a placeholder. No action is taken on it.
return retvals
def _run_with_debugging(self,
run_start_resp,
fetches,
feed_dict,
options,
run_metadata,
callable_runner,
callable_runner_args,
callable_options):
"""Perform a session.run() or callable with debugging."""
# Decorate RunOption to fill in debugger tensor watch specifications.
decorated_run_options = None
if callable_options:
callable_options_id = id(callable_options)
if callable_options_id not in self._cached_callables_from_options:
# Make a copy of callable_options to avoid mutating it.
new_callable_options = config_pb2.CallableOptions()
new_callable_options.CopyFrom(callable_options)
decorated_run_options = new_callable_options.run_options
else:
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
if decorated_run_options:
self._decorate_run_options_for_debug(
decorated_run_options,
run_start_resp.debug_urls,
debug_ops=run_start_resp.debug_ops,
node_name_regex_whitelist=(
run_start_resp.node_name_regex_whitelist),
op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=(
run_start_resp.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
run_start_resp.tolerate_debug_op_creation_failures))
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
tf_error = None
try:
if callable_runner:
retvals = callable_runner(*callable_runner_args,
options=decorated_run_options,
run_metadata=run_metadata)
elif callable_options:
# pylint:disable=protected-access
if callable_options_id in self._cached_callables_from_options:
callable_object = self._cached_callables_from_options[
callable_options_id]
else:
callable_object = self._sess._make_callable_from_options(
new_callable_options)
self._cached_callables_from_options[
callable_options_id] = callable_object
# pylint:enable=protected-access
retvals = callable_object(
*callable_runner_args, run_metadata=run_metadata)
else:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
if self._pass_through_operrors:
raise op_error
tf_error = op_error
retvals = op_error
return retvals, OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
def _run_with_profiling(self,
run_start_resp,
fetches,
feed_dict,
options,
run_metadata,
callable_runner,
callable_runner_args,
callable_options):
"""Perform a session.run() or callable with profiling."""
# Decorate RunOption to fill in debugger tensor watch specifications.
decorated_run_options = None
if callable_options:
callable_options_id = id(callable_options)
if callable_options_id not in self._cached_callables_from_options:
# Make a copy of callable_options to avoid mutating it.
new_callable_options = config_pb2.CallableOptions()
new_callable_options.CopyFrom(callable_options)
decorated_run_options = new_callable_options.run_options
else:
decorated_run_options = options or config_pb2.RunOptions()
self._decorate_run_options_for_profile(decorated_run_options)
run_metadata = run_metadata or config_pb2.RunMetadata()
if callable_runner:
retvals = callable_runner(*callable_runner_args,
options=decorated_run_options,
run_metadata=run_metadata)
elif callable_options:
# pylint:disable=protected-access
callable_object = self._sess._make_callable_from_options(
new_callable_options)
# pylint:enable=protected-access
retvals = callable_object(
*callable_runner_args, run_metadata=run_metadata)
else:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
return retvals, OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def())
def _is_disabled_thread(self):
thread_name = threading.current_thread().name or ""
return (self._thread_name_filter_pattern and
not self._thread_name_filter_pattern.match(thread_name))
def run_step_fn(self, step_fn):
return step_fn(
monitored_session.MonitoredSession.StepContext(self._sess, self.run))
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError(
"partial_run_setup is not implemented for debug-wrapper sessions.")
def partial_run(self, handle, fetches, feed_dict=None):
raise NotImplementedError(
"partial_run is not implemented for debug-wrapper sessions.")
def list_devices(self, *args, **kwargs):
return self._sess.list_devices(*args, **kwargs)
def reset(self, *args, **kwargs):
return self._sess.reset(*args, **kwargs)
def make_callable(self,
fetches,
feed_list=None,
accept_options=False):
runner = self._sess.make_callable(
fetches, feed_list=feed_list, accept_options=True)
def wrapped_runner(*runner_args, **kwargs):
return self.run(None,
feed_dict=None,
options=kwargs.get("options", None),
run_metadata=kwargs.get("run_metadata", None),
callable_runner=runner,
callable_runner_args=runner_args)
return wrapped_runner
def _make_callable_from_options(self, callable_options):
def wrapped_runner(*feed_values, **kwargs):
return self.run(None,
run_metadata=kwargs.get("run_metadata", None),
callable_options=callable_options,
callable_runner_args=feed_values)
return wrapped_runner
@property
def run_call_count(self):
return self._run_call_count
def increment_run_call_count(self):
self._run_call_count += 1
def _is_disk_usage_reset_each_run(self):
"""Indicates whether disk usage is reset after each Session.run.
Subclasses that clean up the disk usage after every run should
override this protected method.
Returns:
(`bool`) Whether the disk usage amount is reset to zero after
each Session.run.
"""
return False
def _decorate_run_options_for_debug(
self,
run_options,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Modify a RunOptions object for debug tensor watching.
Specifies request for outputting partition graphs. Adds
debug_tensor_watch_opts with proper debug URLs.
Args:
run_options: (RunOptions) the modified RunOptions object.
debug_urls: (list of str) debug URLs to be entered in run_options.
debug_tensor_watch_opts.
debug_ops: (str or list of str) debug op(s) to be used by the debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
run_options.output_partition_graphs = True
debug_utils.watch_graph(
run_options,
self._sess.graph,
debug_urls=debug_urls,
debug_ops=debug_ops,
node_name_regex_whitelist=node_name_regex_whitelist,
op_type_regex_whitelist=op_type_regex_whitelist,
tensor_dtype_regex_whitelist=tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
reset_disk_byte_usage=(self._run_call_count == 1 or
self._is_disk_usage_reset_each_run()))
def _decorate_run_options_for_profile(self, run_options):
"""Modify a RunOptions object for profiling TensorFlow graph execution.
Args:
run_options: (RunOptions) the modified RunOptions object.
"""
run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
@abc.abstractmethod
def on_session_init(self, request):
"""Callback invoked during construction of the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the constructor ends.
Args:
request: (`OnSessionInitRequest`) callback request carrying information
such as the session being wrapped.
Returns:
An instance of `OnSessionInitResponse`.
"""
@abc.abstractmethod
def on_run_start(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens after the wrapper's run() call is entered,
and after the run-call counter has been incremented.
Args:
request: (`OnRunStartRequest`) callback request object carrying
information about the run call such as the fetches, feed dict, run
options, run metadata, and how many `run()` calls to this wrapper
session have occurred.
Returns:
An instance of `OnRunStartResponse`, carrying information such as the
debug URLs used to watch the tensors during the run.
"""
@abc.abstractmethod
def on_run_end(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the wrapper exits its run() call.
Args:
request: (`OnRunEndRequest`) callback request object carrying information
such as the actual action performed by the session wrapper for the
run() call.
Returns:
An instance of `OnRunEndResponse`.
"""
def as_default(self):
return ops.default_session(self)
def __enter__(self):
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
self._default_session_context_manager.__exit__(
exec_type, exec_value, exec_tb)
def __del__(self):
if hasattr(self._sess, "__del__"):
self._sess.__del__()
def close(self):
self._sess.close()
# TODO(cais): Add _node_name_regex_whitelist and
# _node_op_type_regex_whitelist.
def should_stop(self):
if hasattr(self._sess, "should_stop"):
return self._sess.should_stop()
else:
raise ValueError(
"The wrapped session %r does not have a method called 'should_stop'. "
"Do you intend to wrap a tf.MonitoredSession instead?" % self._sess)
class WatchOptions(object):
"""Type for return values of watch_fn."""
def __init__(self,
debug_ops=None,
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of WatchOptions: Debug watch options.
Used as return values of `watch_fn`s.
Args:
debug_ops: (`str` or `list of str`) Debug ops to be used.
node_name_regex_whitelist: Regular-expression whitelist for node_name,
e.g., `"(weight_[0-9]+|bias_.*)"`
op_type_regex_whitelist: Regular-expression whitelist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both `node_name_regex_whitelist` and `op_type_regex_whitelist`
are set, the two filtering operations will occur in a logical `AND`
relation. In other words, a node will be included if and only if it
hits both whitelists.
tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor
data type, e.g., `"^int.*"`.
This whitelist operates in a logical `AND` relation with the two
whitelists above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
"""
if debug_ops:
self.debug_ops = debug_ops
else:
self.debug_ops = ["DebugIdentity"]
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
def __repr__(self):
return ("WatchOptions(debug_ops=%r, node_name_regex_whitelist=%r, "
"op_type_regex_whitelist=%r, tensor_dtype_regex_whitelist=%r, "
"tolerate_debug_op_creation_failures=%r)" % (
self.debug_ops, self.node_name_regex_whitelist,
self.op_type_regex_whitelist, self.tensor_dtype_regex_whitelist,
self.tolerate_debug_op_creation_failures))
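# --- Editor's illustrative sketch (not part of the original file) ---
# A typical `watch_fn` inspects or ignores a run's fetches/feeds and returns
# a `WatchOptions` instance; the function name and regexes below are
# hypothetical.
def _example_watch_fn(fetches, feeds):
  del fetches, feeds  # This example applies the same options to every run.
  return WatchOptions(
      debug_ops=["DebugIdentity"],
      node_name_regex_whitelist=r"(weight_[0-9]+|bias_.*)",
      tensor_dtype_regex_whitelist=r"^float32$")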
class NonInteractiveDebugWrapperSession(BaseDebugWrapperSession):
"""Base class for non-interactive (i.e., non-CLI) debug wrapper sessions."""
def __init__(self, sess, watch_fn=None, thread_name_filter=None,
pass_through_operrors=False):
"""Constructor of NonInteractiveDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a
debugged `Session.run()` call to `WatchOptions`.
* Args:
* `fetches`: the fetches to the `Session.run()` call.
* `feeds`: the feeds to the `Session.run()` call.
* Returns:
(`tf_debug.WatchOptions`) An object containing debug options including
the debug ops to use, the node names, op types and/or tensor data
types to watch, etc. See the documentation of `tf_debug.WatchOptions`
for more details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
pass_through_operrors: If True, all captured OpErrors will be
propagated; by default (False), OpErrors are captured and handled
by the wrapper.
Raises:
TypeError: If a non-None `watch_fn` is specified and it is not callable.
"""
BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter,
pass_through_operrors=pass_through_operrors)
self._watch_fn = None
if watch_fn is not None:
if not callable(watch_fn):
raise TypeError("watch_fn is not callable")
self._watch_fn = watch_fn
def on_session_init(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
return OnSessionInitResponse(OnSessionInitAction.PROCEED)
@abc.abstractmethod
def prepare_run_debug_urls(self, fetches, feed_dict):
"""Abstract method to be implemented by concrete subclasses.
This method prepares the run-specific debug URL(s).
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) Debug URLs to be used in
this `Session.run()` call.
"""
def on_run_start(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
debug_urls, watch_opts = self._prepare_run_watch_config(
request.fetches, request.feed_dict)
return OnRunStartResponse(
OnRunStartAction.DEBUG_RUN,
debug_urls,
debug_ops=watch_opts.debug_ops,
node_name_regex_whitelist=watch_opts.node_name_regex_whitelist,
op_type_regex_whitelist=watch_opts.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_opts.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_opts.tolerate_debug_op_creation_failures))
def _prepare_run_watch_config(self, fetches, feed_dict):
"""Get the debug_urls, and node/op whitelists for the current run() call.
Args:
fetches: Same as the `fetches` argument to `Session.run()`.
feed_dict: Same as the `feed_dict` argument to `Session.run()`.
Returns:
debug_urls: (str or list of str) Debug URLs for the current run() call,
as returned by `prepare_run_debug_urls()`.
watch_options: (WatchOptions) The return value of a watch_fn, containing
options including debug_ops, and whitelists.
"""
debug_urls = self.prepare_run_debug_urls(fetches, feed_dict)
if self._watch_fn is None:
watch_options = WatchOptions()
else:
watch_options = self._watch_fn(fetches, feed_dict)
if isinstance(watch_options, tuple):
# For legacy return type (tuples).
watch_options = WatchOptions(*watch_options)
return debug_urls, watch_options
def on_run_end(self, request):
"""See doc of BaseDebugWrapperSession.on_run_end."""
return OnRunEndResponse()
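# --- Editor's illustrative sketch (not part of the original file) ---
# A concrete non-interactive wrapper only needs to supply the per-run debug
# URLs; watch_fn handling and the run-start/run-end callbacks are inherited.
# The class name and dump path below are hypothetical.
class _ExampleFileDumpingSession(NonInteractiveDebugWrapperSession):
  """Minimal sketch that sends all debug data to one fixed file:// URL."""
  def prepare_run_debug_urls(self, fetches, feed_dict):
    del fetches, feed_dict  # This sketch uses the same URL for every run.
    return ["file:///tmp/tfdbg_example_dumps"]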
|
tensorflow-master
|
tensorflow/python/debug/wrappers/framework.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import profile_analyzer_cli
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
_DUMP_ROOT_PREFIX = "tfdbg_"
class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self,
sess,
dump_root=None,
log_usage=True,
ui_type="curses",
thread_name_filter=None):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards. If `None`, the debug dumps will
be at tfdbg_<random_string> under the system temp directory.
log_usage: (`bool`) whether the usage of this class is to be logged.
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
thread_name_filter: Regular-expression white list for thread name. See
the doc of `BaseDebugWrapperSession` for details.
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
dump_root is a file.
"""
if log_usage:
pass # No logging for open-source.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
if not dump_root:
self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX)
else:
dump_root = os.path.expanduser(dump_root)
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Register frequently-used filter(s).
self.add_tensor_filter("has_inf_or_nan", debug_data.has_inf_or_nan)
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
#   a filter is in effect, this object will call the run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._active_filter_exclude_node_names = None
self._active_tensor_filter_run_start_response = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._is_run_start = True
self._ui_type = ui_type
def _is_disk_usage_reset_each_run(self):
# The dumped tensors are all cleaned up after every Session.run
# in a command-line wrapper.
return True
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
ap.add_argument(
"-fenn",
"--filter_exclude_node_names",
dest="filter_exclude_node_names",
type=str,
default="",
help="When applying the tensor filter, exclude node with names "
"matching the regular expression. Applicable only if --tensor_filter "
"or -f is used.")
ap.add_argument(
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="Regular-expression filter for node names to be watched in the "
"run, e.g., loss, reshape.*")
ap.add_argument(
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="Regular-expression filter for op type to be watched in the run, "
"e.g., (MatMul|Add), Variable.*")
ap.add_argument(
"--tensor_dtype_filter",
dest="tensor_dtype_filter",
type=str,
default="",
help="Regular-expression filter for tensor dtype to be watched in the "
"run, e.g., (float32|float64), int.*")
ap.add_argument(
"-p",
"--profile",
dest="profile",
action="store_true",
help="Run and profile TensorFlow graph execution.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
self._argparsers["print_feed"] = command_parser.get_print_tensor_argparser(
"Print the value of a feed in feed_dict.")
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Args:
request: An instance of `OnRunStartRequest`.
Returns:
An instance of `OnRunStartResponse`.
"""
self._is_run_start = True
self._update_run_calls_state(
request.run_call_count, request.fetches, request.feed_dict,
is_callable_runner=request.is_callable_runner)
if self._active_tensor_filter:
# If we are running until a filter passes, we just need to keep running
# with the previous `OnRunStartResponse`.
return self._active_tensor_filter_run_start_response
self._exit_if_requested_by_user()
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return (self._run_start_response or
framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls()))
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli()
if self._active_tensor_filter:
self._active_tensor_filter_run_start_response = self._run_start_response
if self._run_through_times > 1:
self._run_through_times -= 1
self._exit_if_requested_by_user()
return self._run_start_response
def _exit_if_requested_by_user(self):
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cli = ui_factory.get_ui(self._ui_type)
help_intro = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
# Show logo at the onset of the first run.
help_intro.extend(cli_shared.get_tfdbg_logo())
help_intro.extend(debugger_cli_common.get_tensorflow_version_lines())
help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
help_intro.extend(self._run_info)
self._run_cli.set_help_intro(help_intro)
# Create initial screen output detailing the run.
self._title = "run-start: " + self._run_description
self._init_command = "run_info"
self._title_color = "blue_on_white"
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
request: An instance of OnRunEndRequest.
Returns:
An instance of OnRunEndResponse.
"""
self._is_run_start = False
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
if request.tf_error and not os.path.isdir(self._dump_root):
# It is possible that the dump root may not exist due to errors that
# have occurred prior to graph execution (e.g., invalid device
# assignments), in which case we will just raise the exception as the
# unwrapped Session does.
raise request.tf_error
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
debug_dump.set_python_graph(self._sess.graph)
passed_filter = None
passed_filter_exclude_node_names = None
if self._active_tensor_filter:
if not debug_dump.find(
self._tensor_filters[self._active_tensor_filter], first_n=1,
exclude_node_names=self._active_filter_exclude_node_names):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
self._remove_dump_root()
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
passed_filter = self._active_tensor_filter
passed_filter_exclude_node_names = (
self._active_filter_exclude_node_names)
self._active_tensor_filter = None
self._active_filter_exclude_node_names = None
self._prep_debug_cli_for_run_end(
debug_dump, request.tf_error, passed_filter,
passed_filter_exclude_node_names)
self._run_start_response = self._launch_cli()
# Clean up the dump generated by this run.
self._remove_dump_root()
elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN:
self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata)
self._run_start_response = self._launch_cli()
else:
# No debug information to show following a non-debug run() call.
self._run_start_response = None
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _prep_debug_cli_for_run_end(self,
debug_dump,
tf_error,
passed_filter,
passed_filter_exclude_node_names):
"""Prepare (but not launch) CLI for run-end, with debug dump from the run.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
run.
tf_error: (None or OpError) OpError that happened during the run() call
(if any).
passed_filter: (None or str) Name of the tensor filter that just passed
and caused the preparation of this run-end CLI (if any).
passed_filter_exclude_node_names: (None or str) Regular expression used
with the tensor filter to exclude ops with names matching the regular
expression.
"""
if tf_error:
help_intro = cli_shared.get_error_intro(tf_error)
self._init_command = "help"
self._title_color = "red_on_white"
else:
help_intro = None
self._init_command = "lt"
self._title_color = "black_on_white"
if passed_filter is not None:
# Some dumped tensor(s) from this run passed the filter.
self._init_command = "lt -f %s" % passed_filter
if passed_filter_exclude_node_names:
self._init_command += (" --filter_exclude_node_names %s" %
passed_filter_exclude_node_names)
self._title_color = "red_on_white"
self._run_cli = analyzer_cli.create_analyzer_ui(
debug_dump, self._tensor_filters, ui_type=self._ui_type,
on_ui_exit=self._remove_dump_root)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
self._run_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
self._title = "run-end: " + self._run_description
if help_intro:
self._run_cli.set_help_intro(help_intro)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self._init_command = "lp"
self._run_cli = profile_analyzer_cli.create_profiler_ui(
py_graph, run_metadata, ui_type=self._ui_type,
config=self._run_cli.config)
self._title = "run-end (profiler mode): " + self._run_description
def _launch_cli(self):
"""Launch the interactive command-line interface.
Returns:
The OnRunStartResponse specified by the user using the "run" command.
"""
self._register_this_run_info(self._run_cli)
response = self._run_cli.run_ui(
init_command=self._init_command,
title=self._title,
title_color=self._title_color)
return response
def _run_info_handler(self, args, screen_info=None):
output = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
output.extend(cli_shared.get_tfdbg_logo())
output.extend(debugger_cli_common.get_tensorflow_version_lines())
output.extend(self._run_info)
if (not self._is_run_start and
debugger_cli_common.MAIN_MENU_KEY in output.annotations):
menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY]
if "list_tensors" not in menu.captions():
menu.insert(
0, debugger_cli_common.MenuItem("list_tensors", "list_tensors"))
return output
def _print_feed_handler(self, args, screen_info=None):
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
if not self._feed_dict:
return cli_shared.error(
"The feed_dict of the current run is None or empty.")
parsed = self._argparsers["print_feed"].parse_args(args)
tensor_name, tensor_slicing = (
command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))
feed_key = None
feed_value = None
for key in self._feed_dict:
key_name = common.get_graph_element_name(key)
if key_name == tensor_name:
feed_key = key_name
feed_value = self._feed_dict[key]
break
if feed_key is None:
return cli_shared.error(
"The feed_dict of the current run does not contain the key %s" %
tensor_name)
else:
return cli_shared.format_tensor(
feed_value,
feed_key + " (feed)",
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges),
include_numeric_summary=parsed.numeric_summary)
def _run_handler(self, args, screen_info=None):
"""Command handler for "run" command during on-run-start."""
del screen_info # Currently unused.
parsed = self._argparsers["run"].parse_args(args)
parsed.node_name_filter = parsed.node_name_filter or None
parsed.op_type_filter = parsed.op_type_filter or None
parsed.tensor_dtype_filter = parsed.tensor_dtype_filter or None
if parsed.filter_exclude_node_names and not parsed.till_filter_pass:
raise ValueError(
"The --filter_exclude_node_names (or -feon) flag is valid only if "
"the --till_filter_pass (or -f) flag is used.")
if parsed.profile:
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.PROFILE_RUN, []))
self._skip_debug = parsed.no_debug
self._run_through_times = parsed.times
if parsed.times > 1 or parsed.no_debug:
# If requested -t times > 1, the very next run will be a non-debug run.
action = framework.OnRunStartAction.NON_DEBUG_RUN
debug_urls = []
else:
action = framework.OnRunStartAction.DEBUG_RUN
debug_urls = self._get_run_debug_urls()
run_start_response = framework.OnRunStartResponse(
action,
debug_urls,
node_name_regex_whitelist=parsed.node_name_filter,
op_type_regex_whitelist=parsed.op_type_filter,
tensor_dtype_regex_whitelist=parsed.tensor_dtype_filter)
if parsed.till_filter_pass:
# For the run-till-filter-pass (run -f) mode, use the DEBUG_RUN
# option to access the intermediate tensors, and set the corresponding
# state flag of the class itself to True.
if parsed.till_filter_pass in self._tensor_filters:
action = framework.OnRunStartAction.DEBUG_RUN
self._active_tensor_filter = parsed.till_filter_pass
self._active_filter_exclude_node_names = (
parsed.filter_exclude_node_names)
self._active_tensor_filter_run_start_response = run_start_response
else:
# Handle invalid filter name.
return debugger_cli_common.RichTextLines(
["ERROR: tensor filter \"%s\" does not exist." %
parsed.till_filter_pass])
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(exit_token=run_start_response)
def _register_this_run_info(self, curses_cli):
curses_cli.register_command_handler(
"run",
self._run_handler,
self._argparsers["run"].format_help(),
prefix_aliases=["r"])
curses_cli.register_command_handler(
"run_info",
self._run_info_handler,
self._argparsers["run_info"].format_help(),
prefix_aliases=["ri"])
curses_cli.register_command_handler(
"print_feed",
self._print_feed_handler,
self._argparsers["print_feed"].format_help(),
prefix_aliases=["pf"])
if self._tensor_filters:
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
if self._feed_dict and hasattr(self._feed_dict, "keys"):
# Register tab completion for feed_dict keys.
feed_keys = [common.get_graph_element_name(key)
for key in self._feed_dict.keys()]
curses_cli.register_tab_comp_context(["print_feed", "pf"], feed_keys)
def _get_run_debug_urls(self):
"""Get the debug_urls value for the current run() call.
Returns:
debug_urls: (list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
"""
return ["file://" + self._dump_root]
def _update_run_calls_state(self,
run_call_count,
fetches,
feed_dict,
is_callable_runner=False):
"""Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
feed_dict: None or a dict. This is the feed_dict argument to the run()
call.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
"""
self._run_call_count = run_call_count
self._feed_dict = feed_dict
self._run_description = cli_shared.get_run_short_description(
run_call_count,
fetches,
feed_dict,
is_callable_runner=is_callable_runner)
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(
run_call_count,
fetches,
feed_dict,
self._tensor_filters,
is_callable_runner=is_callable_runner)
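# --- Editor's illustrative sketch (not part of the original file) ---
# Typical usage: wrap an existing Session and call run() as usual; the CLI is
# launched at run-start and run-end. `sess` and `fetches` are assumed to come
# from the caller's code:
#   sess = LocalCLIDebugWrapperSession(sess, ui_type="readline")
#   sess.run(fetches)  # Drops into the tfdbg CLI.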
|
tensorflow-master
|
tensorflow/python/debug/wrappers/local_cli_wrapper.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit Tests for classes in dumping_wrapper.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import threading
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("b/120545219")
class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.session_root = tempfile.mkdtemp()
self.v = variables.VariableV1(10.0, dtype=dtypes.float32, name="v")
self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self.session_root):
shutil.rmtree(self.session_root)
def _assert_correct_run_subdir_naming(self, run_subdir):
self.assertStartsWith(run_subdir, "run_")
self.assertEqual(2, run_subdir.count("_"))
self.assertGreater(int(run_subdir.split("_")[1]), 0)
def testConstructWrapperWithExistingNonEmptyRootDirRaisesException(self):
dir_path = os.path.join(self.session_root, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "session_root path points to a non-empty directory"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=self.session_root, log_usage=False)
def testConstructWrapperWithExistingFileDumpRootRaisesException(self):
file_path = os.path.join(self.session_root, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(gfile.Exists(file_path))
self.assertFalse(gfile.IsDirectory(file_path))
with self.assertRaisesRegexp(ValueError,
"session_root path points to a file"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=file_path, log_usage=False)
def testConstructWrapperWithNonexistentSessionRootCreatesDirectory(self):
new_dir_path = os.path.join(tempfile.mkdtemp(), "new_dir")
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=new_dir_path, log_usage=False)
self.assertTrue(gfile.IsDirectory(new_dir_path))
# Cleanup.
gfile.DeleteRecursively(new_dir_path)
def testDumpingOnASingleRunWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingOnASingleRunWorksWithRelativePathForDebugDumpDir(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
cwd = os.getcwd()
try:
os.chdir(self.session_root)
dump = debug_data.DebugDumpDir(
os.path.relpath(dump_dirs[0], self.session_root))
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
finally:
os.chdir(cwd)
def testDumpingOnASingleRunWithFeedDictWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
feed_dict = {self.ph: 3.2}
sess.run(self.inc_w_ph, feed_dict=feed_dict)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_w_ph), dump.run_fetches_info)
self.assertEqual(repr(feed_dict.keys()), dump.run_feed_keys_info)
def testDumpingOnMultipleRunsWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(3, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testUsingNonCallableAsWatchFnRaisesTypeError(self):
bad_watch_fn = "bad_watch_fn"
with self.assertRaisesRegexp(TypeError, "watch_fn is not callable"):
dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=bad_watch_fn,
log_usage=False)
def testDumpingWithLegacyWatchFnOnFetchesWorks(self):
"""Use a watch_fn that returns different whitelists for different runs."""
def watch_fn(fetches, feeds):
del feeds
# A watch_fn that picks fetch name.
if fetches.name == "inc_v:0":
# If inc_v, watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If dec_v, watch nothing.
return "DebugIdentity", r"$^", r"$^"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
sess.run(self.dec_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(6, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertGreater(dump.size, 0)
self.assertAllClose([10.0 - 0.4 * (i / 2)],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.dec_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingWithLegacyWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return ["DebugIdentity", "DebugNumericSummary"], r".*", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
def testDumpingWithWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity", "DebugNumericSummary"],
node_name_regex_whitelist=r"^v.*",
op_type_regex_whitelist=r".*",
tensor_dtype_regex_whitelist=".*_ref")
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
dumped_nodes = [dump.node_name for dump in dump.dumped_tensor_data]
self.assertNotIn("inc_v", dumped_nodes)
self.assertNotIn("delta", dumped_nodes)
def testDumpingDebugHookWithoutWatchFnWorks(self):
dumping_hook = hooks.DumpingDebugHook(self.session_root, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch every ref-type tensor.
return framework.WatchOptions(
debug_ops="DebugIdentity",
tensor_dtype_regex_whitelist=".*_ref")
else:
# If even-index run, watch nothing.
return framework.WatchOptions(
debug_ops="DebugIdentity",
node_name_regex_whitelist=r"^$",
op_type_regex_whitelist=r"^$")
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertNotIn("delta",
[datum.node_name for datum in dump.dumped_tensor_data])
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulLegacyWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If even-index run, watch nothing.
return "DebugIdentity", r"$^", r"$^"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingFromMultipleThreadsObeysThreadNameFilter(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False,
thread_name_filter=r"MainThread$")
self.assertAllClose(1.0, sess.run(self.delta))
child_thread_result = []
def child_thread_job():
child_thread_result.append(sess.run(self.eta))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
thread.join()
self.assertAllClose([-1.4], child_thread_result)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertEqual(1, dump.size)
self.assertEqual("delta", dump.dumped_tensor_data[0].node_name)
def testDumpingWrapperWithEmptyFetchWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run([])
if __name__ == "__main__":
googletest.main()
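# --- Editor's note (not part of the original file) ---
# The dumps asserted on above can also be inspected offline; the session-root
# path below is hypothetical:
#   run_dirs = glob.glob(os.path.join("/tmp/session_root", "run_*"))
#   dump = debug_data.DebugDumpDir(run_dirs[0])
#   print([datum.node_name for datum in dump.dumped_tensor_data])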
|
tensorflow-master
|
tensorflow/python/debug/wrappers/dumping_wrapper_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger wrapper session that sends debug data to file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import signal
import sys
import traceback
import six
# Google-internal import(s).
from tensorflow.python.debug.lib import common
from tensorflow.python.debug.wrappers import framework
def publish_traceback(debug_server_urls,
graph,
feed_dict,
fetches,
old_graph_version):
"""Publish traceback and source code if graph version is new.
`graph.version` is compared with `old_graph_version`. If the former is higher
(i.e., newer), the graph traceback and the associated source code are sent to
the debug server(s) at the specified gRPC URLs.
Args:
debug_server_urls: A single gRPC debug server URL as a `str` or a `list` of
debug server URLs.
graph: A Python `tf.Graph` object.
feed_dict: Feed dictionary given to the `Session.run()` call.
fetches: Fetches from the `Session.run()` call.
old_graph_version: Old graph version to compare to.
Returns:
If `graph.version > old_graph_version`, the new graph version as an `int`.
Else, the `old_graph_version` is returned.
"""
# TODO(cais): Consider moving this back to the top, after grpc becomes a
# pip dependency of tensorflow or tf_debug.
# pylint:disable=g-import-not-at-top
from tensorflow.python.debug.lib import source_remote
# pylint:enable=g-import-not-at-top
if graph.version > old_graph_version:
run_key = common.get_run_key(feed_dict, fetches)
source_remote.send_graph_tracebacks(
debug_server_urls, run_key, traceback.extract_stack(), graph,
send_source=True)
return graph.version
else:
return old_graph_version
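# --- Editor's illustrative sketch (not part of the original file) ---
# Callers thread the returned version through successive calls, so tracebacks
# and source code are re-sent only when the graph has changed. The server URL
# below is hypothetical:
#   sent_version = -1
#   sent_version = publish_traceback(
#       ["grpc://localhost:6064"], sess.graph, feed_dict, fetches,
#       sent_version)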
class GrpcDebugWrapperSession(framework.NonInteractiveDebugWrapperSession):
"""Debug Session wrapper that send debug data to gRPC stream(s)."""
def __init__(self,
sess,
grpc_debug_server_addresses,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
grpc_debug_server_addresses: (`str` or `list` of `str`) Single or a list
of the gRPC debug server addresses, in the format of
<host:port>, with or without the "grpc://" prefix. For example:
"localhost:7000",
["localhost:7000", "192.168.0.2:8000"]
watch_fn: (`Callable`) A Callable that can be used to define per-run
debug ops and watched tensors. See the doc of
`NonInteractiveDebugWrapperSession.__init__()` for details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (`bool`) whether the usage of this class is to be logged.
Raises:
TypeError: If `grpc_debug_server_addresses` is not a `str` or a `list`
of `str`.
"""
if log_usage:
pass # No logging for open-source.
framework.NonInteractiveDebugWrapperSession.__init__(
self, sess, watch_fn=watch_fn, thread_name_filter=thread_name_filter)
if isinstance(grpc_debug_server_addresses, str):
self._grpc_debug_server_urls = [
self._normalize_grpc_url(grpc_debug_server_addresses)]
elif isinstance(grpc_debug_server_addresses, list):
self._grpc_debug_server_urls = []
for address in grpc_debug_server_addresses:
if not isinstance(address, str):
raise TypeError(
"Expected type str in list grpc_debug_server_addresses, "
"received type %s" % type(address))
self._grpc_debug_server_urls.append(self._normalize_grpc_url(address))
else:
raise TypeError(
"Expected type str or list in grpc_debug_server_addresses, "
"received type %s" % type(grpc_debug_server_addresses))
def prepare_run_debug_urls(self, fetches, feed_dict):
"""Implementation of abstract method in superclass.
See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`
for details.
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) gRPC debug URLs to be used in
this `Session.run()` call.
"""
return self._grpc_debug_server_urls
def _normalize_grpc_url(self, address):
return (common.GRPC_URL_PREFIX + address
if not address.startswith(common.GRPC_URL_PREFIX) else address)
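# --- Editor's illustrative sketch (not part of the original file) ---
# Wrapping a session so that debug data streams to a running gRPC debug
# server; the address below is hypothetical and `sess`/`fetches` come from
# the caller:
#   sess = GrpcDebugWrapperSession(sess, "localhost:6064")
#   sess.run(fetches)  # Watched tensors stream to the debug server.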
def _signal_handler(unused_signal, unused_frame):
while True:
response = six.moves.input(
"\nSIGINT received. Quit program? (Y/n): ").strip()
if response in ("", "Y", "y"):
sys.exit(0)
elif response in ("N", "n"):
break
def register_signal_handler():
try:
signal.signal(signal.SIGINT, _signal_handler)
except ValueError:
# This can happen if we are not in the MainThread.
pass
class TensorBoardDebugWrapperSession(GrpcDebugWrapperSession):
"""A tfdbg Session wrapper that can be used with TensorBoard Debugger Plugin.
This wrapper is the same as `GrpcDebugWrapperSession`, except that it uses a
predefined `watch_fn` that
1) uses `DebugIdentity` debug ops with the `gated_grpc` attribute set to
`True` to allow the interactive enabling and disabling of tensor
breakpoints.
2) watches all tensors in the graph.
This saves the need for the user to define a `watch_fn`.
"""
def __init__(self,
sess,
grpc_debug_server_addresses,
thread_name_filter=None,
send_traceback_and_source_code=True,
log_usage=True):
"""Constructor of TensorBoardDebugWrapperSession.
Args:
sess: The `tf.compat.v1.Session` instance to be wrapped.
grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a
`str` or a `list` of `str`s. E.g., "localhost:2333",
"grpc://localhost:2333", ["192.168.0.7:2333", "192.168.0.8:2333"].
thread_name_filter: Optional filter for thread names.
send_traceback_and_source_code: Whether traceback of graph elements and
the source code are to be sent to the debug server(s).
log_usage: Whether the usage of this class is to be logged (if
applicable).
"""
def _gated_grpc_watch_fn(fetches, feeds):
del fetches, feeds # Unused.
return framework.WatchOptions(
debug_ops=["DebugIdentity(gated_grpc=true)"])
super(TensorBoardDebugWrapperSession, self).__init__(
sess,
grpc_debug_server_addresses,
watch_fn=_gated_grpc_watch_fn,
thread_name_filter=thread_name_filter,
log_usage=log_usage)
self._send_traceback_and_source_code = send_traceback_and_source_code
# Keeps track of the latest version of Python graph object that has been
# sent to the debug servers.
self._sent_graph_version = -1
register_signal_handler()
def run(self,
fetches,
feed_dict=None,
options=None,
run_metadata=None,
callable_runner=None,
callable_runner_args=None,
callable_options=None):
if self._send_traceback_and_source_code:
self._sent_graph_version = publish_traceback(
self._grpc_debug_server_urls, self.graph, feed_dict, fetches,
self._sent_graph_version)
return super(TensorBoardDebugWrapperSession, self).run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata,
callable_runner=callable_runner,
callable_runner_args=callable_runner_args,
callable_options=callable_options)
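# --- Editor's illustrative sketch (not part of the original file) ---
# Usage with the TensorBoard Debugger Plugin: start TensorBoard with its
# debugger port open, then point the wrapper at that port (the address below
# is hypothetical):
#   sess = TensorBoardDebugWrapperSession(sess, "localhost:6064")
#   sess.run(fetches)  # Breakpoints are then toggled from TensorBoard.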
|
tensorflow-master
|
tensorflow/python/debug/wrappers/grpc_wrapper.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_deprecated_v1
class DumpingDebugWrapperDiskUsageLimitTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
# For efficient testing, set the disk usage bytes limit to a small
# number (10).
os.environ["TFDBG_DISK_BYTES_LIMIT"] = "10"
def setUp(self):
self.session_root = tempfile.mkdtemp()
self.v = variables.Variable(10.0, dtype=dtypes.float32, name="v")
self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def testWrapperSessionNotExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r"(.*delta.*|.*inc_v.*)", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root,
watch_fn=_watch_fn, log_usage=False)
sess.run(self.inc_v)
def testWrapperSessionExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r".*delta.*", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root,
watch_fn=_watch_fn, log_usage=False)
# Due to the watch function, each run should dump only one tensor: the
# 'delta:0' tensor, whose scalar float32 value occupies 4 bytes.
# 1st run should pass, after which the disk usage is at 4 bytes.
sess.run(self.inc_v)
# 2nd run should also pass, after which 8 bytes are used.
sess.run(self.inc_v)
# 3rd run should fail, because the total byte count (12) exceeds the
# limit (10).
with self.assertRaises(ValueError):
sess.run(self.inc_v)
def testHookNotExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r".*delta.*", r".*"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
mon_sess.run(self.inc_v)
def testHookExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r".*delta.*", r".*"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
# Like in `testWrapperSessionExceedingLimit`, the first two calls
# should be within the byte limit, but the third one should error
# out due to exceeding the limit.
mon_sess.run(self.inc_v)
mon_sess.run(self.inc_v)
with self.assertRaises(ValueError):
mon_sess.run(self.inc_v)
if __name__ == "__main__":
googletest.main()
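# --- Editor's note (not part of the original file) ---
# The limit exercised above comes from the TFDBG_DISK_BYTES_LIMIT environment
# variable, which must be set before the debugged runs, e.g.:
#   os.environ["TFDBG_DISK_BYTES_LIMIT"] = str(10 * 1024 * 1024)  # 10 MB.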
|
tensorflow-master
|
tensorflow/python/debug/wrappers/disk_usage_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Analyzer CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import cli_test_utils
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def _cli_config_from_temp_file():
return cli_config.CLIConfig(
config_file_path=os.path.join(tempfile.mkdtemp(), ".tfdbg_config"))
def no_rewrite_session_config():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
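# --- Editor's note (not part of the original file) ---
# The tests below pass this config so that Grappler rewrites do not alter the
# graph structure being asserted on, e.g.:
#   sess = session.Session(config=no_rewrite_session_config())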
def line_number_above():
return tf_inspect.stack()[1][2] - 1
def parse_op_and_node(line):
"""Parse a line containing an op node followed by a node name.
For example, if the line is
" [Variable] hidden/weights",
this function will return ("Variable", "hidden/weights")
Args:
line: The line to be parsed, as a str.
Returns:
Name of the parsed op type.
Name of the parsed node.
"""
op_type = line.strip().split(" ")[0].replace("[", "").replace("]", "")
# Not using [-1], to tolerate any other items that might be present after
# the node name.
node_name = line.strip().split(" ")[1]
return op_type, node_name
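# Illustrative call mirroring the docstring (comment-only):
#
#   op_type, node_name = parse_op_and_node("  [Variable] hidden/weights")
#   # op_type == "Variable"; node_name == "hidden/weights"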
def assert_column_header_command_shortcut(tst,
command,
reverse,
node_name_regex,
op_type_regex,
tensor_filter_name):
tst.assertFalse(reverse and "-r" in command)
  tst.assertFalse(not op_type_regex and ("-t %s" % op_type_regex) in command)
  tst.assertFalse(
      not node_name_regex and ("-n %s" % node_name_regex) in command)
  tst.assertFalse(
      not tensor_filter_name and ("-f %s" % tensor_filter_name) in command)
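# Comment-only example of the contract checked above, with assumed values:
# a header shortcut such as "lt -s op_type" must not carry the -r, -n, -t,
# or -f flags when reverse sorting and the corresponding filters are
# inactive.
#
#   assert_column_header_command_shortcut(
#       tst, "lt -s op_type", False, None, None, None)  # passes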
def assert_listed_tensors(tst,
out,
expected_tensor_names,
expected_op_types,
node_name_regex=None,
op_type_regex=None,
tensor_filter_name=None,
sort_by="timestamp",
reverse=False):
"""Check RichTextLines output for list_tensors commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
expected_tensor_names: (list of str) Expected tensor names in the list.
expected_op_types: (list of str) Expected op types of the tensors, in the
same order as the expected_tensor_names.
node_name_regex: Optional: node name regex filter.
op_type_regex: Optional: op type regex filter.
tensor_filter_name: Optional: name of the tensor filter.
sort_by: (str) (timestamp | op_type | tensor_name) the field by which the
tensors in the list are sorted.
reverse: (bool) whether the sorting is in reverse (i.e., descending) order.
"""
line_iter = iter(out.lines)
attr_segs = out.font_attr_segs
line_counter = 0
num_tensors = len(expected_tensor_names)
if tensor_filter_name is None:
tst.assertEqual("%d dumped tensor(s):" % num_tensors, next(line_iter))
else:
tst.assertEqual("%d dumped tensor(s) passing filter \"%s\":" %
(num_tensors, tensor_filter_name), next(line_iter))
line_counter += 1
if op_type_regex is not None:
tst.assertEqual("Op type regex filter: \"%s\"" % op_type_regex,
next(line_iter))
line_counter += 1
if node_name_regex is not None:
tst.assertEqual("Node name regex filter: \"%s\"" % node_name_regex,
next(line_iter))
line_counter += 1
tst.assertEqual("", next(line_iter))
line_counter += 1
# Verify the column heads "t (ms)", "Op type" and "Tensor name" are present.
line = next(line_iter)
tst.assertIn("t (ms)", line)
tst.assertIn("Op type", line)
tst.assertIn("Tensor name", line)
# Verify the command shortcuts in the top row.
attr_segs = out.font_attr_segs[line_counter]
attr_seg = attr_segs[0]
tst.assertEqual(0, attr_seg[0])
tst.assertEqual(len("t (ms)"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s timestamp", command)
assert_column_header_command_shortcut(
tst, command, reverse, node_name_regex, op_type_regex,
tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
idx0 = line.index("Size")
attr_seg = attr_segs[1]
tst.assertEqual(idx0, attr_seg[0])
tst.assertEqual(idx0 + len("Size (B)"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s dump_size", command)
assert_column_header_command_shortcut(tst, command, reverse, node_name_regex,
op_type_regex, tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
idx0 = line.index("Op type")
attr_seg = attr_segs[2]
tst.assertEqual(idx0, attr_seg[0])
tst.assertEqual(idx0 + len("Op type"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s op_type", command)
assert_column_header_command_shortcut(
tst, command, reverse, node_name_regex, op_type_regex,
tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
idx0 = line.index("Tensor name")
attr_seg = attr_segs[3]
tst.assertEqual(idx0, attr_seg[0])
tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s tensor_name", command)
assert_column_header_command_shortcut(
tst, command, reverse, node_name_regex, op_type_regex,
tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
# Verify the listed tensors and their timestamps.
tensor_timestamps = []
dump_sizes_bytes = []
op_types = []
tensor_names = []
for line in line_iter:
items = line.split(" ")
items = [item for item in items if item]
rel_time = float(items[0][1:-1])
tst.assertGreaterEqual(rel_time, 0.0)
tensor_timestamps.append(rel_time)
dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1]))
op_types.append(items[2])
tensor_names.append(items[3])
  # Verify that the tensors are listed in the order specified by sort_by,
  # descending if reverse is set.
if sort_by == "timestamp":
sorted_timestamps = sorted(tensor_timestamps)
if reverse:
sorted_timestamps.reverse()
tst.assertEqual(sorted_timestamps, tensor_timestamps)
elif sort_by == "dump_size":
sorted_dump_sizes_bytes = sorted(dump_sizes_bytes)
if reverse:
sorted_dump_sizes_bytes.reverse()
tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes)
elif sort_by == "op_type":
sorted_op_types = sorted(op_types)
if reverse:
sorted_op_types.reverse()
tst.assertEqual(sorted_op_types, op_types)
elif sort_by == "tensor_name":
sorted_tensor_names = sorted(tensor_names)
if reverse:
sorted_tensor_names.reverse()
tst.assertEqual(sorted_tensor_names, tensor_names)
else:
tst.fail("Invalid value in sort_by: %s" % sort_by)
# Verify that the tensors are all listed.
for tensor_name, op_type in zip(expected_tensor_names, expected_op_types):
tst.assertIn(tensor_name, tensor_names)
index = tensor_names.index(tensor_name)
tst.assertEqual(op_type, op_types[index])
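# For orientation, a sketch of the list_tensors output layout this helper
# walks; the values are illustrative, not taken from a real run:
#
#   6 dumped tensor(s):
#
#   t (ms)     Size (B)  Op type     Tensor name
#   [0.000]    928       VariableV2  simple_mul_add/u:0
#   [1.042]    608       MatMul      simple_mul_add/matmul:0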
def assert_node_attribute_lines(tst,
out,
node_name,
op_type,
device,
input_op_type_node_name_pairs,
ctrl_input_op_type_node_name_pairs,
recipient_op_type_node_name_pairs,
ctrl_recipient_op_type_node_name_pairs,
attr_key_val_pairs=None,
num_dumped_tensors=None,
show_stack_trace=False,
stack_trace_available=False):
"""Check RichTextLines output for node_info commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
node_name: Name of the node.
op_type: Op type of the node, as a str.
device: Name of the device on which the node resides.
input_op_type_node_name_pairs: A list of 2-tuples of op type and node name,
for the (non-control) inputs to the node.
ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node
name, for the control inputs to the node.
recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node
name, for the (non-control) output recipients to the node.
ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and
node name, for the control output recipients to the node.
attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a
list of 2-tuples.
num_dumped_tensors: Optional: number of tensor dumps from the node.
show_stack_trace: (bool) whether the stack trace of the node's
construction is asserted to be present.
stack_trace_available: (bool) whether Python stack trace is available.
"""
line_iter = iter(out.lines)
tst.assertEqual("Node %s" % node_name, next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual(" Op: %s" % op_type, next(line_iter))
tst.assertEqual(" Device: %s" % device, next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual(" %d input(s) + %d control input(s):" %
(len(input_op_type_node_name_pairs),
len(ctrl_input_op_type_node_name_pairs)), next(line_iter))
# Check inputs.
tst.assertEqual(" %d input(s):" % len(input_op_type_node_name_pairs),
next(line_iter))
for op_type, node_name in input_op_type_node_name_pairs:
tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))
tst.assertEqual("", next(line_iter))
# Check control inputs.
if ctrl_input_op_type_node_name_pairs:
tst.assertEqual(" %d control input(s):" %
len(ctrl_input_op_type_node_name_pairs), next(line_iter))
for op_type, node_name in ctrl_input_op_type_node_name_pairs:
tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual(" %d recipient(s) + %d control recipient(s):" %
(len(recipient_op_type_node_name_pairs),
len(ctrl_recipient_op_type_node_name_pairs)),
next(line_iter))
# Check recipients, the order of which is not deterministic.
tst.assertEqual(" %d recipient(s):" %
len(recipient_op_type_node_name_pairs), next(line_iter))
t_recs = []
for _ in recipient_op_type_node_name_pairs:
line = next(line_iter)
op_type, node_name = parse_op_and_node(line)
t_recs.append((op_type, node_name))
tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs)
# Check control recipients, the order of which is not deterministic.
if ctrl_recipient_op_type_node_name_pairs:
tst.assertEqual("", next(line_iter))
tst.assertEqual(" %d control recipient(s):" %
len(ctrl_recipient_op_type_node_name_pairs),
next(line_iter))
t_ctrl_recs = []
for _ in ctrl_recipient_op_type_node_name_pairs:
line = next(line_iter)
op_type, node_name = parse_op_and_node(line)
t_ctrl_recs.append((op_type, node_name))
tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs)
# The order of multiple attributes can be non-deterministic.
if attr_key_val_pairs:
tst.assertEqual("", next(line_iter))
tst.assertEqual("Node attributes:", next(line_iter))
kv_pairs = []
    for _ in attr_key_val_pairs:
key = next(line_iter).strip().replace(":", "")
val = next(line_iter).strip()
kv_pairs.append((key, val))
tst.assertEqual("", next(line_iter))
tst.assertItemsEqual(attr_key_val_pairs, kv_pairs)
if num_dumped_tensors is not None:
tst.assertEqual("%d dumped tensor(s):" % num_dumped_tensors,
next(line_iter))
tst.assertEqual("", next(line_iter))
dump_timestamps_ms = []
for _ in xrange(num_dumped_tensors):
line = next(line_iter)
tst.assertStartsWith(line.strip(), "Slot 0 @ DebugIdentity @")
tst.assertTrue(line.strip().endswith(" ms"))
dump_timestamp_ms = float(line.strip().split(" @ ")[-1].replace("ms", ""))
tst.assertGreaterEqual(dump_timestamp_ms, 0.0)
dump_timestamps_ms.append(dump_timestamp_ms)
tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms)
if show_stack_trace:
tst.assertEqual("", next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual("Traceback of node construction:", next(line_iter))
if stack_trace_available:
try:
depth_counter = 0
while True:
for i in range(5):
line = next(line_iter)
if i == 0:
tst.assertEqual(depth_counter, int(line.split(":")[0]))
elif i == 1:
tst.assertStartsWith(line, " Line:")
elif i == 2:
tst.assertStartsWith(line, " Function:")
elif i == 3:
tst.assertStartsWith(line, " Text:")
elif i == 4:
tst.assertEqual("", line)
depth_counter += 1
except StopIteration:
tst.assertEqual(0, i)
else:
tst.assertEqual("(Unavailable because no Python graph has been loaded)",
next(line_iter))
def check_syntax_error_output(tst, out, command_prefix):
"""Check RichTextLines output for valid command prefix but invalid syntax."""
tst.assertEqual([
"Syntax error for command: %s" % command_prefix,
"For help, do \"help %s\"" % command_prefix
], out.lines)
def check_error_output(tst, out, command_prefix, args):
"""Check RichTextLines output from invalid/erroneous commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
command_prefix: The command prefix of the command that caused the error.
args: The arguments (excluding prefix) of the command that caused the error.
"""
tst.assertGreater(len(out.lines), 2)
tst.assertStartsWith(out.lines[0],
"Error occurred during handling of command: %s %s" %
(command_prefix, " ".join(args)))
def check_main_menu(tst,
out,
list_tensors_enabled=False,
node_info_node_name=None,
print_tensor_node_name=None,
list_inputs_node_name=None,
list_outputs_node_name=None):
"""Check the main menu annotation of an output."""
tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)
menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]
tst.assertEqual(list_tensors_enabled,
menu.caption_to_item("list_tensors").is_enabled())
menu_item = menu.caption_to_item("node_info")
if node_info_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(node_info_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
menu_item = menu.caption_to_item("print_tensor")
if print_tensor_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(print_tensor_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
menu_item = menu.caption_to_item("list_inputs")
if list_inputs_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(list_inputs_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
menu_item = menu.caption_to_item("list_outputs")
if list_outputs_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(list_outputs_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
tst.assertTrue(menu.caption_to_item("run_info").is_enabled())
tst.assertTrue(menu.caption_to_item("help").is_enabled())
def check_menu_item(tst, out, line_index, expected_begin, expected_end,
expected_command):
attr_segs = out.font_attr_segs[line_index]
found_menu_item = False
for begin, end, attribute in attr_segs:
attributes = [attribute] if not isinstance(attribute, list) else attribute
menu_item = [attribute for attribute in attributes if
isinstance(attribute, debugger_cli_common.MenuItem)]
if menu_item:
tst.assertEqual(expected_begin, begin)
tst.assertEqual(expected_end, end)
tst.assertEqual(expected_command, menu_item[0].content)
found_menu_item = True
break
tst.assertTrue(found_menu_item)
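# The font_attr_segs structure inspected above maps a 0-based line index to
# a list of (begin_col, end_col, attribute) segments; an attribute may be a
# plain str such as "bold", a debugger_cli_common.MenuItem, or a list
# mixing both. Comment-only sketch:
#
#   begin, end, attribute = out.font_attr_segs[1][0]
#   # A MenuItem's .content holds the command the shortcut dispatches,
#   # e.g. "li -c -r control_deps/x/read".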
def create_analyzer_cli(dump):
"""Create an analyzer CLI.
Args:
dump: A `DebugDumpDir` object to base the analyzer CLI on.
Returns:
1) A `DebugAnalyzer` object created based on `dump`.
2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object
and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.
"""
# Construct the analyzer.
analyzer = analyzer_cli.DebugAnalyzer(dump, _cli_config_from_temp_file())
# Construct the handler registry.
registry = debugger_cli_common.CommandHandlerRegistry()
# Register command handlers.
registry.register_command_handler(
"list_tensors",
analyzer.list_tensors,
analyzer.get_help("list_tensors"),
prefix_aliases=["lt"])
registry.register_command_handler(
"node_info",
analyzer.node_info,
analyzer.get_help("node_info"),
prefix_aliases=["ni"])
registry.register_command_handler(
"list_inputs",
analyzer.list_inputs,
analyzer.get_help("list_inputs"),
prefix_aliases=["li"])
registry.register_command_handler(
"list_outputs",
analyzer.list_outputs,
analyzer.get_help("list_outputs"),
prefix_aliases=["lo"])
registry.register_command_handler(
"print_tensor",
analyzer.print_tensor,
analyzer.get_help("print_tensor"),
prefix_aliases=["pt"])
registry.register_command_handler(
"print_source",
analyzer.print_source,
analyzer.get_help("print_source"),
prefix_aliases=["ps"])
registry.register_command_handler(
"list_source",
analyzer.list_source,
analyzer.get_help("list_source"),
prefix_aliases=["ls"])
registry.register_command_handler(
"eval",
analyzer.evaluate_expression,
analyzer.get_help("eval"),
prefix_aliases=["ev"])
return analyzer, registry
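# A minimal end-to-end sketch of how these pieces fit together, assuming a
# dump root already populated via debug_utils.watch_graph and Session.run
# (see the setUpClass methods below); comment-only:
#
#   dump = debug_data.DebugDumpDir(dump_root, partition_graphs=graphs)
#   analyzer, registry = create_analyzer_cli(dump)
#   out = registry.dispatch_command("lt", [])  # list all dumped tensors
#   out = registry.dispatch_command("ni", ["-a", "simple_mul_add/matmul"])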
@test_util.run_v1_only("b/120545219")
class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._dump_root_for_unique = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
cls._curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
cls._sess = session.Session(config=no_rewrite_session_config())
with cls._sess as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
u_name = "simple_mul_add/u"
v_name = "simple_mul_add/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2], name="u_init")
u = variables.VariableV1(u_init, name=u_name)
cls._u_line_number = line_number_above()
v_init = constant_op.constant(v_init_val, shape=[2, 1], name="v_init")
v = variables.VariableV1(v_init, name=v_name)
cls._v_line_number = line_number_above()
w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
cls._w_line_number = line_number_above()
x = math_ops.add(w, w, name="simple_mul_add/add")
cls._x_line_number = line_number_above()
a = variables.VariableV1([1, 3, 3, 7], name="a")
u.initializer.run()
v.initializer.run()
a.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run([x], options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
shutil.rmtree(cls._dump_root_for_unique)
def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):
timestamp_col_width, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([]))
self.assertEqual(len("t (ms)") + 1, timestamp_col_width)
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
self.assertEqual(len("Op type") + 1, op_type_col_width)
def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
dump = self._debug_dump.dumped_tensor_data[0]
self.assertLess(dump.dump_size_bytes, 1000)
self.assertEqual(
"VariableV2", self._debug_dump.node_op_type(dump.node_name))
_, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([dump]))
# The length of str(dump.dump_size_bytes) is less than the length of
# "Size (B)" (8). So the column width should be determined by the length of
# "Size (B)".
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
# The length of "VariableV2" is greater than the length of "Op type". So the
# column should be determined by the length of "VariableV2".
self.assertEqual(len("VariableV2") + 1, op_type_col_width)
def testListTensors(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", [])
assert_listed_tensors(self, out, [
"simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
"simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
"simple_mul_add/add:0"
], ["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"])
# Check the main menu.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTimeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="timestamp",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
out = self._registry.dispatch_command("lt", ["-s", "foobar"])
self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
out.lines)
def testListTensorsInOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterByNodeNameRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--node_name_filter", ".*read.*"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
node_name_regex=".*read.*")
out = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
assert_listed_tensors(self, out, [], [], node_name_regex="^read")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByOpTypeRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--op_type_filter", "Identity"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
op_type_regex="Identity")
out = self._registry.dispatch_command("list_tensors",
["-t", "(Add|MatMul)"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
["Add", "MatMul"],
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
out = self._registry.dispatch_command(
"list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0"], ["Add"],
node_name_regex=".*add$",
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorWithFilterAndNodeNameExclusionWorks(self):
# First, create and register the filter.
def is_2x1_vector(datum, tensor):
del datum # Unused.
return list(tensor.shape) == [2, 1]
self._analyzer.add_tensor_filter("is_2x1_vector", is_2x1_vector)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command(
"lt", ["-f", "is_2x1_vector", "--filter_exclude_node_names", ".*v.*"])
# If the --filter_exclude_node_names were not used, then the matching
# tensors would be:
# - simple_mul_add/v:0
# - simple_mul_add/v/read:0
# - simple_mul_add/matmul:0
# - simple_mul_add/add:0
#
# With the --filter_exclude_node_names option, only the last two should
# show up in the result.
assert_listed_tensors(
self,
out, ["simple_mul_add/matmul:0", "simple_mul_add/add:0"],
["MatMul", "Add"], tensor_filter_name="is_2x1_vector")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterNanOrInf(self):
"""Test register and invoke a tensor filter."""
# First, register the filter.
self._analyzer.add_tensor_filter("has_inf_or_nan",
debug_data.has_inf_or_nan)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
# This TF graph run did not generate any bad numerical values.
assert_listed_tensors(
self, out, [], [], tensor_filter_name="has_inf_or_nan")
# TODO(cais): A test with some actual bad numerical values.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
"""Test attempt to use a nonexistent tensor filter."""
out = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
self.assertEqual(["ERROR: There is no tensor filter named \"foo_filter\"."],
out.lines)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
out = self._registry.dispatch_command("list_tensors", ["--bar"])
check_syntax_error_output(self, out, "list_tensors")
def testNodeInfoByNodeName(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", [node_name])
recipients = [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")]
assert_node_attribute_lines(self, out, node_name, "MatMul",
self._main_device,
[("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
recipients, [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
# Verify that the node name is bold in the first line.
self.assertEqual(
[(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
out.font_attr_segs[0])
def testNodeInfoShowAttributes(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-a", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
attr_key_val_pairs=[("transpose_a", "b: false"),
("transpose_b", "b: false"),
("T", "type: DT_DOUBLE")])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowDumps(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-d", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
num_dumped_tensors=1)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 16,
len(out.lines[16]) - len(out.lines[16].strip()),
len(out.lines[16]), "pt %s:0 -n 0" % node_name)
def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
self._debug_dump.set_python_graph(None)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
show_stack_trace=True, stack_trace_available=False)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowStackTraceAvailableWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
show_stack_trace=True, stack_trace_available=True)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoByTensorName(self):
node_name = "simple_mul_add/u/read"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("node_info", [tensor_name])
assert_node_attribute_lines(self, out, node_name, "Identity",
self._main_device,
[("VariableV2", "simple_mul_add/u")], [],
[("MatMul", "simple_mul_add/matmul")], [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoNonexistentNodeName(self):
out = self._registry.dispatch_command("node_info", ["bar"])
self.assertEqual(
["ERROR: There is no node named \"bar\" in the partition graphs"],
out.lines)
# Check color indicating error.
self.assertEqual({0: [(0, 59, cli_shared.COLOR_RED)]}, out.font_attr_segs)
check_main_menu(self, out, list_tensors_enabled=True)
def testPrintTensor(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorAndWriteToNpyFile(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
npy_path = os.path.join(self._dump_root, "matmul.npy")
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-w", npy_path],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
], out.lines[:4])
self.assertTrue(out.lines[4].startswith("Saved value to: %s (" % npy_path))
# Load the numpy file and verify its contents.
self.assertAllClose([[7.0], [-2.0]], np.load(npy_path))
def testPrintTensorHighlightingRanges(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[5])
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[[-inf, -5.5], [5.5, inf]]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([[-inf, -5.5], [5.5, inf]]): "
"1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(9, 11, "bold")], out.font_attr_segs[4])
self.assertNotIn(5, out.font_attr_segs)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorHighlightingRangesAndIncludingNumericSummary(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]", "-s"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"Numeric summary:",
"| - + | total |",
"| 1 1 | 2 |",
"| min max mean std |",
"| -2.0 7.0 2.5 4.5 |",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(10, out.annotations)
self.assertIn(11, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[11])
def testPrintTensorWithSlicing(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, :]"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity[1, :]\":" % tensor_name, " dtype: float64",
" shape: (1,)", "", "array([-2.])"
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidSlicingString(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, foo()]"], screen_info={"cols": 80})
self.assertEqual("Error occurred during handling of command: print_tensor "
+ tensor_name + "[1, foo()]:", out.lines[0])
self.assertEqual("ValueError: Invalid tensor-slicing string.",
out.lines[-2])
def testPrintTensorValidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "0"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "1"], screen_info={"cols": 80})
self.assertEqual([
"ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, "
"which generated one dump."
], out.lines)
self.assertNotIn("tensor_metadata", out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("print_tensor", [node_name])
self.assertEqual([
"Tensor \"%s:0:DebugIdentity\":" % node_name, " dtype: float64",
" shape: (2, 1)", "", "array([[ 7.],", " [-2.]])"
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorNonexistentNodeName(self):
out = self._registry.dispatch_command(
"print_tensor", ["simple_mul_add/matmul/foo:0"])
self.assertEqual([
"ERROR: Node \"simple_mul_add/matmul/foo\" does not exist in partition "
"graphs"
], out.lines)
check_main_menu(self, out, list_tensors_enabled=True)
def testEvalExpression(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"eval", ["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name)],
screen_info={"cols": 80})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"from eval of expression "
"'np.matmul(`simple_mul_add/matmul:0`, "
"`simple_mul_add/matmul:0`.T)'\":",
" dtype: float64",
" shape: (2, 2)",
"",
"Numeric summary:",
"| - + | total |",
"| 2 2 | 4 |",
"| min max mean std |"],
out.lines[:8])
cli_test_utils.assert_array_lines_close(
self, [-14.0, 49.0, 6.25, 25.7524270701], out.lines[8:9])
cli_test_utils.assert_array_lines_close(
self, [[49.0, -14.0], [-14.0, 4.0]], out.lines[10:])
def testEvalExpressionAndWriteToNpyFile(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
npy_path = os.path.join(self._dump_root, "matmul_eval.npy")
out = self._registry.dispatch_command(
"eval",
["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name), "-w",
npy_path], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"from eval of expression "
"'np.matmul(`simple_mul_add/matmul:0`, "
"`simple_mul_add/matmul:0`.T)'\":",
" dtype: float64",
" shape: (2, 2)",
""], out.lines[:4])
self.assertTrue(out.lines[4].startswith("Saved value to: %s (" % npy_path))
# Load the numpy file and verify its contents.
self.assertAllClose([[49.0, -14.0], [-14.0, 4.0]], np.load(npy_path))
def testAddGetTensorFilterLambda(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
analyzer.add_tensor_filter("foo_filter", lambda x, y: True)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddGetTensorFilterNestedFunction(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
def foo_filter(unused_arg_0, unused_arg_1):
return True
analyzer.add_tensor_filter("foo_filter", foo_filter)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddTensorFilterEmptyName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
with self.assertRaisesRegexp(ValueError,
"Input argument filter_name cannot be empty."):
analyzer.add_tensor_filter("", lambda datum, tensor: True)
def testAddTensorFilterNonStrName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
    with self.assertRaisesRegexp(
        TypeError,
        "Input argument filter_name is expected to be str, but is not"):
analyzer.add_tensor_filter(1, lambda datum, tensor: True)
def testAddGetTensorFilterNonCallable(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
with self.assertRaisesRegexp(
TypeError, "Input argument filter_callable is expected to be callable, "
"but is not."):
analyzer.add_tensor_filter("foo_filter", "bar")
def testGetNonexistentTensorFilter(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)
with self.assertRaisesRegexp(ValueError,
"There is no tensor filter named \"bar\""):
analyzer.get_tensor_filter("bar")
def _findSourceLine(self, annotated_source, line_number):
"""Find line of given line number in annotated source.
Args:
annotated_source: (debugger_cli_common.RichTextLines) the annotated source
line_number: (int) 1-based line number
Returns:
(int) If line_number is found, 0-based line index in
annotated_source.lines. Otherwise, None.
"""
index = None
for i, line in enumerate(annotated_source.lines):
if line.startswith("L%d " % line_number):
index = i
break
return index
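  # Comment-only illustration: given print_source output whose lines look
  # like "L26  u = variables.VariableV1(u_init, name=u_name)", the helper
  # returns the 0-based index of the line starting with "L26 ", or None if
  # no such line exists.
  #
  #   index = self._findSourceLine(out, 26)
  #   out.lines[index].startswith("L26 ")  # True when the line is found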
def testPrintSourceForOpNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source", [self._curr_file_path], screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
# Verify the annotation of the line that creates v.
index = self._findSourceLine(out, self._v_line_number)
self.assertEqual(
["L%d v = variables.VariableV1(v_init, name=v_name)" %
self._v_line_number,
" simple_mul_add/v"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/v",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates w.
index = self._findSourceLine(out, self._w_line_number)
self.assertEqual(
["L%d " % self._w_line_number +
"w = math_ops.matmul(u, v, name=\"simple_mul_add/matmul\")",
" simple_mul_add/matmul"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/matmul",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates x.
index = self._findSourceLine(out, self._x_line_number)
self.assertEqual(
["L%d " % self._x_line_number +
"x = math_ops.add(w, w, name=\"simple_mul_add/add\")",
" simple_mul_add/add"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/add",
out.font_attr_segs[index + 1][0][2].content)
def testPrintSourceForTensorNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "--tensors"],
screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u/read:0",
" simple_mul_add/u:0"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u/read:0",
out.font_attr_segs[index + 1][0][2].content)
self.assertEqual("pt simple_mul_add/u:0",
out.font_attr_segs[index + 2][0][2].content)
def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-b", "3"],
screen_info={"cols": 80})
self.assertEqual(
2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-m", "1"],
screen_info={"cols": 80})
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.VariableV1(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" (... Omitted 2 of 3 op(s) ...) +5"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
more_elements_command = out.font_attr_segs[index + 2][-1][2].content
self.assertStartsWith(more_elements_command,
"ps %s " % self._curr_file_path)
self.assertIn(" -m 6", more_elements_command)
def testListSourceWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", [])
non_tf_lib_files_start = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("Source file path")][0] + 1
non_tf_lib_files_end = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", ".*/read"])
self.assertStartsWith(out.lines[1], "Node name regex filter: \".*/read\"")
non_tf_lib_files_start = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("Source file path")][0] + 1
non_tf_lib_files_end = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithNoMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", "^$"])
self.assertEqual([
"List of source files that created nodes in this run",
"Node name regex filter: \"^$\"", "",
"[No source file information.]"], out.lines)
def testListSourceWithPathAndNodeNameFiltersWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"list_source", ["-p", self._curr_file_path, "-n", ".*read"])
self.assertEqual([
"List of source files that created nodes in this run",
"File path regex filter: \"%s\"" % self._curr_file_path,
"Node name regex filter: \".*read\"", ""], out.lines[:4])
def testListSourceWithCompiledPythonSourceWorks(self):
def fake_list_source_files_against_dump(dump,
path_regex_whitelist=None,
node_name_regex_whitelist=None):
del dump, path_regex_whitelist, node_name_regex_whitelist
return [("compiled_1.pyc", False, 10, 20, 30, 4),
("compiled_2.pyo", False, 10, 20, 30, 5),
("uncompiled.py", False, 10, 20, 30, 6)]
with test.mock.patch.object(
source_utils, "list_source_files_against_dump",
side_effect=fake_list_source_files_against_dump):
out = self._registry.dispatch_command("list_source", [])
self.assertStartsWith(out.lines[4], "compiled_1.pyc")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[4][0])
self.assertStartsWith(out.lines[5], "compiled_2.pyo")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[5][0])
self.assertStartsWith(out.lines[6], "uncompiled.py")
self.assertEqual(0, out.font_attr_segs[6][0][0])
self.assertEqual(13, out.font_attr_segs[6][0][1])
self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0])
self.assertEqual("ps uncompiled.py -b 6",
out.font_attr_segs[6][0][2][1].content)
def testListInputInvolvingNodesWithMultipleOutputs(self):
"""List an input tree containing tensors from non-:0 output slot."""
with session.Session(config=no_rewrite_session_config()) as sess:
x = variables.VariableV1([1, 3, 3, 7], name="x")
_, idx = array_ops.unique(x, name="x_unique")
idx_times_two = math_ops.multiply(idx, 2, name="idx_times_two")
self.evaluate(x.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % self._dump_root_for_unique)
run_metadata = config_pb2.RunMetadata()
self.assertAllEqual(
[0, 2, 2, 4],
sess.run(idx_times_two,
options=run_options,
run_metadata=run_metadata))
debug_dump = debug_data.DebugDumpDir(
self._dump_root_for_unique,
partition_graphs=run_metadata.partition_graphs)
_, registry = create_analyzer_cli(debug_dump)
out = registry.dispatch_command("li", ["idx_times_two"])
self.assertEqual(
["Inputs to node \"idx_times_two\" (Depth limit = 1):",
"|- (1) x_unique:1"], out.lines[:2])
class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
with session.Session(config=no_rewrite_session_config()) as sess:
# 2400 elements should exceed the default threshold (2000).
x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(x, options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
# Construct the analyzer and command registry.
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testPrintLargeTensorWithoutAllOption(self):
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0"], screen_info={"cols": 80})
# Assert that ellipses are present in the tensor value printout.
self.assertIn("...,", out.lines[4])
# 2100 still exceeds 2000.
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0[:, 0:7]"],
screen_info={"cols": 80})
self.assertIn("...,", out.lines[4])
def testPrintLargeTensorWithAllOption(self):
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0", "-a"],
screen_info={"cols": 80})
# Assert that ellipses are not present in the tensor value printout.
self.assertNotIn("...,", out.lines[4])
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0[:, 0:7]", "--all"],
screen_info={"cols": 80})
self.assertNotIn("...,", out.lines[4])
@test_util.run_v1_only("b/120545219")
class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
with session.Session(config=no_rewrite_session_config()) as sess:
x_init_val = np.array([5.0, 3.0])
x_init = constant_op.constant(x_init_val, shape=[2])
x = variables.VariableV1(x_init, name="control_deps/x")
y = math_ops.add(x, x, name="control_deps/y")
y = control_flow_ops.with_dependencies(
[x], y, name="control_deps/ctrl_dep_y")
z = math_ops.multiply(x, y, name="control_deps/z")
z = control_flow_ops.with_dependencies(
[x, y], z, name="control_deps/ctrl_dep_z")
x.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(z, options=run_options, run_metadata=run_metadata)
debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
# Construct the analyzer and command handler registry.
_, cls._registry = create_analyzer_cli(debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testNodeInfoWithControlDependencies(self):
# Call node_info on a node with control inputs.
out = self._registry.dispatch_command("node_info",
["control_deps/ctrl_dep_y"])
assert_node_attribute_lines(
self, out, "control_deps/ctrl_dep_y", "Identity",
self._main_device, [("Add", "control_deps/y")],
[("VariableV2", "control_deps/x")],
[("Mul", "control_deps/z")],
[("Identity", "control_deps/ctrl_dep_z")])
# Call node info on a node with control recipients.
out = self._registry.dispatch_command("ni", ["control_deps/x"])
assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2",
self._main_device, [], [],
[("Identity", "control_deps/x/read")],
[("Identity", "control_deps/ctrl_dep_y"),
("Identity", "control_deps/ctrl_dep_z")])
# Verify the menu items (command shortcuts) in the output.
check_menu_item(self, out, 10,
len(out.lines[10]) - len("control_deps/x/read"),
len(out.lines[10]), "ni -a -d -t control_deps/x/read")
if out.lines[13].endswith("control_deps/ctrl_dep_y"):
y_line = 13
z_line = 14
else:
y_line = 14
z_line = 13
check_menu_item(self, out, y_line,
len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"),
len(out.lines[y_line]),
"ni -a -d -t control_deps/ctrl_dep_y")
check_menu_item(self, out, z_line,
len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"),
len(out.lines[z_line]),
"ni -a -d -t control_deps/ctrl_dep_z")
def testListInputsNonRecursiveNoControl(self):
"""List inputs non-recursively, without any control inputs."""
# Do not include node op types.
node_name = "control_deps/z"
out = self._registry.dispatch_command("list_inputs", [node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) control_deps/x/read", "| |- ...",
"|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d."
], out.lines)
# Include node op types.
out = self._registry.dispatch_command("li", ["-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) [Identity] control_deps/x/read", "| |- ...",
"|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d.", " [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
# Verify that the node name has bold attribute.
self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0])
# Verify the menu items (command shortcuts) in the output.
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "li -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
def testListInputsNonRecursiveNoControlUsingTensorName(self):
"""List inputs using the name of an output tensor of the node."""
# Do not include node op types.
node_name = "control_deps/z"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("list_inputs", [tensor_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) control_deps/x/read", "| |- ...",
"|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "li -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
def testListInputsNonRecursiveWithControls(self):
"""List inputs non-recursively, with control inputs."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command("li", ["-t", node_name, "-c"])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 5,
len(out.lines[5]) - len("control_deps/x"),
len(out.lines[5]), "li -c -r control_deps/x")
def testListInputsRecursiveWithControls(self):
"""List inputs recursively, with control inputs."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 20, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z",
"| |- (2) [Identity] control_deps/x/read",
"| | |- (3) [VariableV2] control_deps/x",
"| |- (2) [Identity] control_deps/ctrl_dep_y",
"| |- (3) [Add] control_deps/y",
"| | |- (4) [Identity] control_deps/x/read",
"| | | |- (5) [VariableV2] control_deps/x",
"| | |- (4) [Identity] control_deps/x/read",
"| | |- (5) [VariableV2] control_deps/x",
"| |- (3) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- (2) [Add] control_deps/y",
"| | |- (3) [Identity] control_deps/x/read",
"| | | |- (4) [VariableV2] control_deps/x",
"| | |- (3) [Identity] control_deps/x/read",
"| | |- (4) [VariableV2] control_deps/x",
"| |- (2) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 11,
len(out.lines[11]) - len("control_deps/ctrl_dep_y"),
len(out.lines[11]), "li -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 18,
len(out.lines[18]) - len("control_deps/x"),
len(out.lines[18]), "li -c -r control_deps/x")
def testListInputsRecursiveWithControlsWithDepthLimit(self):
"""List inputs recursively, with control inputs and a depth limit."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command(
"li", ["-c", "-r", "-t", "-d", "2", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 2, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z",
"| |- (2) [Identity] control_deps/x/read", "| | |- ...",
"| |- (2) [Identity] control_deps/ctrl_dep_y", "| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- (2) [Add] control_deps/y", "| | |- ...",
"| |- (2) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 10,
len(out.lines[10]) - len("control_deps/x"),
len(out.lines[10]), "li -c -r control_deps/x")
def testListInputsNodeWithoutInputs(self):
"""List the inputs to a node without any input."""
node_name = "control_deps/x"
out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 20, control " % node_name +
"inputs included):", " [None]", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testListInputsNonexistentNode(self):
out = self._registry.dispatch_command(
"list_inputs", ["control_deps/z/foo"])
self.assertEqual([
"ERROR: There is no node named \"control_deps/z/foo\" in the "
"partition graphs"], out.lines)
def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
"""List recipients recursively, with control inputs and a depth limit."""
out = self._registry.dispatch_command(
"lo", ["-c", "-r", "-t", "-d", "1", "control_deps/x"])
self.assertEqual([
"Recipients of node \"control_deps/x\" (Depth limit = 1, control "
"recipients included):",
"|- (1) [Identity] control_deps/x/read",
"| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z",
"", "Legend:", " (d): recursion depth = d.",
" (Ctrl): Control input.",
" [Op]: Input node has op type Op."], out.lines)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "lo -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "lo -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 5,
len(out.lines[5]) - len("control_deps/ctrl_dep_z"),
len(out.lines[5]), "lo -c -r control_deps/ctrl_dep_z")
# Verify the bold attribute of the node name.
self.assertEqual([(20, 20 + len("control_deps/x"), "bold")],
out.font_attr_segs[0])
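# Note (illustrative summary, inferred from the tests above; not part of the
# original file): the list_inputs ("li") and list_outputs ("lo") commands
# accept the same flags:
#   -c       include control inputs/recipients
#   -r       recurse into the graph (default depth limit: 20)
#   -t       annotate each node with its op type, e.g. "[Identity]"
#   -d <n>   cap the recursion depth at n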
@test_util.run_v1_only("b/120545219")
class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
with session.Session(config=no_rewrite_session_config()) as sess:
loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
cond = lambda loop_var: math_ops.less(loop_var, 10)
body = lambda loop_var: math_ops.add(loop_var, 1)
while_loop = control_flow_ops.while_loop(
cond, body, [loop_var], parallel_iterations=1)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_url = "file://%s" % cls._dump_root
watch_opts = run_options.debug_options.debug_tensor_watch_opts
# Add debug tensor watch for "while/Identity".
watch = watch_opts.add()
watch.node_name = "while/Identity"
watch.output_slot = 0
watch.debug_ops.append("DebugIdentity")
watch.debug_urls.append(debug_url)
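      # Note (illustrative, assuming tensorflow.python.debug.lib.debug_utils
      # is imported): an equivalent watch could likely be added with the
      # debug_utils.watch_graph() helper, e.g.:
      #   debug_utils.watch_graph(run_options, sess.graph,
      #                           debug_urls=debug_url,
      #                           node_name_regex_whitelist=r"while/Identity")
      # The watch fields are populated manually here to keep the test minimal.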
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(while_loop, options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testMultipleDumpsPrintTensorNoNumber(self):
output = self._registry.dispatch_command("pt", ["while/Identity:0"])
self.assertEqual("Tensor \"while/Identity:0\" generated 10 dumps:",
output.lines[0])
for i in xrange(10):
self.assertTrue(output.lines[i + 1].startswith("#%d" % i))
self.assertTrue(output.lines[i + 1].endswith(
" ms] while/Identity:0:DebugIdentity"))
self.assertEqual(
"You can use the -n (--number) flag to specify which dump to print.",
output.lines[-3])
self.assertEqual("For example:", output.lines[-2])
self.assertEqual(" print_tensor while/Identity:0 -n 0", output.lines[-1])
def testMultipleDumpsPrintTensorWithNumber(self):
for i in xrange(5):
output = self._registry.dispatch_command(
"pt", ["while/Identity:0", "-n", "%d" % i])
self.assertEqual("Tensor \"while/Identity:0:DebugIdentity (dump #%d)\":" %
i, output.lines[0])
self.assertEqual(" dtype: int32", output.lines[1])
self.assertEqual(" shape: ()", output.lines[2])
self.assertEqual("", output.lines[3])
self.assertTrue(output.lines[4].startswith("array(%d" % i))
self.assertTrue(output.lines[4].endswith(")"))
def testMultipleDumpsPrintTensorInvalidNumber(self):
output = self._registry.dispatch_command("pt",
["while/Identity:0", "-n", "10"])
self.assertEqual([
"ERROR: Specified number (10) exceeds the number of available dumps "
"(10) for tensor while/Identity:0"
], output.lines)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/analyzer_cli_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configurations for TensorFlow Debugger (TFDBG) command-line interfaces."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.platform import gfile
RL = debugger_cli_common.RichLine
class CLIConfig(object):
"""Client-facing configurations for TFDBG command-line interfaces."""
_CONFIG_FILE_NAME = ".tfdbg_config"
_DEFAULT_CONFIG = [
("graph_recursion_depth", 20),
("mouse_mode", True),
]
def __init__(self, config_file_path=None):
self._config_file_path = (config_file_path or
self._default_config_file_path())
self._config = collections.OrderedDict(self._DEFAULT_CONFIG)
if gfile.Exists(self._config_file_path):
config = self._load_from_file()
for key, value in config.items():
self._config[key] = value
self._save_to_file()
self._set_callbacks = {}
def get(self, property_name):
if property_name not in self._config:
raise KeyError("%s is not a valid property name." % property_name)
return self._config[property_name]
def set(self, property_name, property_val):
"""Set the value of a property.
    Supports limited property value types: `bool`, `int` and `str`.
Args:
property_name: Name of the property.
property_val: Value of the property. If the property has `bool` type and
        this argument has `str` type, the `str` value will be parsed as a `bool`.
Raises:
ValueError: if a `str` property_value fails to be parsed as a `bool`.
KeyError: if `property_name` is an invalid property name.
"""
if property_name not in self._config:
raise KeyError("%s is not a valid property name." % property_name)
orig_val = self._config[property_name]
if isinstance(orig_val, bool):
if isinstance(property_val, str):
if property_val.lower() in ("1", "true", "t", "yes", "y", "on"):
property_val = True
elif property_val.lower() in ("0", "false", "f", "no", "n", "off"):
property_val = False
else:
raise ValueError(
"Invalid string value for bool type: %s" % property_val)
else:
property_val = bool(property_val)
elif isinstance(orig_val, int):
property_val = int(property_val)
elif isinstance(orig_val, str):
property_val = str(property_val)
else:
raise TypeError("Unsupported property type: %s" % type(orig_val))
self._config[property_name] = property_val
self._save_to_file()
# Invoke set-callback.
if property_name in self._set_callbacks:
self._set_callbacks[property_name](self._config)
def set_callback(self, property_name, callback):
"""Set a set-callback for given property.
Args:
property_name: Name of the property.
callback: The callback as a `callable` of signature:
def cbk(config):
where config is the config after it is set to the new value.
The callback is invoked each time the set() method is called with the
matching property_name.
Raises:
KeyError: If property_name does not exist.
TypeError: If `callback` is not callable.
"""
if property_name not in self._config:
raise KeyError("%s is not a valid property name." % property_name)
if not callable(callback):
raise TypeError("The callback object provided is not callable.")
self._set_callbacks[property_name] = callback
def _default_config_file_path(self):
return os.path.join(os.path.expanduser("~"), self._CONFIG_FILE_NAME)
def _save_to_file(self):
try:
with gfile.Open(self._config_file_path, "w") as config_file:
json.dump(self._config, config_file)
except IOError:
pass
def summarize(self, highlight=None):
"""Get a text summary of the config.
Args:
highlight: A property name to highlight in the output.
Returns:
A `RichTextLines` output.
"""
lines = [RL("Command-line configuration:", "bold"), RL("")]
for name, val in self._config.items():
highlight_attr = "bold" if name == highlight else None
line = RL(" ")
line += RL(name, ["underline", highlight_attr])
line += RL(": ")
line += RL(str(val), font_attr=highlight_attr)
lines.append(line)
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def _load_from_file(self):
try:
with gfile.Open(self._config_file_path, "r") as config_file:
config_dict = json.load(config_file)
config = collections.OrderedDict()
for key in sorted(config_dict.keys()):
config[key] = config_dict[key]
return config
except (IOError, ValueError):
# The reading of the config file may fail due to IO issues or file
# corruption. We do not want tfdbg to error out just because of that.
return dict()
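# Illustrative usage sketch (not part of the original module): how a client
# might exercise CLIConfig. The temporary config path and the _demo_* name
# are assumptions made for demonstration, so that the user's real
# ~/.tfdbg_config file is left untouched.
def _demo_cli_config():
  import tempfile
  demo_path = os.path.join(tempfile.mkdtemp(), ".tfdbg_config_demo")
  config = CLIConfig(config_file_path=demo_path)
  assert config.get("graph_recursion_depth") == 20  # Default value.
  config.set("graph_recursion_depth", 30)  # Persisted to the JSON file.
  # A set-callback fires each time set() is called on the matching property.
  observed = []
  config.set_callback("mouse_mode",
                      lambda cfg: observed.append(cfg["mouse_mode"]))
  config.set("mouse_mode", "off")  # The str "off" is parsed into bool False.
  assert observed == [False]
  return config.summarize().lines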
|
tensorflow-master
|
tensorflow/python/debug/cli/cli_config.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Building Blocks of the TensorFlow Debugger CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
import numpy as np
from tensorflow.python import pywrap_tensorflow_internal
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CommandLineExitTest(test_util.TensorFlowTestCase):
def testConstructionWithoutToken(self):
exit_exc = debugger_cli_common.CommandLineExit()
self.assertTrue(isinstance(exit_exc, Exception))
def testConstructionWithToken(self):
exit_exc = debugger_cli_common.CommandLineExit(exit_token={"foo": "bar"})
self.assertTrue(isinstance(exit_exc, Exception))
self.assertEqual({"foo": "bar"}, exit_exc.exit_token)
class RichTextLinesTest(test_util.TensorFlowTestCase):
def testRichTextLinesConstructorComplete(self):
# Test RichTextLines constructor.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual(2, len(screen_output.annotations))
self.assertEqual(2, screen_output.num_lines())
def testRichTextLinesConstructorWithInvalidType(self):
with self.assertRaisesRegexp(ValueError, "Unexpected type in lines"):
debugger_cli_common.RichTextLines(123)
def testRichTextLinesConstructorWithString(self):
# Test constructing a RichTextLines object with a string, instead of a list
# of strings.
screen_output = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
self.assertEqual(1, len(screen_output.lines))
self.assertEqual(1, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.annotations))
def testRichLinesAppendRichLine(self):
rtl = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]})
rtl.append_rich_line(debugger_cli_common.RichLine("Violets are ") +
debugger_cli_common.RichLine("blue", "blue"))
self.assertEqual(2, len(rtl.lines))
self.assertEqual(2, len(rtl.font_attr_segs))
self.assertEqual(1, len(rtl.font_attr_segs[0]))
self.assertEqual(1, len(rtl.font_attr_segs[1]))
def testRichLineLenMethodWorks(self):
self.assertEqual(0, len(debugger_cli_common.RichLine()))
self.assertEqual(0, len(debugger_cli_common.RichLine("")))
self.assertEqual(1, len(debugger_cli_common.RichLine("x")))
self.assertEqual(6, len(debugger_cli_common.RichLine("x y z ", "blue")))
def testRichTextLinesConstructorIncomplete(self):
# Test RichTextLines constructor, with incomplete keyword arguments.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual({}, screen_output.annotations)
def testModifyRichTextLinesObject(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
self.assertEqual(2, len(screen_output.lines))
screen_output.lines.append("Sugar is sweet")
self.assertEqual(3, len(screen_output.lines))
def testMergeRichTextLines(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines(
["Lilies are white", "Sunflowers are yellow"],
font_attr_segs={0: [(0, 6, "white")],
1: [(0, 7, "yellow")]},
annotations={
"metadata": "foo",
0: "full spectrum",
1: "medium wavelength"
})
screen_output_1.extend(screen_output_2)
self.assertEqual(4, screen_output_1.num_lines())
self.assertEqual([
"Roses are red", "Violets are blue", "Lilies are white",
"Sunflowers are yellow"
], screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
"metadata": "foo",
0: "longer wavelength",
1: "shorter wavelength",
2: "full spectrum",
3: "medium wavelength"
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptyOther(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines([])
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptySelf(self):
screen_output_1 = debugger_cli_common.RichTextLines([])
screen_output_2 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testAppendALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.append("Violets are blue", [(0, 7, "blue")])
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
def testPrependALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.prepend("Violets are blue", font_attr_segs=[(0, 7, "blue")])
self.assertEqual(["Violets are blue", "Roses are red"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 7, "blue")],
1: [(0, 5, "red")],
}, screen_output_1.font_attr_segs)
def testWriteToFileSucceeds(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = tempfile.mktemp()
screen_output.write_to_file(file_path)
with gfile.Open(file_path, "r") as f:
self.assertEqual("Roses are red\nViolets are blue\n", f.read())
# Clean up.
gfile.Remove(file_path)
def testAttemptToWriteToADirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
with self.assertRaises(Exception):
screen_output.write_to_file("/")
def testAttemptToWriteToFileInNonexistentDirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = os.path.join(tempfile.mkdtemp(), "foo", "bar.txt")
with self.assertRaises(Exception):
screen_output.write_to_file(file_path)
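# Illustrative sketch (not an original test): the font_attr_segs convention
# used throughout this file maps a line index to a list of
# (begin_col, end_col, attribute) segments. The _demo_* name is hypothetical.
def _demo_build_rich_text():
  rtl = debugger_cli_common.RichTextLines(["Roses are red"])
  # Columns 12-16 of the appended line carry the "blue" attribute.
  rtl.append("Violets are blue", [(12, 16, "blue")])
  return rtl.lines, rtl.font_attr_segs  # font_attr_segs: {1: [(12, 16, "blue")]}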
class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._intentional_error_msg = "Intentionally raised exception"
def _noop_handler(self, argv, screen_info=None):
# A handler that does nothing other than returning "Done."
return debugger_cli_common.RichTextLines(["Done."])
def _handler_raising_exception(self, argv, screen_info=None):
# A handler that intentionally raises an exception.
raise RuntimeError(self._intentional_error_msg)
def _handler_returning_wrong_type(self, argv, screen_info=None):
# A handler that returns a wrong type, instead of the correct type
# (RichTextLines).
return "Hello"
def _echo_screen_cols(self, argv, screen_info=None):
# A handler that uses screen_info.
return debugger_cli_common.RichTextLines(
["cols = %d" % screen_info["cols"]])
def _exiting_handler(self, argv, screen_info=None):
"""A handler that exits with an exit token."""
if argv:
exit_token = argv[0]
else:
exit_token = None
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
def testRegisterEmptyCommandPrefix(self):
registry = debugger_cli_common.CommandHandlerRegistry()
    # An attempt to register an empty string as a command prefix should
    # trigger an exception.
with self.assertRaisesRegexp(ValueError, "Empty command prefix"):
registry.register_command_handler("", self._noop_handler, "")
def testRegisterAndInvokeHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
self.assertTrue(registry.is_registered("noop"))
self.assertFalse(registry.is_registered("beep"))
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
    # An attempt to invoke an unregistered command prefix should trigger an
    # exception.
with self.assertRaisesRegexp(ValueError, "No handler is registered"):
registry.dispatch_command("beep", [])
# Empty command prefix should trigger an exception.
with self.assertRaisesRegexp(ValueError, "Prefix is empty"):
registry.dispatch_command("", [])
def testExitingHandler(self):
"""Test that exit exception is correctly raised."""
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("exit", self._exiting_handler, "")
self.assertTrue(registry.is_registered("exit"))
exit_token = None
try:
registry.dispatch_command("exit", ["foo"])
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
self.assertEqual("foo", exit_token)
def testInvokeHandlerWithScreenInfo(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Register and invoke a command handler that uses screen_info.
registry.register_command_handler("cols", self._echo_screen_cols, "")
cmd_output = registry.dispatch_command(
"cols", [], screen_info={"cols": 100})
self.assertEqual(["cols = 100"], cmd_output.lines)
def testRegisterAndInvokeHandlerWithAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n", "NOOP"])
# is_registered() should work for full prefix and aliases.
self.assertTrue(registry.is_registered("noop"))
self.assertTrue(registry.is_registered("n"))
self.assertTrue(registry.is_registered("NOOP"))
cmd_output = registry.dispatch_command("n", [])
self.assertEqual(["Done."], cmd_output.lines)
cmd_output = registry.dispatch_command("NOOP", [])
self.assertEqual(["Done."], cmd_output.lines)
def testHandlerWithWrongReturnType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("wrong_return",
self._handler_returning_wrong_type, "")
# If the command handler fails to return a RichTextLines instance, an error
# should be triggered.
with self.assertRaisesRegexp(
ValueError,
"Return value from command handler.*is not None or a RichTextLines "
"instance"):
registry.dispatch_command("wrong_return", [])
def testRegisterDuplicateHandlers(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
# Registering the same command prefix more than once should trigger an
# exception.
with self.assertRaisesRegexp(
ValueError, "A handler is already registered for command prefix"):
registry.register_command_handler("noop", self._noop_handler, "")
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
def testRegisterDuplicateAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n"])
# Clash with existing alias.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["n"])
    # The name clash should have prevented the handler from being registered.
self.assertFalse(registry.is_registered("cols"))
# Aliases can also clash with command prefixes.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["noop"])
self.assertFalse(registry.is_registered("cols"))
def testDispatchHandlerRaisingException(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("raise_exception",
self._handler_raising_exception, "")
# The registry should catch and wrap exceptions that occur during command
# handling.
cmd_output = registry.dispatch_command("raise_exception", [])
    # The error output contains a stack trace, so the line count should be
    # greater than 2.
self.assertGreater(len(cmd_output.lines), 2)
self.assertTrue(cmd_output.lines[0].startswith(
"Error occurred during handling of command"))
self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))
def testRegisterNonCallableHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
    # An attempt to register a non-callable handler should fail.
with self.assertRaisesRegexp(ValueError, "handler is not callable"):
registry.register_command_handler("non_callable", 1, "")
def testRegisterHandlerWithInvalidHelpInfoType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
with self.assertRaisesRegexp(ValueError, "help_info is not a str"):
registry.register_command_handler("noop", self._noop_handler, ["foo"])
def testGetHelpFull(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
help_lines = registry.get_help().lines
    # The help info should list commands in alphabetically sorted order,
    # regardless of the order in which the commands are registered.
self.assertEqual("cols", help_lines[0])
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
self.assertFalse(help_lines[4])
self.assertFalse(help_lines[5])
# The default help command should appear in the help output.
self.assertEqual("help", help_lines[6])
self.assertEqual("noop", help_lines[12])
self.assertTrue(help_lines[13].endswith("Aliases: n, NOOP"))
self.assertFalse(help_lines[14])
self.assertTrue(help_lines[15].endswith("No operation."))
self.assertTrue(help_lines[16].endswith("I.e., do nothing."))
def testGetHelpSingleCommand(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help info for one of the two commands, using full prefix.
help_lines = registry.get_help("cols").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for one of the two commands, using alias.
help_lines = registry.get_help("c").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for a nonexistent command.
help_lines = registry.get_help("foo").lines
self.assertEqual("Invalid command prefix: \"foo\"", help_lines[0])
def testHelpCommandWithoutIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help for all commands.
output = registry.dispatch_command("help", [])
self.assertEqual(["cols", " Aliases: c", "",
" Show screen width in number of columns.", "", "",
"help", " Aliases: h", "", " Print this help message.",
"", "", "noop", " Aliases: n, NOOP", "",
" No operation.", " I.e., do nothing.", "", "",
"version", " Aliases: ver", "",
" Print the versions of TensorFlow and its key "
"dependencies.", "", ""],
output.lines)
# Get help for one specific command prefix.
output = registry.dispatch_command("help", ["noop"])
self.assertEqual(["noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing."], output.lines)
# Get help for a nonexistent command prefix.
output = registry.dispatch_command("help", ["foo"])
self.assertEqual(["Invalid command prefix: \"foo\""], output.lines)
def testHelpCommandWithIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
help_intro = debugger_cli_common.RichTextLines(
["Introductory comments.", ""])
registry.set_help_intro(help_intro)
output = registry.dispatch_command("help", [])
self.assertEqual(help_intro.lines + [
"help", " Aliases: h", "", " Print this help message.", "", "",
"noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing.", "", "",
"version", " Aliases: ver", "",
" Print the versions of TensorFlow and its key dependencies.", "", ""
], output.lines)
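# Illustrative sketch (not an original test): the minimal register/dispatch
# flow for a command handler. The names _demo_registry_usage and _greet are
# hypothetical.
def _demo_registry_usage():
  registry = debugger_cli_common.CommandHandlerRegistry()
  def _greet(argv, screen_info=None):
    # Handlers receive argv (and optional screen_info) and must return a
    # RichTextLines instance (or None).
    name = argv[0] if argv else "world"
    return debugger_cli_common.RichTextLines(["Hello, %s." % name])
  registry.register_command_handler(
      "greet", _greet, "Print a greeting.", prefix_aliases=["g"])
  return registry.dispatch_command("g", ["tfdbg"]).lines  # ["Hello, tfdbg."]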
class RegexFindTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
def testRegexFindWithoutExistingFontAttrSegs(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow")], new_screen_output.font_attr_segs[0])
self.assertEqual([(8, 11, "yellow")], new_screen_output.font_attr_segs[1])
# Check field in annotations carrying a list of matching line indices.
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithExistingFontAttrSegs(self):
# Add a font attribute segment first.
self._orig_screen_output.font_attr_segs[0] = [(9, 12, "red")]
self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow"), (9, 12, "red")],
new_screen_output.font_attr_segs[0])
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithNoMatches(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"infrared", "yellow")
self.assertEqual({}, new_screen_output.font_attr_segs)
self.assertEqual([], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testInvalidRegex(self):
with self.assertRaisesRegexp(ValueError, "Invalid regular expression"):
debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")
def testRegexFindOnPrependedLinesWorks(self):
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["Roses are red"])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "red", "bold")
self.assertEqual(
{0: [(10, 13, "bold")]}, searched_rich_lines.font_attr_segs)
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["A poem"], font_attr_segs=[(0, 1, "underline")])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "poem", "italic")
self.assertEqual(
{0: [(0, 1, "underline"), (2, 6, "italic")]},
searched_rich_lines.font_attr_segs)
class WrapScreenOutputTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 5, "red"), (6, 9, "gray"), (10, 12, "red"),
(12, 13, "crimson")],
2: [(0, 7, "blue"), (8, 11, "gray"), (12, 14, "blue"),
(14, 16, "indigo")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
def testNoActualWrapping(self):
# Large column limit should lead to no actual wrapping.
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 100)
self.assertEqual(self._orig_screen_output.lines, out.lines)
self.assertEqual(self._orig_screen_output.font_attr_segs,
out.font_attr_segs)
self.assertEqual(self._orig_screen_output.annotations, out.annotations)
self.assertEqual(new_line_indices, [0, 1, 2])
def testWrappingWithAttrCutoff(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 11)
# Add non-row-index field to out.
out.annotations["metadata"] = "foo"
# Check wrapped text.
self.assertEqual(5, len(out.lines))
self.assertEqual("Folk song:", out.lines[0])
self.assertEqual("Roses are r", out.lines[1])
self.assertEqual("ed", out.lines[2])
self.assertEqual("Violets are", out.lines[3])
self.assertEqual(" blue", out.lines[4])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertEqual([(0, 5, "red"), (6, 9, "gray"), (10, 11, "red")],
out.font_attr_segs[1])
self.assertEqual([(0, 1, "red"), (1, 2, "crimson")], out.font_attr_segs[2])
self.assertEqual([(0, 7, "blue"), (8, 11, "gray")], out.font_attr_segs[3])
self.assertEqual([(1, 3, "blue"), (3, 5, "indigo")], out.font_attr_segs[4])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[1])
self.assertFalse(2 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[3])
self.assertFalse(4 in out.annotations)
    # Check that the non-row-index field is present in the output.
self.assertEqual("foo", out.annotations["metadata"])
self.assertEqual(new_line_indices, [0, 1, 3])
def testWrappingWithMultipleAttrCutoff(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 12, "red")],
2: [(1, 16, "blue")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 5)
# Check wrapped text.
self.assertEqual(9, len(out.lines))
self.assertEqual("Folk ", out.lines[0])
self.assertEqual("song:", out.lines[1])
self.assertEqual("Roses", out.lines[2])
self.assertEqual(" are ", out.lines[3])
self.assertEqual("red", out.lines[4])
self.assertEqual("Viole", out.lines[5])
self.assertEqual("ts ar", out.lines[6])
self.assertEqual("e blu", out.lines[7])
self.assertEqual("e", out.lines[8])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertFalse(1 in out.font_attr_segs)
self.assertEqual([(0, 5, "red")], out.font_attr_segs[2])
self.assertEqual([(0, 5, "red")], out.font_attr_segs[3])
self.assertEqual([(0, 2, "red")], out.font_attr_segs[4])
self.assertEqual([(1, 5, "blue")], out.font_attr_segs[5])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[6])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[7])
self.assertEqual([(0, 1, "blue")], out.font_attr_segs[8])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertFalse(1 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[2])
self.assertFalse(3 in out.annotations)
self.assertFalse(4 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[5])
self.assertFalse(6 in out.annotations)
self.assertFalse(7 in out.annotations)
self.assertFalse(8 in out.annotations)
self.assertEqual(new_line_indices, [0, 2, 5])
def testWrappingInvalidArguments(self):
with self.assertRaisesRegexp(ValueError,
"Invalid type of input screen_output"):
debugger_cli_common.wrap_rich_text_lines("foo", 12)
with self.assertRaisesRegexp(ValueError, "Invalid type of input cols"):
debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines(["foo", "bar"]), "12")
def testWrappingEmptyInput(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines([]), 10)
self.assertEqual([], out.lines)
self.assertEqual([], new_line_indices)
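# Illustrative sketch (not an original test): wrapping both splits the text
# and carries the font attribute segments over to the wrapped lines. The
# _demo_* name is hypothetical.
def _demo_wrap_rich_text():
  original = debugger_cli_common.RichTextLines(
      ["Roses are red"], font_attr_segs={0: [(0, 12, "red")]})
  wrapped, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
      original, 5)
  # "Roses are red" becomes ["Roses", " are ", "red"]; the single "red"
  # segment is split across the wrapped lines, and new_line_indices ([0])
  # maps each original line index to its first wrapped line.
  return wrapped.lines, wrapped.font_attr_segs, new_line_indices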
class SliceRichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
self._original = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={
0: "longer wavelength",
1: "shorter wavelength",
"foo_metadata": "bar"
})
def testSliceBeginning(self):
sliced = self._original.slice(0, 1)
self.assertEqual(["Roses are red"], sliced.lines)
self.assertEqual({0: [(0, 5, "red")]}, sliced.font_attr_segs)
    # Non-line-number metadata should be preserved.
self.assertEqual({
0: "longer wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testSliceEnd(self):
sliced = self._original.slice(1, 2)
self.assertEqual(["Violets are blue"], sliced.lines)
# The line index should have changed from 1 to 0.
self.assertEqual({0: [(0, 7, "blue")]}, sliced.font_attr_segs)
self.assertEqual({
0: "shorter wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testAttemptSliceWithNegativeIndex(self):
with self.assertRaisesRegexp(ValueError, "Encountered negative index"):
self._original.slice(0, -1)
class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tc_reg = debugger_cli_common.TabCompletionRegistry()
# Register the items in an unsorted order deliberately, to test the sorted
# output from get_completions().
self._tc_reg.register_tab_comp_context(
["print_tensor", "pt"],
["node_b:1", "node_b:2", "node_a:1", "node_a:2"])
self._tc_reg.register_tab_comp_context(["node_info"],
["node_c", "node_b", "node_a"])
def testTabCompletion(self):
# The returned completions should have sorted order.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a:1", "node_a:2", "node_b:1", "node_b:2"],
"node_"), self._tc_reg.get_completions("pt", ""))
self.assertEqual((["node_a:1", "node_a:2"], "node_a:"),
self._tc_reg.get_completions("print_tensor", "node_a"))
self.assertEqual((["node_a:1"], "node_a:1"),
self._tc_reg.get_completions("pt", "node_a:1"))
self.assertEqual(([], ""),
self._tc_reg.get_completions("print_tensor", "node_a:3"))
self.assertEqual((None, None), self._tc_reg.get_completions("foo", "node_"))
def testExtendCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.extend_comp_items("print_tensor", ["node_A:1", "node_A:2"])
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
    # Extending the completions for one of the context's context words should
    # take effect on the other context words of the same context as well.
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testExtendCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])
def testRemoveCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.remove_comp_items("pt", ["node_a:1", "node_a:2"])
self.assertEqual((["node_b:1", "node_b:2"], "node_b:"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testRemoveCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])
def testDeregisterContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
self.assertEqual((None, None),
self._tc_reg.get_completions("print_tensor", "node_"))
# The alternative context word should be unaffected.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
def testDeregisterNonexistentContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
with self.assertRaisesRegexp(
KeyError,
"Cannot deregister unregistered context word \"print_tensor\""):
self._tc_reg.deregister_context(["print_tensor"])
class CommandHistoryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._history_file_path = tempfile.mktemp()
self._cmd_hist = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
def tearDown(self):
if os.path.isfile(self._history_file_path):
os.remove(self._history_file_path)
def _restoreFileReadWritePermissions(self, file_path):
os.chmod(file_path,
(stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR |
stat.S_IWGRP | stat.S_IWOTH))
def testLookUpMostRecent(self):
self.assertEqual([], self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("node_info node_b")
self.assertEqual(["node_info node_b"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(4))
# Go over the limit.
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(4))
def testLookUpPrefix(self):
self._cmd_hist.add_command("node_info node_b")
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.lookup_prefix("node_info", 10))
self.assertEqual(["node_info node_a"], self._cmd_hist.lookup_prefix(
"node_info", 1))
self.assertEqual([], self._cmd_hist.lookup_prefix("print_tensor", 10))
def testAddNonStrCommand(self):
with self.assertRaisesRegexp(
TypeError, "Attempt to enter non-str entry to command history"):
self._cmd_hist.add_command(["print_tensor node_a:0"])
def testRepeatingCommandsDoNotGetLoggedRepeatedly(self):
self._cmd_hist.add_command("help")
self._cmd_hist.add_command("help")
self.assertEqual(["help"], self._cmd_hist.most_recent_n(2))
def testCommandHistoryFileIsCreated(self):
self.assertFalse(os.path.isfile(self._history_file_path))
self._cmd_hist.add_command("help")
self.assertTrue(os.path.isfile(self._history_file_path))
with open(self._history_file_path, "rt") as f:
self.assertEqual(["help\n"], f.readlines())
def testLoadingCommandHistoryFileObeysLimit(self):
self._cmd_hist.add_command("help 1")
self._cmd_hist.add_command("help 2")
self._cmd_hist.add_command("help 3")
self._cmd_hist.add_command("help 4")
cmd_hist_2 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help 2", "help 3", "help 4"],
cmd_hist_2.most_recent_n(3))
with open(self._history_file_path, "rt") as f:
self.assertEqual(
["help 2\n", "help 3\n", "help 4\n"], f.readlines())
  def testCommandHistoryHandlesReadingIOErrorGraciously(self):
with open(self._history_file_path, "wt") as f:
f.write("help\n")
    # Make the file unreadable by anyone.
os.chmod(self._history_file_path, 0)
# The creation of a CommandHistory object should not error out.
debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self._restoreFileReadWritePermissions(self._history_file_path)
  def testCommandHistoryHandlesWritingIOErrorGraciously(self):
with open(self._history_file_path, "wt") as f:
f.write("help\n")
# Change file to read-only.
os.chmod(self._history_file_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Reading from the file should still work.
cmd_hist_2 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help"], cmd_hist_2.most_recent_n(1))
    # Writing should no longer work, but it should fail silently and the
    # within-instance command history should still work.
cmd_hist_2.add_command("foo")
self.assertEqual(["help", "foo"], cmd_hist_2.most_recent_n(2))
cmd_hist_3 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help"], cmd_hist_3.most_recent_n(1))
self._restoreFileReadWritePermissions(self._history_file_path)
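# Illustrative sketch (not an original test): CommandHistory is a bounded,
# file-backed history; exceeding the limit evicts the oldest entry. The
# _demo_* name is hypothetical.
def _demo_command_history():
  hist = debugger_cli_common.CommandHistory(
      limit=2, history_file_path=tempfile.mktemp())
  hist.add_command("list_tensors")
  hist.add_command("node_info node_a")
  hist.add_command("node_info node_b")  # Evicts "list_tensors" (limit=2).
  return hist.most_recent_n(2)  # ["node_info node_a", "node_info node_b"]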
class MenuNodeTest(test_util.TensorFlowTestCase):
def testCommandTypeConstructorSucceeds(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertEqual("water flower", menu_node.caption)
self.assertEqual("water_flower", menu_node.content)
def testDisableWorks(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertTrue(menu_node.is_enabled())
menu_node.disable()
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
def testConstructAsDisabledWorks(self):
menu_node = debugger_cli_common.MenuItem(
"water flower", "water_flower", enabled=False)
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
class MenuTest(test_util.TensorFlowTestCase):
def setUp(self):
self.menu = debugger_cli_common.Menu()
self.assertEqual(0, self.menu.num_items())
self.node1 = debugger_cli_common.MenuItem("water flower", "water_flower")
self.node2 = debugger_cli_common.MenuItem(
"measure wavelength", "measure_wavelength")
self.menu.append(self.node1)
self.menu.append(self.node2)
self.assertEqual(2, self.menu.num_items())
def testFormatAsSingleLineWithStrItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs="underline")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithListItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs=["underline", "bold"])
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline", "bold"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline", "bold"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithNoneItemAttrsWorks(self):
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testInsertNode(self):
self.assertEqual(["water flower", "measure wavelength"],
self.menu.captions())
node2 = debugger_cli_common.MenuItem("write poem", "write_poem")
self.menu.insert(1, node2)
self.assertEqual(["water flower", "write poem", "measure wavelength"],
self.menu.captions())
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, write poem, measure wavelength, "],
output.lines)
def testFormatAsSingleLineWithDisabledNode(self):
node2 = debugger_cli_common.MenuItem(
"write poem", "write_poem", enabled=False)
self.menu.append(node2)
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", disabled_item_attrs="bold")
self.assertEqual(["Menu: water flower, measure wavelength, write poem, "],
output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual((40, 50, ["bold"]), output.font_attr_segs[0][2])
class GetTensorFlowVersionLinesTest(test_util.TensorFlowTestCase):
def testGetVersionWithoutDependencies(self):
out = debugger_cli_common.get_tensorflow_version_lines()
self.assertEqual(2, len(out.lines))
self.assertEqual(
"TensorFlow version: %s" % pywrap_tensorflow_internal.__version__,
out.lines[0])
def testGetVersionWithDependencies(self):
out = debugger_cli_common.get_tensorflow_version_lines(True)
self.assertIn(
"TensorFlow version: %s" % pywrap_tensorflow_internal.__version__,
out.lines)
self.assertIn(" numpy: %s" % np.__version__, out.lines)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/debugger_cli_common_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the shared functions and classes for tfdbg CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class BytesToReadableStrTest(test_util.TensorFlowTestCase):
def testNoneSizeWorks(self):
self.assertEqual(str(None), cli_shared.bytes_to_readable_str(None))
def testSizesBelowOneKiloByteWorks(self):
self.assertEqual("0", cli_shared.bytes_to_readable_str(0))
self.assertEqual("500", cli_shared.bytes_to_readable_str(500))
self.assertEqual("1023", cli_shared.bytes_to_readable_str(1023))
  def testSizesBetweenOneKiloByteAndOneMegaByteWorks(self):
self.assertEqual("1.00k", cli_shared.bytes_to_readable_str(1024))
self.assertEqual("2.40k", cli_shared.bytes_to_readable_str(int(1024 * 2.4)))
self.assertEqual("1023.00k", cli_shared.bytes_to_readable_str(1024 * 1023))
  def testSizesBetweenOneMegaByteAndOneGigaByteWorks(self):
self.assertEqual("1.00M", cli_shared.bytes_to_readable_str(1024**2))
self.assertEqual("2.40M",
cli_shared.bytes_to_readable_str(int(1024**2 * 2.4)))
self.assertEqual("1023.00M",
cli_shared.bytes_to_readable_str(1024**2 * 1023))
def testSizeAboveOneGigaByteWorks(self):
self.assertEqual("1.00G", cli_shared.bytes_to_readable_str(1024**3))
self.assertEqual("2000.00G",
cli_shared.bytes_to_readable_str(1024**3 * 2000))
def testReadableStrIncludesBAtTheEndOnRequest(self):
self.assertEqual("0B", cli_shared.bytes_to_readable_str(0, include_b=True))
self.assertEqual(
"1.00kB", cli_shared.bytes_to_readable_str(
1024, include_b=True))
self.assertEqual(
"1.00MB", cli_shared.bytes_to_readable_str(
1024**2, include_b=True))
self.assertEqual(
"1.00GB", cli_shared.bytes_to_readable_str(
1024**3, include_b=True))
class TimeToReadableStrTest(test_util.TensorFlowTestCase):
def testNoneTimeWorks(self):
self.assertEqual("0", cli_shared.time_to_readable_str(None))
def testMicrosecondsTime(self):
self.assertEqual("40us", cli_shared.time_to_readable_str(40))
def testMillisecondTime(self):
self.assertEqual("40ms", cli_shared.time_to_readable_str(40e3))
def testSecondTime(self):
self.assertEqual("40s", cli_shared.time_to_readable_str(40e6))
def testForceTimeUnit(self):
self.assertEqual("40s",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_S))
self.assertEqual("40000ms",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_MS))
self.assertEqual("40000000us",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_US))
self.assertEqual("4e-05s",
cli_shared.time_to_readable_str(
40, force_time_unit=cli_shared.TIME_UNIT_S))
self.assertEqual("0",
cli_shared.time_to_readable_str(
0, force_time_unit=cli_shared.TIME_UNIT_S))
with self.assertRaisesRegexp(ValueError, r"Invalid time unit: ks"):
cli_shared.time_to_readable_str(100, force_time_unit="ks")
@test_util.run_deprecated_v1
class GetRunStartIntroAndDescriptionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.const_a = constant_op.constant(11.0, name="a")
self.const_b = constant_op.constant(22.0, name="b")
self.const_c = constant_op.constant(33.0, name="c")
self.sparse_d = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[3, 3])
def tearDown(self):
ops.reset_default_graph()
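  # The index-based assertions below rely on the fixed line layout of the
  # RichTextLines object returned by get_run_start_intro: line 1 ends with
  # the run() call count, the fetch list starts at line 4, and the feed
  # section follows the fetch block.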
def testSingleFetchNoFeeds(self):
run_start_intro = cli_shared.get_run_start_intro(12, self.const_a, None, {})
# Verify line about run() call number.
self.assertTrue(run_start_intro.lines[1].endswith("run() call #12:"))
# Verify line about fetch.
const_a_name_line = run_start_intro.lines[4]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
# Verify line about feeds.
feeds_line = run_start_intro.lines[7]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify lines about possible commands and their font attributes.
self.assertEqual("run:", run_start_intro.lines[11][2:])
annot = run_start_intro.font_attr_segs[11][0]
self.assertEqual(2, annot[0])
self.assertEqual(5, annot[1])
self.assertEqual("run", annot[2][0].content)
self.assertEqual("bold", annot[2][1])
annot = run_start_intro.font_attr_segs[13][0]
self.assertEqual(2, annot[0])
self.assertEqual(8, annot[1])
self.assertEqual("run -n", annot[2][0].content)
self.assertEqual("bold", annot[2][1])
self.assertEqual("run -t <T>:", run_start_intro.lines[15][2:])
self.assertEqual([(2, 12, "bold")], run_start_intro.font_attr_segs[15])
self.assertEqual("run -f <filter_name>:", run_start_intro.lines[17][2:])
self.assertEqual([(2, 22, "bold")], run_start_intro.font_attr_segs[17])
# Verify short description.
description = cli_shared.get_run_short_description(12, self.const_a, None)
self.assertEqual("run #12: 1 fetch (a:0); 0 feeds", description)
# Verify the main menu associated with the run_start_intro.
self.assertIn(debugger_cli_common.MAIN_MENU_KEY,
run_start_intro.annotations)
menu = run_start_intro.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertEqual("run", menu.caption_to_item("run").content)
self.assertEqual("exit", menu.caption_to_item("exit").content)
def testSparseTensorAsFeedShouldHandleNoNameAttribute(self):
sparse_feed_val = ([[0, 0], [1, 1]], [10.0, 20.0])
run_start_intro = cli_shared.get_run_start_intro(
1, self.sparse_d, {self.sparse_d: sparse_feed_val}, {})
self.assertEqual(str(self.sparse_d), run_start_intro.lines[7].strip())
short_description = cli_shared.get_run_short_description(
1, self.sparse_d, {self.sparse_d: sparse_feed_val})
self.assertEqual(
"run #1: 1 fetch; 1 feed (%s)" % self.sparse_d, short_description)
def testSparseTensorAsFetchShouldHandleNoNameAttribute(self):
run_start_intro = cli_shared.get_run_start_intro(1, self.sparse_d, None, {})
self.assertEqual(str(self.sparse_d), run_start_intro.lines[4].strip())
def testTwoFetchesListNoFeeds(self):
fetches = [self.const_a, self.const_b]
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_a_name_line = run_start_intro.lines[4]
const_b_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
self.assertEqual(self.const_b.name, const_b_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testNestedListAsFetches(self):
fetches = [self.const_c, [self.const_a, self.const_b]]
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
# Verify lines about the fetches.
self.assertEqual(self.const_c.name, run_start_intro.lines[4].strip())
self.assertEqual(self.const_a.name, run_start_intro.lines[5].strip())
self.assertEqual(self.const_b.name, run_start_intro.lines[6].strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 3 fetches; 0 feeds", description)
def testNestedDictAsFetches(self):
fetches = {"c": self.const_c, "ab": {"a": self.const_a, "b": self.const_b}}
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
# Verify lines about the fetches. The ordering of the dict keys is
# indeterminate.
fetch_names = set()
fetch_names.add(run_start_intro.lines[4].strip())
fetch_names.add(run_start_intro.lines[5].strip())
fetch_names.add(run_start_intro.lines[6].strip())
self.assertEqual({"a:0", "b:0", "c:0"}, fetch_names)
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 3 fetches; 0 feeds", description)
def testTwoFetchesAsTupleNoFeeds(self):
fetches = (self.const_a, self.const_b)
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_a_name_line = run_start_intro.lines[4]
const_b_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
self.assertEqual(self.const_b.name, const_b_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testTwoFetchesAsNamedTupleNoFeeds(self):
fetches_namedtuple = namedtuple("fetches", "x y")
fetches = fetches_namedtuple(self.const_b, self.const_c)
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_b_name_line = run_start_intro.lines[4]
const_c_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_b.name, const_b_name_line.strip())
self.assertEqual(self.const_c.name, const_c_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testWithFeedDict(self):
feed_dict = {
self.const_a: 10.0,
self.const_b: 20.0,
}
run_start_intro = cli_shared.get_run_start_intro(1, self.const_c, feed_dict,
{})
const_c_name_line = run_start_intro.lines[4]
self.assertEqual(self.const_c.name, const_c_name_line.strip())
# Verify lines about the feed dict.
feed_a_line = run_start_intro.lines[7]
feed_b_line = run_start_intro.lines[8]
self.assertEqual(self.const_a.name, feed_a_line.strip())
self.assertEqual(self.const_b.name, feed_b_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, self.const_c,
feed_dict)
self.assertEqual("run #1: 1 fetch (c:0); 2 feeds", description)
def testTensorFilters(self):
feed_dict = {self.const_a: 10.0}
tensor_filters = {
"filter_a": lambda x: True,
"filter_b": lambda x: False,
}
run_start_intro = cli_shared.get_run_start_intro(1, self.const_c, feed_dict,
tensor_filters)
# Verify the listed names of the tensor filters.
filter_names = set()
filter_names.add(run_start_intro.lines[20].split(" ")[-1])
filter_names.add(run_start_intro.lines[21].split(" ")[-1])
self.assertEqual({"filter_a", "filter_b"}, filter_names)
# Verify short description.
description = cli_shared.get_run_short_description(1, self.const_c,
feed_dict)
self.assertEqual("run #1: 1 fetch (c:0); 1 feed (a:0)", description)
# Verify the command links for the two filters.
command_set = set()
annot = run_start_intro.font_attr_segs[20][0]
command_set.add(annot[2].content)
annot = run_start_intro.font_attr_segs[21][0]
command_set.add(annot[2].content)
self.assertEqual({"run -f filter_a", "run -f filter_b"}, command_set)
def testGetRunShortDescriptionWorksForTensorFeedKey(self):
short_description = cli_shared.get_run_short_description(
1, self.const_a, {self.const_a: 42.0})
self.assertEqual("run #1: 1 fetch (a:0); 1 feed (a:0)", short_description)
def testGetRunShortDescriptionWorksForUnicodeFeedKey(self):
short_description = cli_shared.get_run_short_description(
1, self.const_a, {u"foo": 42.0})
self.assertEqual("run #1: 1 fetch (a:0); 1 feed (foo)", short_description)
@test_util.run_deprecated_v1
class GetErrorIntroTest(test_util.TensorFlowTestCase):
def setUp(self):
self.var_a = variables.Variable(42.0, name="a")
def tearDown(self):
ops.reset_default_graph()
def testShapeError(self):
tf_error = errors.OpError(None, self.var_a.initializer, "foo description",
None)
error_intro = cli_shared.get_error_intro(tf_error)
self.assertEqual("!!! An error occurred during the run !!!",
error_intro.lines[1])
self.assertEqual([(0, len(error_intro.lines[1]), "blink")],
error_intro.font_attr_segs[1])
self.assertEqual(2, error_intro.lines[4].index("ni -a -d -t a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[4][0][0])
self.assertEqual(22, error_intro.font_attr_segs[4][0][1])
self.assertEqual("ni -a -d -t a/Assign",
error_intro.font_attr_segs[4][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[4][0][2][1])
self.assertEqual(2, error_intro.lines[6].index("li -r a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[6][0][0])
self.assertEqual(16, error_intro.font_attr_segs[6][0][1])
self.assertEqual("li -r a/Assign",
error_intro.font_attr_segs[6][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[6][0][2][1])
self.assertEqual(2, error_intro.lines[8].index("lt"))
self.assertEqual(2, error_intro.font_attr_segs[8][0][0])
self.assertEqual(4, error_intro.font_attr_segs[8][0][1])
self.assertEqual("lt", error_intro.font_attr_segs[8][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[8][0][2][1])
self.assertStartsWith(error_intro.lines[11], "Op name:")
self.assertTrue(error_intro.lines[11].endswith("a/Assign"))
self.assertStartsWith(error_intro.lines[12], "Error type:")
self.assertTrue(error_intro.lines[12].endswith(str(type(tf_error))))
self.assertEqual("Details:", error_intro.lines[14])
self.assertStartsWith(error_intro.lines[15], "foo description")
def testGetErrorIntroForNoOpName(self):
tf_error = errors.OpError(None, None, "Fake OpError", -1)
error_intro = cli_shared.get_error_intro(tf_error)
self.assertIn("Cannot determine the name of the op", error_intro.lines[3])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/cli_shared_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Offline dump analyzer of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.platform import app
def main(_):
if FLAGS.log_usage:
pass # No logging for open-source.
if not FLAGS.dump_dir:
print("ERROR: dump_dir flag is empty.", file=sys.stderr)
sys.exit(1)
print("tfdbg offline: FLAGS.dump_dir = %s" % FLAGS.dump_dir)
debug_dump = debug_data.DebugDumpDir(
FLAGS.dump_dir, validate=FLAGS.validate_graph)
cli = analyzer_cli.create_analyzer_ui(
debug_dump,
tensor_filters={"has_inf_or_nan": debug_data.has_inf_or_nan},
ui_type=FLAGS.ui_type)
title = "tfdbg offline @ %s" % FLAGS.dump_dir
cli.run_ui(title=title, title_color="black_on_white", init_command="lt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--dump_dir", type=str, default="", help="tfdbg dump directory to load")
parser.add_argument(
"--log_usage",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether the usage of this tool is to be logged")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--validate_graph",
nargs="?",
const=True,
type="bool",
default=True,
help="""\
Whether the dumped tensors will be validated against the GraphDefs\
""")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/python/debug/cli/offline_analyzer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arbitrary expression evaluator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.debug.cli import evaluator
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
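# The tests below exercise the debug tensor name format accepted by
# evaluator._parse_debug_tensor_name: an optional device name prefix
# (e.g. "/job:ps/replica:0/task:2/cpu:0:"), then "<node_name>:<output_slot>",
# an optional ":<debug_op>" suffix (default "DebugIdentity"), and an optional
# "[<exec_index>]" suffix (default 0), e.g.
# "/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount".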
class ParseDebugTensorNameTest(test_util.TensorFlowTestCase):
def testParseNamesWithoutPrefixOrSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("hidden_0/Weights:0"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithoutPrefixWithDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1:DebugNanCount"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugNanCount", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"hidden_0/Weights:0:DebugNumericSummary"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugNumericSummary", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithDeviceNamePrefixWithoutDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1"))
self.assertEqual("/job:ps/replica:0/task:2/cpu:0", device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:worker/replica:0/task:3/gpu:0:hidden_0/Weights:0"))
self.assertEqual("/job:worker/replica:0/task:3/gpu:0", device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithDeviceNamePrefixWithDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount"))
self.assertEqual("/job:ps/replica:0/task:2/cpu:0", device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugNanCount", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:worker/replica:0/task:3/gpu:0:"
"hidden_0/Weights:0:DebugNumericSummary"))
self.assertEqual("/job:worker/replica:0/task:3/gpu:0", device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugNumericSummary", debug_op)
self.assertEqual(0, exec_index)
def testParseMalformedDebugTensorName(self):
with self.assertRaisesRegexp(
ValueError,
r"The debug tensor name in the to-be-evaluated expression is "
r"malformed:"):
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount:1337")
with self.assertRaisesRegexp(
ValueError,
r"The debug tensor name in the to-be-evaluated expression is "
r"malformed:"):
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/cpu:0:foo:1:DebugNanCount")
with self.assertRaises(ValueError):
evaluator._parse_debug_tensor_name(
"foo:1:DebugNanCount[]")
with self.assertRaises(ValueError):
evaluator._parse_debug_tensor_name(
"foo:1[DebugNanCount]")
def testParseNamesWithExecIndex(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1[20]"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(20, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("hidden_0/Weights:0[3]"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(3, exec_index)
class EvaluatorTest(test_util.TensorFlowTestCase):
def testEvaluateSingleTensor(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del node_name, output_slot, debug_op, device_name # Unused.
return [np.array([[1.0, 2.0, 3.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertEqual(3, ev.evaluate("np.size(`a:0`)"))
# Whitespace in backticks should be tolerated.
self.assertEqual(3, ev.evaluate("np.size(` a:0 `)"))
def testEvaluateTwoTensors(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del debug_op, device_name # Unused.
if node_name == "a" and output_slot == 0:
return [np.array([[1.0, -2.0], [0.0, 1.0]])]
elif node_name == "b" and output_slot == 0:
return [np.array([[-1.0], [1.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertAllClose([[-3.0], [1.0]],
ev.evaluate("np.matmul(`a:0`, `b:0`)"))
self.assertAllClose(
[[-4.0], [2.0]], ev.evaluate("np.matmul(`a:0`, `b:0`) + `b:0`"))
  def testEvaluateNonexistentTensorGeneratesError(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del node_name, output_slot, debug_op, device_name # Unused.
raise debug_data.WatchKeyDoesNotExistInDebugDumpDirError()
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaisesRegexp(
ValueError, "Eval failed due to the value of .* being unavailable"):
ev.evaluate("np.matmul(`a:0`, `b:0`)")
def testEvaluateWithMultipleDevicesContainingTheSameTensorName(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del output_slot, debug_op # Unused.
if node_name == "a" and device_name is None:
raise ValueError(
"There are multiple (2) devices with nodes named 'a' but "
"device_name is not specified")
elif (node_name == "a" and
device_name == "/job:worker/replica:0/task:0/cpu:0"):
return [np.array(10.0)]
elif (node_name == "a" and
device_name == "/job:worker/replica:0/task:1/cpu:0"):
return [np.array(20.0)]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaisesRegexp(ValueError, r"multiple \(2\) devices"):
ev.evaluate("`a:0` + `a:0`")
self.assertAllClose(
30.0,
ev.evaluate("`/job:worker/replica:0/task:0/cpu:0:a:0` + "
"`/job:worker/replica:0/task:1/cpu:0:a:0`"))
def testEvaluateWithNonDefaultDebugOp(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del device_name # Unused.
if node_name == "a" and output_slot == 0 and debug_op == "DebugIdentity":
return [np.array([[-1.0], [1.0]])]
elif node_name == "a" and output_slot == 0 and debug_op == "DebugFoo":
return [np.array([[-2.0, 2.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertAllClose(
[[4.0]],
ev.evaluate("np.matmul(`a:0:DebugFoo`, `a:0:DebugIdentity`)"))
def testEvaluateWithMultipleExecIndexes(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del debug_op, device_name # Unused.
if node_name == "a" and output_slot == 0:
return [np.array([[-1.0], [1.0]]), np.array([[-2.0], [2.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertAllClose(
[[4.0]], ev.evaluate("np.matmul(`a:0[1]`.T, `a:0[0]`)"))
def testEvaluateExpressionWithUnmatchedBacktick(self):
dump = test.mock.MagicMock()
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaises(SyntaxError):
ev.evaluate("np.matmul(`a:0`, `b:0`) + `b:0")
def testEvaluateExpressionWithInvalidDebugTensorName(self):
dump = test.mock.MagicMock()
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaisesRegexp(
ValueError, r".* tensor name .* expression .* malformed"):
ev.evaluate("np.matmul(`a`, `b`)")
with self.assertRaisesRegexp(
ValueError, r".* tensor name .* expression .* malformed"):
ev.evaluate("np.matmul(`a:0:DebugIdentity:0`, `b:1:DebugNanCount:2`)")
with self.assertRaises(ValueError):
ev.evaluate("np.matmul(`a:0[]`, `b:0[]`)")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/evaluator_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow Debugger command parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ParseCommandTest(test_util.TensorFlowTestCase):
def testParseNoBracketsOrQuotes(self):
command = ""
self.assertEqual([], command_parser.parse_command(command))
command = "a"
self.assertEqual(["a"], command_parser.parse_command(command))
command = "foo bar baz qux"
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
command = "foo bar\tbaz\t qux"
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
def testParseLeadingTrailingWhitespaces(self):
command = " foo bar baz qux "
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
command = "\nfoo bar baz qux\n"
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
def testParseCommandsWithBrackets(self):
command = "pt foo[1, 2, :]"
self.assertEqual(["pt", "foo[1, 2, :]"],
command_parser.parse_command(command))
command = "pt foo[1, 2, :] -a"
self.assertEqual(["pt", "foo[1, 2, :]", "-a"],
command_parser.parse_command(command))
command = "inject_value foo [1, 2,:] 0"
self.assertEqual(["inject_value", "foo", "[1, 2,:]", "0"],
command_parser.parse_command(command))
def testParseCommandWithTwoArgsContainingBrackets(self):
command = "pt foo[1, :] bar[:, 2]"
self.assertEqual(["pt", "foo[1, :]", "bar[:, 2]"],
command_parser.parse_command(command))
command = "pt foo[] bar[:, 2]"
self.assertEqual(["pt", "foo[]", "bar[:, 2]"],
command_parser.parse_command(command))
def testParseCommandWithUnmatchedBracket(self):
command = "pt foo[1, 2, :"
self.assertNotEqual(["pt", "foo[1, 2, :]"],
command_parser.parse_command(command))
  def testParseCommandsWithQuotes(self):
    # The pair of double quotes should be stripped.
    command = "inject_value foo \"np.zeros([100, 500])\""
    self.assertEqual(["inject_value", "foo", "np.zeros([100, 500])"],
                     command_parser.parse_command(command))
    # The pair of single quotes should be stripped.
    command = "inject_value foo 'np.zeros([100, 500])'"
    self.assertEqual(["inject_value", "foo", "np.zeros([100, 500])"],
                     command_parser.parse_command(command))
    command = "\"command prefix with spaces\" arg1"
    self.assertEqual(["command prefix with spaces", "arg1"],
                     command_parser.parse_command(command))
def testParseCommandWithTwoArgsContainingQuotes(self):
command = "foo \"bar\" \"qux\""
self.assertEqual(["foo", "bar", "qux"],
command_parser.parse_command(command))
command = "foo \"\" \"qux\""
self.assertEqual(["foo", "", "qux"],
command_parser.parse_command(command))
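# extract_output_file_path splits a trailing "> <path>" redirect off a list of
# parsed command arguments, whether the ">" arrives as a separate token, fused
# with the preceding argument, or fused with the path itself. ">"-style tokens
# that belong to interval flags (e.g. "--execution_time=>100ms") must be left
# intact, as the tests below verify.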
class ExtractOutputFilePathTest(test_util.TensorFlowTestCase):
def testNoOutputFilePathIsReflected(self):
args, output_path = command_parser.extract_output_file_path(["pt", "a:0"])
self.assertEqual(["pt", "a:0"], args)
self.assertIsNone(output_path)
def testHasOutputFilePathInOneArgsIsReflected(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0", ">/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testHasOutputFilePathInTwoArgsIsReflected(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0", ">", "/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testHasGreaterThanSignButNoFileNameCausesSyntaxError(self):
with self.assertRaisesRegexp(SyntaxError, "Redirect file path is empty"):
command_parser.extract_output_file_path(
["pt", "a:0", ">"])
def testOutputPathMergedWithLastArgIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0>/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testOutputPathInLastArgGreaterThanInSecondLastIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0>", "/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testFlagWithEqualGreaterThanShouldIgnoreIntervalFlags(self):
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time=>100ms"])
self.assertEqual(["lp", "--execution_time=>100ms"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time", ">1.2s"])
self.assertEqual(["lp", "--execution_time", ">1.2s"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "-e", ">1200"])
self.assertEqual(["lp", "-e", ">1200"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--foo_value", ">-.2MB"])
self.assertEqual(["lp", "--foo_value", ">-.2MB"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--bar_value", ">-42e3GB"])
self.assertEqual(["lp", "--bar_value", ">-42e3GB"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time", ">=100ms"])
self.assertEqual(["lp", "--execution_time", ">=100ms"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time=>=100ms"])
self.assertEqual(["lp", "--execution_time=>=100ms"], args)
self.assertIsNone(output_path)
def testFlagWithEqualGreaterThanShouldRecognizeFilePaths(self):
args, output_path = command_parser.extract_output_file_path(
["lp", ">1.2s"])
self.assertEqual(["lp"], args)
self.assertEqual("1.2s", output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time", ">x.yms"])
self.assertEqual(["lp", "--execution_time"], args)
self.assertEqual("x.yms", output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--memory", ">a.1kB"])
self.assertEqual(["lp", "--memory"], args)
self.assertEqual("a.1kB", output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--memory", ">e002MB"])
self.assertEqual(["lp", "--memory"], args)
self.assertEqual("e002MB", output_path)
def testOneArgumentIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path(["lt"])
self.assertEqual(["lt"], args)
self.assertIsNone(output_path)
def testEmptyArgumentIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path([])
self.assertEqual([], args)
self.assertIsNone(output_path)
class ParseTensorNameTest(test_util.TensorFlowTestCase):
def testParseTensorNameWithoutSlicing(self):
(tensor_name,
tensor_slicing) = command_parser.parse_tensor_name_with_slicing(
"hidden/weights/Variable:0")
self.assertEqual("hidden/weights/Variable:0", tensor_name)
self.assertEqual("", tensor_slicing)
def testParseTensorNameWithSlicing(self):
(tensor_name,
tensor_slicing) = command_parser.parse_tensor_name_with_slicing(
"hidden/weights/Variable:0[:, 1]")
self.assertEqual("hidden/weights/Variable:0", tensor_name)
self.assertEqual("[:, 1]", tensor_slicing)
class ValidateSlicingStringTest(test_util.TensorFlowTestCase):
def testValidateValidSlicingStrings(self):
self.assertTrue(command_parser.validate_slicing_string("[1]"))
self.assertTrue(command_parser.validate_slicing_string("[2,3]"))
self.assertTrue(command_parser.validate_slicing_string("[4, 5, 6]"))
self.assertTrue(command_parser.validate_slicing_string("[7,:, :]"))
def testValidateInvalidSlicingStrings(self):
self.assertFalse(command_parser.validate_slicing_string(""))
self.assertFalse(command_parser.validate_slicing_string("[1,"))
self.assertFalse(command_parser.validate_slicing_string("2,3]"))
self.assertFalse(command_parser.validate_slicing_string("[4, foo()]"))
self.assertFalse(command_parser.validate_slicing_string("[5, bar]"))
class ParseIndicesTest(test_util.TensorFlowTestCase):
def testParseValidIndicesStringsWithBrackets(self):
self.assertEqual([0], command_parser.parse_indices("[0]"))
self.assertEqual([0], command_parser.parse_indices(" [0] "))
self.assertEqual([-1, 2], command_parser.parse_indices("[-1, 2]"))
self.assertEqual([3, 4, -5],
command_parser.parse_indices("[3,4,-5]"))
def testParseValidIndicesStringsWithoutBrackets(self):
self.assertEqual([0], command_parser.parse_indices("0"))
self.assertEqual([0], command_parser.parse_indices(" 0 "))
self.assertEqual([-1, 2], command_parser.parse_indices("-1, 2"))
self.assertEqual([3, 4, -5], command_parser.parse_indices("3,4,-5"))
  def testParseInvalidIndicesStringsWithoutBrackets(self):
    with self.assertRaisesRegexp(
        ValueError, r"invalid literal for int\(\) with base 10: 'a'"):
      command_parser.parse_indices("0,a")
    with self.assertRaisesRegexp(
        ValueError, r"invalid literal for int\(\) with base 10: '2\]'"):
      command_parser.parse_indices("1, 2]")
    with self.assertRaisesRegexp(
        ValueError, r"invalid literal for int\(\) with base 10: ''"):
      command_parser.parse_indices("3, 4,")
class ParseRangesTest(test_util.TensorFlowTestCase):
INF_VALUE = sys.float_info.max
def testParseEmptyRangeString(self):
self.assertEqual([], command_parser.parse_ranges(""))
self.assertEqual([], command_parser.parse_ranges(" "))
def testParseSingleRange(self):
self.assertAllClose([[-0.1, 0.2]],
command_parser.parse_ranges("[-0.1, 0.2]"))
self.assertAllClose([[-0.1, self.INF_VALUE]],
command_parser.parse_ranges("[-0.1, inf]"))
self.assertAllClose([[-self.INF_VALUE, self.INF_VALUE]],
command_parser.parse_ranges("[-inf, inf]"))
def testParseSingleListOfRanges(self):
self.assertAllClose([[-0.1, 0.2], [10.0, 12.0]],
command_parser.parse_ranges("[[-0.1, 0.2], [10, 12]]"))
self.assertAllClose(
[[-self.INF_VALUE, -1.0], [1.0, self.INF_VALUE]],
command_parser.parse_ranges("[[-inf, -1.0],[1.0, inf]]"))
def testParseInvalidRangeString(self):
with self.assertRaises(SyntaxError):
command_parser.parse_ranges("[[1,2]")
with self.assertRaisesRegexp(ValueError,
"Incorrect number of elements in range"):
command_parser.parse_ranges("[1,2,3]")
with self.assertRaisesRegexp(ValueError,
"Incorrect number of elements in range"):
command_parser.parse_ranges("[inf]")
with self.assertRaisesRegexp(ValueError,
"Incorrect type in the 1st element of range"):
command_parser.parse_ranges("[1j, 1]")
with self.assertRaisesRegexp(ValueError,
"Incorrect type in the 2nd element of range"):
command_parser.parse_ranges("[1, 1j]")
class ParseReadableSizeStrTest(test_util.TensorFlowTestCase):
def testParseNoUnitWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0"))
self.assertEqual(1024, command_parser.parse_readable_size_str("1024 "))
self.assertEqual(2000, command_parser.parse_readable_size_str(" 2000 "))
def testParseKiloBytesWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0kB"))
self.assertEqual(1024**2, command_parser.parse_readable_size_str("1024 kB"))
self.assertEqual(1024**2 * 2,
command_parser.parse_readable_size_str("2048k"))
self.assertEqual(1024**2 * 2,
command_parser.parse_readable_size_str("2048kB"))
self.assertEqual(1024 / 4, command_parser.parse_readable_size_str("0.25k"))
def testParseMegaBytesWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0MB"))
self.assertEqual(1024**3, command_parser.parse_readable_size_str("1024 MB"))
self.assertEqual(1024**3 * 2,
command_parser.parse_readable_size_str("2048M"))
self.assertEqual(1024**3 * 2,
command_parser.parse_readable_size_str("2048MB"))
self.assertEqual(1024**2 / 4,
command_parser.parse_readable_size_str("0.25M"))
def testParseGigaBytesWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0GB"))
self.assertEqual(1024**4, command_parser.parse_readable_size_str("1024 GB"))
self.assertEqual(1024**4 * 2,
command_parser.parse_readable_size_str("2048G"))
self.assertEqual(1024**4 * 2,
command_parser.parse_readable_size_str("2048GB"))
self.assertEqual(1024**3 / 4,
command_parser.parse_readable_size_str("0.25G"))
def testParseUnsupportedUnitRaisesException(self):
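    # "Failed to parsed" is not a typo introduced by this test: the regexes
    # mirror the exact error text raised by
    # command_parser.parse_readable_size_str.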
with self.assertRaisesRegexp(
ValueError, "Failed to parsed human-readable byte size str: \"0foo\""):
command_parser.parse_readable_size_str("0foo")
with self.assertRaisesRegexp(
ValueError, "Failed to parsed human-readable byte size str: \"2E\""):
command_parser.parse_readable_size_str("2EB")
class ParseReadableTimeStrTest(test_util.TensorFlowTestCase):
def testParseNoUnitWorks(self):
self.assertEqual(0, command_parser.parse_readable_time_str("0"))
self.assertEqual(100, command_parser.parse_readable_time_str("100 "))
self.assertEqual(25, command_parser.parse_readable_time_str(" 25 "))
def testParseSeconds(self):
self.assertEqual(1e6, command_parser.parse_readable_time_str("1 s"))
self.assertEqual(2e6, command_parser.parse_readable_time_str("2s"))
def testParseMicros(self):
self.assertEqual(2, command_parser.parse_readable_time_str("2us"))
def testParseMillis(self):
self.assertEqual(2e3, command_parser.parse_readable_time_str("2ms"))
def testParseUnsupportedUnitRaisesException(self):
with self.assertRaisesRegexp(
ValueError, r".*float.*2us.*"):
command_parser.parse_readable_time_str("2uss")
with self.assertRaisesRegexp(
ValueError, r".*float.*2m.*"):
command_parser.parse_readable_time_str("2m")
with self.assertRaisesRegexp(
ValueError, r"Invalid time -1. Time value must be positive."):
command_parser.parse_readable_time_str("-1s")
class ParseInterval(test_util.TensorFlowTestCase):
def testParseTimeInterval(self):
    self.assertEqual(
        command_parser.Interval(10, True, 1e3, True),
        command_parser.parse_time_interval("[10us, 1ms]"))
    self.assertEqual(
        command_parser.Interval(10, False, 1e3, False),
        command_parser.parse_time_interval("(10us, 1ms)"))
    self.assertEqual(
        command_parser.Interval(10, False, 1e3, True),
        command_parser.parse_time_interval("(10us, 1ms]"))
    self.assertEqual(
        command_parser.Interval(10, True, 1e3, False),
        command_parser.parse_time_interval("[10us, 1ms)"))
    self.assertEqual(command_parser.Interval(0, False, 1e3, True),
                     command_parser.parse_time_interval("<=1ms"))
    self.assertEqual(
        command_parser.Interval(1e3, True, float("inf"), False),
        command_parser.parse_time_interval(">=1ms"))
    self.assertEqual(command_parser.Interval(0, False, 1e3, False),
                     command_parser.parse_time_interval("<1ms"))
    self.assertEqual(
        command_parser.Interval(1e3, False, float("inf"), False),
        command_parser.parse_time_interval(">1ms"))
def testParseTimeGreaterLessThanWithInvalidValueStrings(self):
with self.assertRaisesRegexp(ValueError, "Invalid value string after >= "):
command_parser.parse_time_interval(">=wms")
with self.assertRaisesRegexp(ValueError, "Invalid value string after > "):
command_parser.parse_time_interval(">Yms")
with self.assertRaisesRegexp(ValueError, "Invalid value string after <= "):
command_parser.parse_time_interval("<= _ms")
with self.assertRaisesRegexp(ValueError, "Invalid value string after < "):
command_parser.parse_time_interval("<-ms")
def testParseTimeIntervalsWithInvalidValueStrings(self):
with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
command_parser.parse_time_interval("[wms, 10ms]")
with self.assertRaisesRegexp(ValueError,
"Invalid second item in interval:"):
command_parser.parse_time_interval("[ 0ms, _ms]")
with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
command_parser.parse_time_interval("(xms, _ms]")
with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
command_parser.parse_time_interval("((3ms, _ms)")
def testInvalidTimeIntervalRaisesException(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid interval format: \[10us, 1ms. Valid formats are: "
r"\[min, max\], \(min, max\), <max, >min"):
command_parser.parse_time_interval("[10us, 1ms")
with self.assertRaisesRegexp(
ValueError,
r"Incorrect interval format: \[10us, 1ms, 2ms\]. Interval should "
r"specify two values: \[min, max\] or \(min, max\)"):
command_parser.parse_time_interval("[10us, 1ms, 2ms]")
with self.assertRaisesRegexp(
ValueError,
r"Invalid interval \[1s, 1ms\]. Start must be before end of interval."):
command_parser.parse_time_interval("[1s, 1ms]")
def testParseMemoryInterval(self):
    self.assertEqual(
        command_parser.Interval(1024, True, 2048, True),
        command_parser.parse_memory_interval("[1k, 2k]"))
    self.assertEqual(
        command_parser.Interval(1024, False, 2048, False),
        command_parser.parse_memory_interval("(1kB, 2kB)"))
    self.assertEqual(
        command_parser.Interval(1024, False, 2048, True),
        command_parser.parse_memory_interval("(1k, 2k]"))
    self.assertEqual(
        command_parser.Interval(1024, True, 2048, False),
        command_parser.parse_memory_interval("[1k, 2k)"))
    self.assertEqual(
        command_parser.Interval(0, False, 2048, True),
        command_parser.parse_memory_interval("<=2k"))
    self.assertEqual(
        command_parser.Interval(11, True, float("inf"), False),
        command_parser.parse_memory_interval(">=11"))
    self.assertEqual(command_parser.Interval(0, False, 2048, False),
                     command_parser.parse_memory_interval("<2k"))
    self.assertEqual(
        command_parser.Interval(11, False, float("inf"), False),
        command_parser.parse_memory_interval(">11"))
  def testParseMemoryIntervalsWithInvalidValueStrings(self):
    with self.assertRaisesRegexp(ValueError, "Invalid value string after >= "):
      command_parser.parse_memory_interval(">=wM")
    with self.assertRaisesRegexp(ValueError, "Invalid value string after > "):
      command_parser.parse_memory_interval(">YM")
    with self.assertRaisesRegexp(ValueError, "Invalid value string after <= "):
      command_parser.parse_memory_interval("<= _MB")
    with self.assertRaisesRegexp(ValueError, "Invalid value string after < "):
      command_parser.parse_memory_interval("<-MB")
def testInvalidMemoryIntervalRaisesException(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid interval \[5k, 3k\]. Start of interval must be less than or "
"equal to end of interval."):
command_parser.parse_memory_interval("[5k, 3k]")
def testIntervalContains(self):
interval = command_parser.Interval(
start=1, start_included=True, end=10, end_included=True)
self.assertTrue(interval.contains(1))
self.assertTrue(interval.contains(10))
self.assertTrue(interval.contains(5))
interval.start_included = False
self.assertFalse(interval.contains(1))
self.assertTrue(interval.contains(10))
interval.end_included = False
self.assertFalse(interval.contains(1))
self.assertFalse(interval.contains(10))
interval.start_included = True
self.assertTrue(interval.contains(1))
self.assertFalse(interval.contains(10))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/command_parser_test.py
|
tensorflow-master
|
tensorflow/python/debug/cli/__init__.py
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for profile_analyzer_cli."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import profile_analyzer_cli
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def no_rewrite_session_config():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
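# Returns the line number of the line directly above the call site:
# tf_inspect.stack()[1] is the caller's frame record, and index 2 of that
# record is the caller's line number.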
def _line_number_above():
return tf_inspect.stack()[1][2] - 1
def _at_least_one_line_matches(pattern, lines):
pattern_re = re.compile(pattern)
for i, line in enumerate(lines):
if pattern_re.search(line):
return True, i
return False, None
def _assert_at_least_one_line_matches(pattern, lines):
any_match, _ = _at_least_one_line_matches(pattern, lines)
if not any_match:
raise AssertionError(
"%s does not match any line in %s." % (pattern, str(lines)))
def _assert_no_lines_match(pattern, lines):
any_match, _ = _at_least_one_line_matches(pattern, lines)
if any_match:
raise AssertionError(
"%s matched at least one line in %s." % (pattern, str(lines)))
@test_util.run_v1_only("b/120545219")
class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase):
def testNodeInfoEmpty(self):
graph = ops.Graph()
run_metadata = config_pb2.RunMetadata()
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
prof_output = prof_analyzer.list_profile([]).lines
    self.assertEqual([""], prof_output)
def testSingleDevice(self):
node1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
node2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
op_start_rel_micros=1,
op_end_rel_micros=2,
all_end_rel_micros=3)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = "deviceA"
device1.node_stats.extend([node1, node2])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = "Add/123"
op1.traceback = [("a/b/file1", 10, "some_var")]
op1.type = "add"
op2 = test.mock.MagicMock()
op2.name = "Mul/456"
op2.traceback = [("a/b/file1", 11, "some_var")]
op2.type = "mul"
graph.get_operations.return_value = [op1, op2]
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
prof_output = prof_analyzer.list_profile([]).lines
_assert_at_least_one_line_matches(r"Device 1 of 1: deviceA", prof_output)
_assert_at_least_one_line_matches(r"^Add/123.*add.*2us.*4us", prof_output)
_assert_at_least_one_line_matches(r"^Mul/456.*mul.*1us.*3us", prof_output)
def testMultipleDevices(self):
node1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=3)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = "deviceA"
device1.node_stats.extend([node1])
device2 = run_metadata.step_stats.dev_stats.add()
device2.device = "deviceB"
device2.node_stats.extend([node1])
graph = test.mock.MagicMock()
op = test.mock.MagicMock()
op.name = "Add/123"
op.traceback = [("a/b/file1", 10, "some_var")]
op.type = "abc"
graph.get_operations.return_value = [op]
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
prof_output = prof_analyzer.list_profile([]).lines
_assert_at_least_one_line_matches(r"Device 1 of 2: deviceA", prof_output)
_assert_at_least_one_line_matches(r"Device 2 of 2: deviceB", prof_output)
# Try filtering by device.
prof_output = prof_analyzer.list_profile(["-d", "deviceB"]).lines
_assert_at_least_one_line_matches(r"Device 2 of 2: deviceB", prof_output)
_assert_no_lines_match(r"Device 1 of 2: deviceA", prof_output)
def testWithSession(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
with session.Session(config=no_rewrite_session_config()) as sess:
a = constant_op.constant([1, 2, 3])
b = constant_op.constant([2, 2, 1])
result = math_ops.add(a, b)
sess.run(result, options=options, run_metadata=run_metadata)
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(
sess.graph, run_metadata)
prof_output = prof_analyzer.list_profile([]).lines
_assert_at_least_one_line_matches("Device 1 of", prof_output)
expected_headers = [
"Node", r"Start Time \(us\)", r"Op Time \(.*\)", r"Exec Time \(.*\)",
r"Filename:Lineno\(function\)"]
_assert_at_least_one_line_matches(
".*".join(expected_headers), prof_output)
_assert_at_least_one_line_matches(r"^Add/", prof_output)
_assert_at_least_one_line_matches(r"Device Total", prof_output)
def testSorting(self):
node1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
all_start_micros=123,
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
node2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
all_start_micros=122,
op_start_rel_micros=1,
op_end_rel_micros=2,
all_end_rel_micros=5)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = "deviceA"
device1.node_stats.extend([node1, node2])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = "Add/123"
op1.traceback = [("a/b/file2", 10, "some_var")]
op1.type = "add"
op2 = test.mock.MagicMock()
op2.name = "Mul/456"
op2.traceback = [("a/b/file1", 11, "some_var")]
op2.type = "mul"
graph.get_operations.return_value = [op1, op2]
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
# Default sort by start time (i.e. all_start_micros).
prof_output = prof_analyzer.list_profile([]).lines
self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
# Default sort in reverse.
prof_output = prof_analyzer.list_profile(["-r"]).lines
self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
# Sort by name.
prof_output = prof_analyzer.list_profile(["-s", "node"]).lines
self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
# Sort by op time (i.e. op_end_rel_micros - op_start_rel_micros).
prof_output = prof_analyzer.list_profile(["-s", "op_time"]).lines
self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
# Sort by exec time (i.e. all_end_rel_micros).
prof_output = prof_analyzer.list_profile(["-s", "exec_time"]).lines
self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
# Sort by line number.
prof_output = prof_analyzer.list_profile(["-s", "line"]).lines
self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
def testFiltering(self):
node1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
all_start_micros=123,
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
node2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
all_start_micros=122,
op_start_rel_micros=1,
op_end_rel_micros=2,
all_end_rel_micros=5)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = "deviceA"
device1.node_stats.extend([node1, node2])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = "Add/123"
op1.traceback = [("a/b/file2", 10, "some_var")]
op1.type = "add"
op2 = test.mock.MagicMock()
op2.name = "Mul/456"
op2.traceback = [("a/b/file1", 11, "some_var")]
op2.type = "mul"
graph.get_operations.return_value = [op1, op2]
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
# Filter by name
prof_output = prof_analyzer.list_profile(["-n", "Add"]).lines
_assert_at_least_one_line_matches(r"Add/123", prof_output)
_assert_no_lines_match(r"Mul/456", prof_output)
# Filter by op_type
prof_output = prof_analyzer.list_profile(["-t", "mul"]).lines
_assert_at_least_one_line_matches(r"Mul/456", prof_output)
_assert_no_lines_match(r"Add/123", prof_output)
# Filter by file name.
prof_output = prof_analyzer.list_profile(["-f", ".*file2"]).lines
_assert_at_least_one_line_matches(r"Add/123", prof_output)
_assert_no_lines_match(r"Mul/456", prof_output)
    # Filter by execution time.
prof_output = prof_analyzer.list_profile(["-e", "[5, 10]"]).lines
_assert_at_least_one_line_matches(r"Mul/456", prof_output)
_assert_no_lines_match(r"Add/123", prof_output)
    # Filter by op time.
prof_output = prof_analyzer.list_profile(["-o", ">=2"]).lines
_assert_at_least_one_line_matches(r"Add/123", prof_output)
_assert_no_lines_match(r"Mul/456", prof_output)
def testSpecifyingTimeUnit(self):
node1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
all_start_micros=123,
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
node2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
all_start_micros=122,
op_start_rel_micros=1,
op_end_rel_micros=2,
all_end_rel_micros=5)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = "deviceA"
device1.node_stats.extend([node1, node2])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = "Add/123"
op1.traceback = [("a/b/file2", 10, "some_var")]
op1.type = "add"
op2 = test.mock.MagicMock()
op2.name = "Mul/456"
op2.traceback = [("a/b/file1", 11, "some_var")]
op2.type = "mul"
graph.get_operations.return_value = [op1, op2]
prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
# Force time unit.
prof_output = prof_analyzer.list_profile(["--time_unit", "ms"]).lines
_assert_at_least_one_line_matches(r"Add/123.*add.*0\.002ms", prof_output)
_assert_at_least_one_line_matches(r"Mul/456.*mul.*0\.005ms", prof_output)
_assert_at_least_one_line_matches(r"Device Total.*0\.009ms", prof_output)
@test_util.run_v1_only("b/120545219")
class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
def setUp(self):
super(ProfileAnalyzerPrintSourceTest, self).setUp()
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
with session.Session() as sess:
loop_cond = lambda x: math_ops.less(x, 10)
self.loop_cond_lineno = _line_number_above()
loop_body = lambda x: math_ops.add(x, 1)
self.loop_body_lineno = _line_number_above()
x = constant_op.constant(0, name="x")
self.x_lineno = _line_number_above()
loop = control_flow_ops.while_loop(loop_cond, loop_body, [x])
self.loop_lineno = _line_number_above()
self.assertEqual(
10, sess.run(loop, options=options, run_metadata=run_metadata))
self.prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(
sess.graph, run_metadata)
def tearDown(self):
ops.reset_default_graph()
super(ProfileAnalyzerPrintSourceTest, self).tearDown()
def testPrintSourceForWhileLoop(self):
prof_output = self.prof_analyzer.print_source([__file__])
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
prof_output.lines)
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
prof_output.lines)
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
prof_output.lines)
def testPrintSourceOutputContainsClickableLinks(self):
prof_output = self.prof_analyzer.print_source([__file__])
any_match, line_index = _at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
prof_output.lines)
self.assertTrue(any_match)
any_menu_item_match = False
for seg in prof_output.font_attr_segs[line_index]:
if (isinstance(seg[2][1], debugger_cli_common.MenuItem) and
seg[2][1].content.startswith("lp --file_path_filter ") and
"--min_lineno %d" % self.loop_cond_lineno in seg[2][1].content and
"--max_lineno %d" % (self.loop_cond_lineno + 1) in seg[2][1].content):
any_menu_item_match = True
break
self.assertTrue(any_menu_item_match)
def testPrintSourceWithNonDefaultTimeUnit(self):
prof_output = self.prof_analyzer.print_source([
__file__, "--time_unit", "ms"])
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*ms .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
prof_output.lines)
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*ms .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
prof_output.lines)
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*ms .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
prof_output.lines)
def testPrintSourceWithNodeNameFilter(self):
prof_output = self.prof_analyzer.print_source([
__file__, "--node_name_filter", "x$"])
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*1\(1\) .*L%d.*(\S)+" % self.x_lineno,
prof_output.lines)
_assert_no_lines_match(
r"\[(\|)+(\s)*\] .*us .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
prof_output.lines)
_assert_no_lines_match(
r"\[(\|)+(\s)*\] .*us .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
prof_output.lines)
_assert_no_lines_match(
r"\[(\|)+(\s)*\] .*ms .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
prof_output.lines)
# Check clickable link.
_, line_index = _at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*1\(1\) .*L%d.*(\S)+" % self.x_lineno,
prof_output.lines)
any_menu_item_match = False
for seg in prof_output.font_attr_segs[line_index]:
if (isinstance(seg[2][1], debugger_cli_common.MenuItem) and
seg[2][1].content.startswith("lp --file_path_filter ") and
"--node_name_filter x$" in seg[2][1].content and
"--min_lineno %d" % self.x_lineno in seg[2][1].content and
"--max_lineno %d" % (self.x_lineno + 1) in seg[2][1].content):
any_menu_item_match = True
break
self.assertTrue(any_menu_item_match)
def testPrintSourceWithOpTypeFilter(self):
prof_output = self.prof_analyzer.print_source([
__file__, "--op_type_filter", "Less"])
_assert_at_least_one_line_matches(
r"\[(\|)+(\s)*\] .*us .*1\(11\) .*L%d.*(\S)+" % self.loop_cond_lineno,
prof_output.lines)
_assert_no_lines_match(
r"\[(\|)+(\s)*\] .*us .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
prof_output.lines)
_assert_no_lines_match(
r"\[(\|)+(\s)*\] .*us .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
prof_output.lines)
def testPrintSourceWithNonexistentDeviceGivesCorrectErrorMessage(self):
prof_output = self.prof_analyzer.print_source([
__file__, "--device_name_filter", "foo_device"])
_assert_at_least_one_line_matches(
r"The source file .* does not contain any profile information for the "
"previous Session run", prof_output.lines)
_assert_at_least_one_line_matches(
r".*--device_name_filter: foo_device", prof_output.lines)
def testPrintSourceWithUnrelatedFileShowsCorrectErrorMessage(self):
prof_output = self.prof_analyzer.print_source([tf_inspect.__file__])
_assert_at_least_one_line_matches(
r"The source file .* does not contain any profile information for the "
"previous Session run", prof_output.lines)
def testPrintSourceOutputContainsInitScrollPosAnnotation(self):
prof_output = self.prof_analyzer.print_source([
__file__, "--init_line", str(self.loop_cond_lineno)])
self.assertEqual(
self.loop_cond_lineno + 1, # The extra line is due to the head lines.
prof_output.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/profile_analyzer_cli_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for curses-based CLI widgets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.debug.cli import curses_widgets
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
RTL = debugger_cli_common.RichTextLines
CNH = curses_widgets.CursesNavigationHistory
class CNHTest(test_util.TensorFlowTestCase):
def testConstructorWorks(self):
CNH(10)
def testConstructorWithInvalidCapacityErrors(self):
with self.assertRaises(ValueError):
CNH(0)
with self.assertRaises(ValueError):
CNH(-1)
def testInitialStateIsCorrect(self):
nav_history = CNH(10)
self.assertEqual(0, nav_history.size())
self.assertFalse(nav_history.can_go_forward())
self.assertFalse(nav_history.can_go_back())
with self.assertRaisesRegexp(ValueError, "Empty navigation history"):
nav_history.go_back()
with self.assertRaisesRegexp(ValueError, "Empty navigation history"):
nav_history.go_forward()
with self.assertRaisesRegexp(ValueError, "Empty navigation history"):
nav_history.update_scroll_position(3)
def testAddOneItemWorks(self):
nav_history = CNH(10)
nav_history.add_item("foo", RTL(["bar"]), 0)
self.assertEqual(1, nav_history.size())
self.assertEqual(0, nav_history.pointer())
self.assertFalse(nav_history.can_go_forward())
self.assertFalse(nav_history.can_go_back())
output = nav_history.go_back()
self.assertEqual("foo", output.command)
self.assertEqual(["bar"], output.screen_output.lines)
self.assertEqual(0, output.scroll_position)
def testAddItemsBeyondCapacityWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_output"]), 0)
nav_history.add_item("bar", RTL(["bar_output"]), 0)
self.assertEqual(2, nav_history.size())
self.assertEqual(1, nav_history.pointer())
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
nav_history.add_item("baz", RTL(["baz_output"]), 0)
self.assertEqual(2, nav_history.size())
self.assertEqual(1, nav_history.pointer())
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
item = nav_history.go_back()
self.assertEqual("bar", item.command)
self.assertFalse(nav_history.can_go_back())
self.assertTrue(nav_history.can_go_forward())
item = nav_history.go_forward()
self.assertEqual("baz", item.command)
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
def testAddItemFromNonLatestPointerPositionWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_output"]), 0)
nav_history.add_item("bar", RTL(["bar_output"]), 0)
nav_history.go_back()
nav_history.add_item("baz", RTL(["baz_output"]), 0)
self.assertEqual(2, nav_history.size())
self.assertEqual(1, nav_history.pointer())
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
item = nav_history.go_back()
self.assertEqual("foo", item.command)
item = nav_history.go_forward()
self.assertEqual("baz", item.command)
def testUpdateScrollPositionOnLatestItemWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
nav_history.update_scroll_position(1)
nav_history.go_back()
item = nav_history.go_forward()
self.assertEqual("bar", item.command)
self.assertEqual(1, item.scroll_position)
def testUpdateScrollPositionOnOldItemWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
item = nav_history.go_back()
self.assertEqual("foo", item.command)
self.assertEqual(0, item.scroll_position)
nav_history.update_scroll_position(1)
nav_history.go_forward()
item = nav_history.go_back()
self.assertEqual("foo", item.command)
self.assertEqual(1, item.scroll_position)
item = nav_history.go_forward()
self.assertEqual("bar", item.command)
self.assertEqual(0, item.scroll_position)
def testRenderWithEmptyHistoryWorks(self):
nav_history = CNH(2)
output = nav_history.render(40, "prev", "next")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT,
output.lines[0])
self.assertEqual({}, output.font_attr_segs)
def testRenderLatestWithSufficientLengthWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
output = nav_history.render(
40,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | bar",
output.lines[0])
self.assertEqual(2, output.font_attr_segs[0][0][0])
self.assertEqual(5, output.font_attr_segs[0][0][1])
self.assertEqual("prev", output.font_attr_segs[0][0][2].content)
self.assertEqual(12, output.font_attr_segs[0][1][0])
self.assertEqual(15, output.font_attr_segs[0][1][1])
self.assertEqual("green", output.font_attr_segs[0][1][2])
def testRenderOldButNotOldestWithSufficientLengthWorks(self):
nav_history = CNH(3)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
nav_history.add_item("baz", RTL(["baz_out", "more_baz_out"]), 0)
nav_history.go_back()
output = nav_history.render(
40,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | (-1) bar",
output.lines[0])
self.assertEqual(2, output.font_attr_segs[0][0][0])
self.assertEqual(5, output.font_attr_segs[0][0][1])
self.assertEqual("prev", output.font_attr_segs[0][0][2].content)
self.assertEqual(6, output.font_attr_segs[0][1][0])
self.assertEqual(9, output.font_attr_segs[0][1][1])
self.assertEqual("next", output.font_attr_segs[0][1][2].content)
self.assertEqual(12, output.font_attr_segs[0][2][0])
self.assertEqual(17, output.font_attr_segs[0][2][1])
self.assertEqual("yellow", output.font_attr_segs[0][2][2])
self.assertEqual(17, output.font_attr_segs[0][3][0])
self.assertEqual(20, output.font_attr_segs[0][3][1])
self.assertEqual("yellow", output.font_attr_segs[0][3][2])
def testRenderOldestWithSufficientLengthWorks(self):
nav_history = CNH(3)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
nav_history.add_item("baz", RTL(["baz_out", "more_baz_out"]), 0)
nav_history.go_back()
nav_history.go_back()
output = nav_history.render(
40,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | (-2) foo",
output.lines[0])
self.assertEqual(6, output.font_attr_segs[0][0][0])
self.assertEqual(9, output.font_attr_segs[0][0][1])
self.assertEqual("next", output.font_attr_segs[0][0][2].content)
self.assertEqual(12, output.font_attr_segs[0][1][0])
self.assertEqual(17, output.font_attr_segs[0][1][1])
self.assertEqual("yellow", output.font_attr_segs[0][1][2])
self.assertEqual(17, output.font_attr_segs[0][2][0])
self.assertEqual(20, output.font_attr_segs[0][2][1])
self.assertEqual("yellow", output.font_attr_segs[0][2][2])
def testRenderWithInsufficientLengthWorks(self):
nav_history = CNH(2)
nav_history.add_item("long_command", RTL(["output"]), 0)
output = nav_history.render(
15,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | lon",
output.lines[0])
self.assertEqual(12, output.font_attr_segs[0][0][0])
self.assertEqual(15, output.font_attr_segs[0][0][1])
self.assertEqual("green", output.font_attr_segs[0][0][2])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/curses_widgets_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities for tfdbg command-line interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
def assert_lines_equal_ignoring_whitespace(test, expected_lines, actual_lines):
"""Assert equality in lines, ignoring all whitespace.
Args:
test: An instance of unittest.TestCase or its subtypes (e.g.,
TensorFlowTestCase).
expected_lines: Expected lines as an iterable of strings.
actual_lines: Actual lines as an iterable of strings.
"""
test.assertEqual(
len(expected_lines), len(actual_lines),
"Mismatch in the number of lines: %d vs %d" % (
len(expected_lines), len(actual_lines)))
for expected_line, actual_line in zip(expected_lines, actual_lines):
test.assertEqual("".join(expected_line.split()),
"".join(actual_line.split()))
# Regular expression for separators between values in a string representation
# of an ndarray, excluding whitespace.
_ARRAY_VALUE_SEPARATOR_REGEX = re.compile(r"(array|\(|\[|\]|\)|\||,)")
def assert_array_lines_close(test, expected_array, array_lines):
"""Assert that the array value represented by lines is close to expected.
Note that the shape of the array represented by the `array_lines` is ignored.
Args:
test: An instance of TensorFlowTestCase.
expected_array: Expected value of the array.
array_lines: A list of strings representing the array.
E.g., "array([[ 1.0, 2.0 ], [ 3.0, 4.0 ]])"
Assumes that values are separated by commas, parentheses, brackets, "|"
characters and whitespace.
"""
elements = []
for line in array_lines:
line = re.sub(_ARRAY_VALUE_SEPARATOR_REGEX, " ", line)
elements.extend(float(s) for s in line.split())
test.assertAllClose(np.array(expected_array).flatten(), elements)
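# A minimal usage sketch of the two helpers above. The test case below is a
# hypothetical illustration (not part of the original module); it assumes the
# helpers are driven from a TensorFlowTestCase, whose assertions they use.
from tensorflow.python.framework import test_util
class _HelpersDemoTest(test_util.TensorFlowTestCase):
  def testHelpersDemo(self):
    # Whitespace differences within each line are ignored.
    assert_lines_equal_ignoring_whitespace(
        self, ["a = 1", "b  =  2"], ["a=1", "b = 2"])
    # Values are parsed out of the ndarray string rendering; the shape
    # implied by the rendering is ignored.
    assert_array_lines_close(
        self, [[1.0, 2.0], [3.0, 4.0]],
        ["array([[ 1., 2.],", "       [ 3., 4.]])"])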
|
tensorflow-master
|
tensorflow/python/debug/cli/cli_test_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cli_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import shutil
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CLIConfigTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mkdtemp()
self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config")
self.assertFalse(gfile.Exists(self._tmp_config_path))
super(CLIConfigTest, self).setUp()
def tearDown(self):
shutil.rmtree(self._tmp_dir)
super(CLIConfigTest, self).tearDown()
def testConstructCLIConfigWithoutFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
self.assertEqual(True, config.get("mouse_mode"))
with self.assertRaises(KeyError):
config.get("property_that_should_not_exist")
self.assertTrue(gfile.Exists(self._tmp_config_path))
def testCLIConfigForwardCompatibilityTest(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with open(self._tmp_config_path, "rt") as f:
config_json = json.load(f)
    # Remove a field to simulate a config file written by an older version
    # (a forward-compatibility scenario).
del config_json["graph_recursion_depth"]
with open(self._tmp_config_path, "wt") as f:
json.dump(config_json, f)
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
def testModifyConfigValue(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
self.assertEqual(9, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCasting(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", "18")
config.set("mouse_mode", "false")
self.assertEqual(18, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCastingFailure(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(ValueError):
config.set("mouse_mode", "maybe")
def testLoadFromModifiedConfigFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
config2 = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(9, config2.get("graph_recursion_depth"))
self.assertEqual(False, config2.get("mouse_mode"))
def testSummarizeFromConfig(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize()
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
def testSummarizeFromConfigWithHighlight(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize(highlight="mouse_mode")
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
self.assertEqual((2, 12, ["underline", "bold"]),
output.font_attr_segs[3][0])
self.assertEqual((14, 18, "bold"), output.font_attr_segs[3][1])
def testSetCallback(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
test_value = {"graph_recursion_depth": -1}
def callback(config):
test_value["graph_recursion_depth"] = config.get("graph_recursion_depth")
config.set_callback("graph_recursion_depth", callback)
config.set("graph_recursion_depth", config.get("graph_recursion_depth") - 1)
self.assertEqual(test_value["graph_recursion_depth"],
config.get("graph_recursion_depth"))
def testSetCallbackInvalidPropertyName(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(KeyError):
config.set_callback("nonexistent_property_name", print)
def testSetCallbackNotCallable(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(TypeError):
config.set_callback("graph_recursion_depth", 1)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/cli_config_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curses-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import curses
from curses import textpad
import os
import signal
import sys
import threading
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import base_ui
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import curses_widgets
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
_SCROLL_REFRESH = "refresh"
_SCROLL_UP = "up"
_SCROLL_DOWN = "down"
_SCROLL_UP_A_LINE = "up_a_line"
_SCROLL_DOWN_A_LINE = "down_a_line"
_SCROLL_HOME = "home"
_SCROLL_END = "end"
_SCROLL_TO_LINE_INDEX = "scroll_to_line_index"
_COLOR_READY_COLORTERMS = ["gnome-terminal", "xfce4-terminal"]
_COLOR_ENABLED_TERM = "xterm-256color"
def _get_command_from_line_attr_segs(mouse_x, attr_segs):
"""Attempt to extract command from the attribute segments of a line.
Args:
mouse_x: (int) x coordinate of the mouse event.
attr_segs: (list) The list of attribute segments of a line from a
RichTextLines object.
Returns:
(str or None) If a command exists: the command as a str; otherwise, None.
"""
for seg in attr_segs:
if seg[0] <= mouse_x < seg[1]:
attributes = seg[2] if isinstance(seg[2], list) else [seg[2]]
for attr in attributes:
if isinstance(attr, debugger_cli_common.MenuItem):
return attr.content
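# Illustration of the attribute-segment format consumed above (the values
# here are made up for this sketch): each segment is
# (start_x, end_x, attr_or_attr_list), and a MenuItem attribute carries the
# command string to dispatch when the segment is clicked. E.g.:
#   menu_item = debugger_cli_common.MenuItem(None, "lt")
#   attr_segs = [(0, 4, "yellow"), (6, 9, [menu_item, "underline"])]
#   _get_command_from_line_attr_segs(7, attr_segs)  # Returns "lt".
#   _get_command_from_line_attr_segs(5, attr_segs)  # Returns None (a gap).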
class ScrollBar(object):
"""Vertical ScrollBar for Curses-based CLI.
An object of this class has knowledge of the location of the scroll bar
in the screen coordinates, the current scrolling position, and the total
number of text lines in the screen text. By using this information, it
  can generate a text rendering of the scroll bar, which consists of an UP
  button at the top and a DOWN button at the bottom, in addition to a scroll
block in between, whose exact location is determined by the scrolling
position. The object can also calculate the scrolling command (e.g.,
_SCROLL_UP_A_LINE, _SCROLL_DOWN) from the coordinate of a mouse click
event in the screen region it occupies.
"""
BASE_ATTR = cli_shared.COLOR_BLACK + "_on_" + cli_shared.COLOR_WHITE
def __init__(self,
min_x,
min_y,
max_x,
max_y,
scroll_position,
output_num_rows):
"""Constructor of ScrollBar.
Args:
min_x: (int) left index of the scroll bar on the screen (inclusive).
min_y: (int) top index of the scroll bar on the screen (inclusive).
max_x: (int) right index of the scroll bar on the screen (inclusive).
max_y: (int) bottom index of the scroll bar on the screen (inclusive).
scroll_position: (int) 0-based location of the screen output. For example,
if the screen output is scrolled to the top, the value of
scroll_position should be 0. If it is scrolled to the bottom, the value
should be output_num_rows - 1.
output_num_rows: (int) Total number of output rows.
Raises:
ValueError: If the width or height of the scroll bar, as determined
by min_x, max_x, min_y and max_y, is too small.
"""
self._min_x = min_x
self._min_y = min_y
self._max_x = max_x
self._max_y = max_y
self._scroll_position = scroll_position
self._output_num_rows = output_num_rows
self._scroll_bar_height = max_y - min_y + 1
if self._max_x < self._min_x:
raise ValueError("Insufficient width for ScrollBar (%d)" %
(self._max_x - self._min_x + 1))
if self._max_y < self._min_y + 3:
raise ValueError("Insufficient height for ScrollBar (%d)" %
(self._max_y - self._min_y + 1))
def _block_y(self, screen_coord_sys=False):
"""Get the 0-based y coordinate of the scroll block.
    This y coordinate takes into account the UP and DN buttons present at the
    top and bottom of the ScrollBar. For example, at the home
location, the return value will be 1; at the bottom location, the return
value will be self._scroll_bar_height - 2.
Args:
screen_coord_sys: (`bool`) whether the return value will be in the
screen coordinate system.
Returns:
(int) 0-based y coordinate of the scroll block, in the ScrollBar
coordinate system by default. For example,
when scroll position is at the top, this return value will be 1 (not 0,
because of the presence of the UP button). When scroll position is at
the bottom, this return value will be self._scroll_bar_height - 2
(not self._scroll_bar_height - 1, because of the presence of the DOWN
button).
"""
rel_block_y = int(
float(self._scroll_position) / (self._output_num_rows - 1) *
(self._scroll_bar_height - 3)) + 1
return rel_block_y + self._min_y if screen_coord_sys else rel_block_y
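  # Worked example of _block_y (a sketch with made-up numbers): with min_y=3
  # and max_y=12 (so _scroll_bar_height == 10) and output_num_rows == 100:
  #   scroll_position == 0  -> rel_block_y == int(0 / 99.0 * 7) + 1 == 1
  #   scroll_position == 99 -> rel_block_y == int(99 / 99.0 * 7) + 1 == 8
  # That is, the block stays strictly between the UP button (row 0) and the
  # DOWN button (row 9) of the scroll bar.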
def layout(self):
"""Get the RichTextLines layout of the scroll bar.
Returns:
(debugger_cli_common.RichTextLines) The text layout of the scroll bar.
"""
width = self._max_x - self._min_x + 1
empty_line = " " * width
foreground_font_attr_segs = [(0, width, self.BASE_ATTR)]
if self._output_num_rows > 1:
block_y = self._block_y()
if width == 1:
up_text = "U"
down_text = "D"
elif width == 2:
up_text = "UP"
down_text = "DN"
elif width == 3:
up_text = "UP "
down_text = "DN "
else:
up_text = " UP "
down_text = "DOWN"
layout = debugger_cli_common.RichTextLines(
[up_text], font_attr_segs={0: [(0, width, self.BASE_ATTR)]})
for i in xrange(1, self._scroll_bar_height - 1):
font_attr_segs = foreground_font_attr_segs if i == block_y else None
layout.append(empty_line, font_attr_segs=font_attr_segs)
layout.append(down_text, font_attr_segs=foreground_font_attr_segs)
else:
layout = debugger_cli_common.RichTextLines(
[empty_line] * self._scroll_bar_height)
return layout
def get_click_command(self, mouse_y):
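    """Determine the scroll command from the y coordinate of a mouse click.

    Args:
      mouse_y: (int) y coordinate of the mouse click, in the screen
        coordinate system.

    Returns:
      One of _SCROLL_UP_A_LINE (click on the UP button), _SCROLL_DOWN_A_LINE
      (click on the DOWN button), _SCROLL_UP / _SCROLL_DOWN (click above /
      below the scroll block), or None if the click calls for no scrolling.
    """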
if self._output_num_rows <= 1:
return None
elif mouse_y == self._min_y:
return _SCROLL_UP_A_LINE
elif mouse_y == self._max_y:
return _SCROLL_DOWN_A_LINE
elif (mouse_y > self._block_y(screen_coord_sys=True) and
mouse_y < self._max_y):
return _SCROLL_DOWN
elif (mouse_y < self._block_y(screen_coord_sys=True) and
mouse_y > self._min_y):
return _SCROLL_UP
else:
return None
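# A minimal usage sketch of ScrollBar (the coordinates are made up for this
# sketch; in the real UI they come from the terminal geometry in CursesUI):
#   bar = ScrollBar(min_x=78, min_y=3, max_x=79, max_y=22,
#                   scroll_position=0, output_num_rows=500)
#   layout = bar.layout()      # RichTextLines: "UP", scroll block, ..., "DN".
#   bar.get_click_command(3)   # _SCROLL_UP_A_LINE (the UP button row).
#   bar.get_click_command(22)  # _SCROLL_DOWN_A_LINE (the DOWN button row).
#   bar.get_click_command(15)  # _SCROLL_DOWN (below the scroll block).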
class CursesUI(base_ui.BaseUI):
"""Curses-based Command-line UI.
In this class, the methods with the prefix "_screen_" are the methods that
interact with the actual terminal using the curses library.
"""
CLI_TERMINATOR_KEY = 7 # Terminator key for input text box.
CLI_TAB_KEY = ord("\t")
BACKSPACE_KEY = ord("\b")
REGEX_SEARCH_PREFIX = "/"
TENSOR_INDICES_NAVIGATION_PREFIX = "@"
_NAVIGATION_FORWARD_COMMAND = "next"
_NAVIGATION_BACK_COMMAND = "prev"
# Limit screen width to work around the limitation of the curses library that
# it may return invalid x coordinates for large values.
_SCREEN_WIDTH_LIMIT = 220
# Possible Enter keys. 343 is curses key code for the num-pad Enter key when
# num lock is off.
CLI_CR_KEYS = [ord("\n"), ord("\r"), 343]
_KEY_MAP = {
127: curses.KEY_BACKSPACE, # Backspace
curses.KEY_DC: 4, # Delete
}
_FOREGROUND_COLORS = {
cli_shared.COLOR_WHITE: curses.COLOR_WHITE,
cli_shared.COLOR_RED: curses.COLOR_RED,
cli_shared.COLOR_GREEN: curses.COLOR_GREEN,
cli_shared.COLOR_YELLOW: curses.COLOR_YELLOW,
cli_shared.COLOR_BLUE: curses.COLOR_BLUE,
cli_shared.COLOR_CYAN: curses.COLOR_CYAN,
cli_shared.COLOR_MAGENTA: curses.COLOR_MAGENTA,
cli_shared.COLOR_BLACK: curses.COLOR_BLACK,
}
_BACKGROUND_COLORS = {
"transparent": -1,
cli_shared.COLOR_WHITE: curses.COLOR_WHITE,
cli_shared.COLOR_BLACK: curses.COLOR_BLACK,
}
# Font attribute for search and highlighting.
_SEARCH_HIGHLIGHT_FONT_ATTR = (
cli_shared.COLOR_BLACK + "_on_" + cli_shared.COLOR_WHITE)
_ARRAY_INDICES_COLOR_PAIR = (
cli_shared.COLOR_BLACK + "_on_" + cli_shared.COLOR_WHITE)
_ERROR_TOAST_COLOR_PAIR = (
cli_shared.COLOR_RED + "_on_" + cli_shared.COLOR_WHITE)
_INFO_TOAST_COLOR_PAIR = (
cli_shared.COLOR_BLUE + "_on_" + cli_shared.COLOR_WHITE)
_STATUS_BAR_COLOR_PAIR = (
cli_shared.COLOR_BLACK + "_on_" + cli_shared.COLOR_WHITE)
_UI_WAIT_COLOR_PAIR = (
cli_shared.COLOR_MAGENTA + "_on_" + cli_shared.COLOR_WHITE)
_NAVIGATION_WARNING_COLOR_PAIR = (
cli_shared.COLOR_RED + "_on_" + cli_shared.COLOR_WHITE)
_UI_WAIT_MESSAGE = "Processing..."
# The delay (in ms) between each update of the scroll bar when the mouse
# button is held down on the scroll bar. Controls how fast the screen scrolls.
_MOUSE_SCROLL_DELAY_MS = 100
_single_instance_lock = threading.Lock()
def __init__(self, on_ui_exit=None, config=None):
"""Constructor of CursesUI.
Args:
on_ui_exit: (Callable) Callback invoked when the UI exits.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations.
"""
base_ui.BaseUI.__init__(self, on_ui_exit=on_ui_exit, config=config)
self._screen_init()
self._screen_refresh_size()
# TODO(cais): Error out if the size of the screen is too small.
# Initialize some UI component size and locations.
self._init_layout()
self._command_history_store = debugger_cli_common.CommandHistory()
# Active list of command history, used in history navigation.
    # _command_history_store holds all the history commands the CLI has
    # received, up to a size limit. _active_command_history is the history
    # currently being navigated in, e.g., using the Up/Down keys. The latter
    # can differ from the former during prefixed or regex-based history
    # navigation, e.g., when the user enters the beginning of a command and
    # hits Up.
self._active_command_history = []
# Pointer to the current position in the history sequence.
# 0 means it is a new command being keyed in.
self._command_pointer = 0
self._command_history_limit = 100
self._pending_command = ""
self._nav_history = curses_widgets.CursesNavigationHistory(10)
# State related to screen output.
self._output_pad = None
self._output_pad_row = 0
self._output_array_pointer_indices = None
self._curr_unwrapped_output = None
self._curr_wrapped_output = None
try:
# Register signal handler for SIGINT.
signal.signal(signal.SIGINT, self._interrupt_handler)
except ValueError:
# Running in a child thread, can't catch signals.
pass
self.register_command_handler(
"mouse",
self._mouse_mode_command_handler,
"Get or set the mouse mode of this CLI: (on|off)",
prefix_aliases=["m"])
def _init_layout(self):
"""Initialize the layout of UI components.
Initialize the location and size of UI components such as command textbox
and output region according to the terminal size.
"""
# NamedTuple for rectangular locations on screen
self.rectangle = collections.namedtuple("rectangle",
"top left bottom right")
# Height of command text box
self._command_textbox_height = 2
self._title_row = 0
# Row index of the Navigation Bar (i.e., the bar that contains forward and
# backward buttons and displays the current command line).
self._nav_bar_row = 1
# Top row index of the output pad.
# A "pad" is a curses object that holds lines of text and not limited to
# screen size. It can be rendered on the screen partially with scroll
# parameters specified.
    self._output_top_row = 2
# Number of rows that the output pad has.
self._output_num_rows = (
self._max_y - self._output_top_row - self._command_textbox_height - 1)
    # Row index of the scroll-information line, taking into account the
    # zero-based row indexing and the command textbox area below the
    # scroll-information row.
self._output_scroll_row = self._max_y - 1 - self._command_textbox_height
    # Rows used to display tab-completion candidates.
self._candidates_top_row = self._output_scroll_row - 4
self._candidates_bottom_row = self._output_scroll_row - 1
# Maximum number of lines the candidates display can have.
self._candidates_max_lines = int(self._output_num_rows / 2)
self.max_output_lines = 10000
# Regex search state.
self._curr_search_regex = None
self._unwrapped_regex_match_lines = []
    # Size of the viewport on the screen, which is always smaller than or
    # equal to the screen size.
self._output_pad_screen_height = self._output_num_rows - 1
self._output_pad_screen_width = self._max_x - 2
self._output_pad_screen_location = self.rectangle(
top=self._output_top_row,
left=0,
bottom=self._output_top_row + self._output_num_rows,
right=self._output_pad_screen_width)
def _screen_init(self):
"""Screen initialization.
    Creates the curses stdscr and initializes the color pairs for display.
"""
    # If the terminal type is known to support colors (via COLORTERM), switch
    # TERM to a color-enabled value.
if os.getenv("COLORTERM") in _COLOR_READY_COLORTERMS:
os.environ["TERM"] = _COLOR_ENABLED_TERM
self._stdscr = curses.initscr()
self._command_window = None
self._screen_color_init()
def _screen_color_init(self):
"""Initialization of screen colors."""
curses.start_color()
curses.use_default_colors()
self._color_pairs = {}
color_index = 0
# Prepare color pairs.
for fg_color in self._FOREGROUND_COLORS:
for bg_color in self._BACKGROUND_COLORS:
color_index += 1
curses.init_pair(color_index, self._FOREGROUND_COLORS[fg_color],
self._BACKGROUND_COLORS[bg_color])
color_name = fg_color
if bg_color != "transparent":
color_name += "_on_" + bg_color
self._color_pairs[color_name] = curses.color_pair(color_index)
# Try getting color(s) available only under 256-color support.
try:
color_index += 1
curses.init_pair(color_index, 245, -1)
self._color_pairs[cli_shared.COLOR_GRAY] = curses.color_pair(color_index)
except curses.error:
# Use fall-back color(s):
self._color_pairs[cli_shared.COLOR_GRAY] = (
self._color_pairs[cli_shared.COLOR_GREEN])
    # A_BOLD, A_BLINK and A_UNDERLINE are not really "colors", but are placed
    # here for convenience.
self._color_pairs["bold"] = curses.A_BOLD
self._color_pairs["blink"] = curses.A_BLINK
self._color_pairs["underline"] = curses.A_UNDERLINE
# Default color pair to use when a specified color pair does not exist.
self._default_color_pair = self._color_pairs[cli_shared.COLOR_WHITE]
def _screen_launch(self, enable_mouse_on_start):
"""Launch the curses screen."""
curses.noecho()
curses.cbreak()
self._stdscr.keypad(1)
self._mouse_enabled = self.config.get("mouse_mode")
self._screen_set_mousemask()
self.config.set_callback(
"mouse_mode",
lambda cfg: self._set_mouse_enabled(cfg.get("mouse_mode")))
self._screen_create_command_window()
def _screen_create_command_window(self):
"""Create command window according to screen size."""
if self._command_window:
del self._command_window
self._command_window = curses.newwin(
self._command_textbox_height, self._max_x - len(self.CLI_PROMPT),
self._max_y - self._command_textbox_height, len(self.CLI_PROMPT))
def _screen_refresh(self):
self._stdscr.refresh()
def _screen_terminate(self):
"""Terminate the curses screen."""
self._stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
try:
# Remove SIGINT handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
except ValueError:
# Can't catch signals unless you're the main thread.
pass
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details."""
    # Only one instance of the Curses UI can be running at a time, since
    # otherwise multiple instances would try to read the same keystrokes and
    # write to the same screen.
self._single_instance_lock.acquire()
self._screen_launch(enable_mouse_on_start=enable_mouse_on_start)
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
self._screen_terminate()
self._single_instance_lock.release()
return exit_token
def get_help(self):
return self._command_handler_registry.get_help()
def _addstr(self, *args):
try:
self._stdscr.addstr(*args)
except curses.error:
pass
def _refresh_pad(self, pad, *args):
try:
pad.refresh(*args)
except curses.error:
pass
def _screen_create_command_textbox(self, existing_command=None):
"""Create command textbox on screen.
Args:
existing_command: (str) A command string to put in the textbox right
after its creation.
"""
# Display the tfdbg prompt.
self._addstr(self._max_y - self._command_textbox_height, 0,
self.CLI_PROMPT, curses.A_BOLD)
self._stdscr.refresh()
self._command_window.clear()
# Command text box.
self._command_textbox = textpad.Textbox(
self._command_window, insert_mode=True)
# Enter existing command.
self._auto_key_in(existing_command)
def _ui_loop(self):
"""Command-line UI loop.
Returns:
An exit token of arbitrary type. The token can be None.
"""
while True:
# Enter history command if pointer is in history (> 0):
if self._command_pointer > 0:
existing_command = self._active_command_history[-self._command_pointer]
else:
existing_command = self._pending_command
self._screen_create_command_textbox(existing_command)
try:
command, terminator, pending_command_changed = self._get_user_command()
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
if not command and terminator != self.CLI_TAB_KEY:
continue
if terminator in self.CLI_CR_KEYS or terminator == curses.KEY_MOUSE:
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
elif terminator == self.CLI_TAB_KEY:
tab_completed = self._tab_complete(command)
self._pending_command = tab_completed
        self._command_pointer = 0
elif pending_command_changed:
self._pending_command = command
def _get_user_command(self):
"""Get user command from UI.
Returns:
command: (str) The user-entered command.
      terminator: (int) Key code of the terminator for the command.
If command is a normal command entered with the Enter key, the value
will be the key itself. If this is a tab completion call (using the
Tab key), the value will reflect that as well.
pending_command_changed: (bool) If the pending command has changed.
Used during command history navigation.
"""
# First, reset textbox state variables.
self._textbox_curr_terminator = None
self._textbox_pending_command_changed = False
command = self._screen_get_user_command()
command = self._strip_terminator(command)
return (command, self._textbox_curr_terminator,
self._textbox_pending_command_changed)
def _screen_get_user_command(self):
return self._command_textbox.edit(validate=self._on_textbox_keypress)
def _strip_terminator(self, command):
if not command:
return command
for v in self.CLI_CR_KEYS:
if v < 256:
command = command.replace(chr(v), "")
return command.strip()
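  # For example, _strip_terminator("pt tensor_a\n") returns "pt tensor_a":
  # the Enter-key characters in CLI_CR_KEYS that fit in a single byte ("\n"
  # and "\r") are removed before surrounding whitespace is stripped.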
def _screen_refresh_size(self):
self._max_y, self._max_x = self._stdscr.getmaxyx()
if self._max_x > self._SCREEN_WIDTH_LIMIT:
self._max_x = self._SCREEN_WIDTH_LIMIT
def _navigate_screen_output(self, command):
"""Navigate in screen output history.
Args:
command: (`str`) the navigation command, from
{self._NAVIGATION_FORWARD_COMMAND, self._NAVIGATION_BACK_COMMAND}.
"""
if command == self._NAVIGATION_FORWARD_COMMAND:
if self._nav_history.can_go_forward():
item = self._nav_history.go_forward()
scroll_position = item.scroll_position
else:
self._toast("At the LATEST in navigation history!",
color=self._NAVIGATION_WARNING_COLOR_PAIR)
return
else:
if self._nav_history.can_go_back():
item = self._nav_history.go_back()
scroll_position = item.scroll_position
else:
self._toast("At the OLDEST in navigation history!",
color=self._NAVIGATION_WARNING_COLOR_PAIR)
return
self._display_output(item.screen_output)
if scroll_position != 0:
self._scroll_output(_SCROLL_TO_LINE_INDEX, line_index=scroll_position)
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if self._output_pad:
self._toast(self._UI_WAIT_MESSAGE, color=self._UI_WAIT_COLOR_PAIR)
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
elif (command == self._NAVIGATION_FORWARD_COMMAND or
command == self._NAVIGATION_BACK_COMMAND):
self._navigate_screen_output(command)
return
if command:
self._command_history_store.add_command(command)
if (command.startswith(self.REGEX_SEARCH_PREFIX) and
self._curr_unwrapped_output):
if len(command) > len(self.REGEX_SEARCH_PREFIX):
# Command is like "/regex". Perform regex search.
regex = command[len(self.REGEX_SEARCH_PREFIX):]
self._curr_search_regex = regex
self._display_output(self._curr_unwrapped_output, highlight_regex=regex)
elif self._unwrapped_regex_match_lines:
# Command is "/". Continue scrolling down matching lines.
self._display_output(
self._curr_unwrapped_output,
is_refresh=True,
highlight_regex=self._curr_search_regex)
self._command_pointer = 0
self._pending_command = ""
return
elif command.startswith(self.TENSOR_INDICES_NAVIGATION_PREFIX):
indices_str = command[1:].strip()
if indices_str:
try:
indices = command_parser.parse_indices(indices_str)
omitted, line_index, _, _ = tensor_format.locate_tensor_element(
self._curr_wrapped_output, indices)
if not omitted:
self._scroll_output(
_SCROLL_TO_LINE_INDEX, line_index=line_index)
except Exception as e: # pylint: disable=broad-except
self._error_toast(str(e))
else:
self._error_toast("Empty indices.")
return
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
self._error_toast(str(e))
return
if not prefix:
# Empty command: take no action. Should not exit.
return
# Take into account scroll bar width.
screen_info = {"cols": self._max_x - 2}
exit_token = None
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=screen_info)
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
# Clear active command history. Until next up/down history navigation
# occurs, it will stay empty.
self._active_command_history = []
if exit_token is not None:
return exit_token
self._nav_history.add_item(command, screen_output, 0)
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
self._info_toast("Wrote output to %s" % output_file_path)
except Exception: # pylint: disable=broad-except
self._error_toast("Failed to write output to %s" % output_file_path)
self._command_pointer = 0
self._pending_command = ""
def _screen_gather_textbox_str(self):
"""Gather the text string in the command text box.
Returns:
(str) the current text string in the command textbox, excluding any
return keys.
"""
txt = self._command_textbox.gather()
return txt.strip()
def _on_textbox_keypress(self, x):
"""Text box key validator: Callback of key strokes.
Handles a user's keypress in the input text box. Translates certain keys to
terminator keys for the textbox to allow its edit() method to return.
Also handles special key-triggered events such as PgUp/PgDown scrolling of
the screen output.
Args:
x: (int) Key code.
Returns:
(int) A translated key code. In most cases, this is identical to the
input x. However, if x is a Return key, the return value will be
CLI_TERMINATOR_KEY, so that the text box's edit() method can return.
Raises:
TypeError: If the input x is not of type int.
debugger_cli_common.CommandLineExit: If a mouse-triggered command returns
an exit token when dispatched.
"""
if not isinstance(x, int):
raise TypeError("Key validator expected type int, received type %s" %
type(x))
if x in self.CLI_CR_KEYS:
# Make Enter key the terminator
self._textbox_curr_terminator = x
return self.CLI_TERMINATOR_KEY
elif x == self.CLI_TAB_KEY:
self._textbox_curr_terminator = self.CLI_TAB_KEY
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_PPAGE:
self._scroll_output(_SCROLL_UP_A_LINE)
return x
elif x == curses.KEY_NPAGE:
self._scroll_output(_SCROLL_DOWN_A_LINE)
return x
elif x == curses.KEY_HOME:
self._scroll_output(_SCROLL_HOME)
return x
elif x == curses.KEY_END:
self._scroll_output(_SCROLL_END)
return x
elif x in [curses.KEY_UP, curses.KEY_DOWN]:
# Command history navigation.
if not self._active_command_history:
hist_prefix = self._screen_gather_textbox_str()
self._active_command_history = (
self._command_history_store.lookup_prefix(
hist_prefix, self._command_history_limit))
if self._active_command_history:
if x == curses.KEY_UP:
if self._command_pointer < len(self._active_command_history):
self._command_pointer += 1
elif x == curses.KEY_DOWN:
if self._command_pointer > 0:
self._command_pointer -= 1
else:
self._command_pointer = 0
self._textbox_curr_terminator = x
# Force return from the textbox edit(), so that the textbox can be
# redrawn with a history command entered.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_RESIZE:
# Respond to terminal resize.
self._screen_refresh_size()
self._init_layout()
self._screen_create_command_window()
self._redraw_output()
# Force return from the textbox edit(), so that the textbox can be
# redrawn.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_MOUSE and self._mouse_enabled:
try:
_, mouse_x, mouse_y, _, mouse_event_type = self._screen_getmouse()
except curses.error:
mouse_event_type = None
if mouse_event_type == curses.BUTTON1_PRESSED:
# Logic for held mouse-triggered scrolling.
if mouse_x >= self._max_x - 2:
# Disable blocking on checking for user input.
self._command_window.nodelay(True)
# Loop while mouse button is pressed.
while mouse_event_type == curses.BUTTON1_PRESSED:
# Sleep for a bit.
curses.napms(self._MOUSE_SCROLL_DELAY_MS)
scroll_command = self._scroll_bar.get_click_command(mouse_y)
if scroll_command in (_SCROLL_UP_A_LINE, _SCROLL_DOWN_A_LINE):
self._scroll_output(scroll_command)
            # Check to see if a different mouse event is in the queue.
self._command_window.getch()
try:
_, _, _, _, mouse_event_type = self._screen_getmouse()
except curses.error:
pass
self._command_window.nodelay(False)
return x
elif mouse_event_type == curses.BUTTON1_RELEASED:
# Logic for mouse-triggered scrolling.
if mouse_x >= self._max_x - 2:
scroll_command = self._scroll_bar.get_click_command(mouse_y)
if scroll_command is not None:
self._scroll_output(scroll_command)
return x
else:
command = self._fetch_hyperlink_command(mouse_x, mouse_y)
if command:
self._screen_create_command_textbox()
exit_token = self._dispatch_command(command)
if exit_token is not None:
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
else:
# Mark the pending command as modified.
self._textbox_pending_command_changed = True
# Invalidate active command history.
self._command_pointer = 0
self._active_command_history = []
return self._KEY_MAP.get(x, x)
def _screen_getmouse(self):
return curses.getmouse()
def _redraw_output(self):
if self._curr_unwrapped_output is not None:
self._display_nav_bar()
self._display_main_menu(self._curr_unwrapped_output)
self._display_output(self._curr_unwrapped_output, is_refresh=True)
def _fetch_hyperlink_command(self, mouse_x, mouse_y):
output_top = self._output_top_row
if self._main_menu_pad:
output_top += 1
if mouse_y == self._nav_bar_row and self._nav_bar:
# Click was in the nav bar.
return _get_command_from_line_attr_segs(mouse_x,
self._nav_bar.font_attr_segs[0])
elif mouse_y == self._output_top_row and self._main_menu_pad:
# Click was in the menu bar.
return _get_command_from_line_attr_segs(mouse_x,
self._main_menu.font_attr_segs[0])
else:
absolute_mouse_y = mouse_y + self._output_pad_row - output_top
if absolute_mouse_y in self._curr_wrapped_output.font_attr_segs:
return _get_command_from_line_attr_segs(
mouse_x, self._curr_wrapped_output.font_attr_segs[absolute_mouse_y])
def _title(self, title, title_color=None):
"""Display title.
Args:
title: (str) The title to display.
title_color: (str) Color of the title, e.g., "yellow".
"""
# Pad input title str with "-" and space characters to make it pretty.
self._title_line = "--- %s " % title
if len(self._title_line) < self._max_x:
self._title_line += "-" * (self._max_x - len(self._title_line))
self._screen_draw_text_line(
self._title_row, self._title_line, color=title_color)
def _auto_key_in(self, command, erase_existing=False):
"""Automatically key in a command to the command Textbox.
Args:
command: The command, as a string or None.
erase_existing: (bool) whether existing text (if any) is to be erased
first.
"""
if erase_existing:
self._erase_existing_command()
command = command or ""
for c in command:
self._command_textbox.do_command(ord(c))
def _erase_existing_command(self):
"""Erase existing text in command textpad."""
existing_len = len(self._command_textbox.gather())
for _ in xrange(existing_len):
self._command_textbox.do_command(self.BACKSPACE_KEY)
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
"""Render a line of text on the screen.
Args:
row: (int) Row index.
line: (str) The line content.
attr: curses font attribute.
color: (str) font foreground color name.
Raises:
TypeError: If row is not of type int.
"""
if not isinstance(row, int):
raise TypeError("Invalid type in row")
if len(line) > self._max_x:
line = line[:self._max_x]
color_pair = (self._default_color_pair if color is None else
self._color_pairs[color])
self._addstr(row, 0, line, color_pair | attr)
self._screen_refresh()
def _screen_new_output_pad(self, rows, cols):
"""Generate a new pad on the screen.
Args:
rows: (int) Number of rows the pad will have: not limited to screen size.
cols: (int) Number of columns the pad will have: not limited to screen
size.
Returns:
A curses textpad object.
"""
return curses.newpad(rows, cols)
def _screen_display_output(self, output):
"""Actually render text output on the screen.
    Wraps the lines according to screen width. Pads lines below according to
screen height so that the user can scroll the output to a state where
the last non-empty line is on the top of the screen. Then renders the
lines on the screen.
Args:
output: (RichTextLines) text lines to display on the screen. These lines
may have widths exceeding the screen width. This method will take care
of the wrapping.
Returns:
(List of int) A list of line indices, in the wrapped output, where there
are regex matches.
"""
# Wrap the output lines according to screen width.
self._curr_wrapped_output, wrapped_line_indices = (
debugger_cli_common.wrap_rich_text_lines(output, self._max_x - 2))
# Append lines to curr_wrapped_output so that the user can scroll to a
# state where the last text line is on the top of the output area.
self._curr_wrapped_output.lines.extend([""] * (self._output_num_rows - 1))
# Limit number of lines displayed to avoid curses overflow problems.
if self._curr_wrapped_output.num_lines() > self.max_output_lines:
self._curr_wrapped_output = self._curr_wrapped_output.slice(
0, self.max_output_lines)
      cut_off_message = "Output cut off at %d lines!" % self.max_output_lines
      self._curr_wrapped_output.lines.append(cut_off_message)
      # Color the cut-off message itself, not the last line of the original
      # output.
      self._curr_wrapped_output.font_attr_segs[self.max_output_lines] = [
          (0, len(cut_off_message), cli_shared.COLOR_MAGENTA)
      ]
self._display_nav_bar()
self._display_main_menu(self._curr_wrapped_output)
(self._output_pad, self._output_pad_height,
self._output_pad_width) = self._display_lines(self._curr_wrapped_output,
self._output_num_rows)
# The indices of lines with regex matches (if any) need to be mapped to
# indices of wrapped lines.
return [
wrapped_line_indices[line]
for line in self._unwrapped_regex_match_lines
]
def _display_output(self, output, is_refresh=False, highlight_regex=None):
"""Display text output in a scrollable text pad.
This method does some preprocessing on the text lines, render them on the
screen and scroll to the appropriate line. These are done according to regex
highlighting requests (if any), scroll-to-next-match requests (if any),
and screen refresh requests (if any).
    TODO(cais): Separate these unrelated requests to increase clarity and
maintainability.
Args:
output: A RichTextLines object that is the screen output text.
is_refresh: (bool) Is this a refreshing display with existing output.
highlight_regex: (str) Optional string representing the regex used to
search and highlight in the current screen output.
"""
if not output:
return
if highlight_regex:
try:
output = debugger_cli_common.regex_find(
output, highlight_regex, font_attr=self._SEARCH_HIGHLIGHT_FONT_ATTR)
except ValueError as e:
self._error_toast(str(e))
return
if not is_refresh:
# Perform new regex search on the current output.
self._unwrapped_regex_match_lines = output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY]
else:
# Continue scrolling down.
self._output_pad_row += 1
else:
self._curr_unwrapped_output = output
self._unwrapped_regex_match_lines = []
# Display output on the screen.
wrapped_regex_match_lines = self._screen_display_output(output)
    # Now that the text lines are displayed on the screen, scroll to the
# appropriate line according to previous scrolling state and regex search
# and highlighting state.
if highlight_regex:
next_match_line = -1
for match_line in wrapped_regex_match_lines:
if match_line >= self._output_pad_row:
next_match_line = match_line
break
if next_match_line >= 0:
self._scroll_output(
_SCROLL_TO_LINE_INDEX, line_index=next_match_line)
else:
# Regex search found no match >= current line number. Display message
# stating as such.
self._toast("Pattern not found", color=self._ERROR_TOAST_COLOR_PAIR)
elif is_refresh:
self._scroll_output(_SCROLL_REFRESH)
elif debugger_cli_common.INIT_SCROLL_POS_KEY in output.annotations:
line_index = output.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY]
self._scroll_output(_SCROLL_TO_LINE_INDEX, line_index=line_index)
else:
self._output_pad_row = 0
self._scroll_output(_SCROLL_HOME)
def _display_lines(self, output, min_num_rows):
"""Display RichTextLines object on screen.
Args:
output: A RichTextLines object.
min_num_rows: (int) Minimum number of output rows.
Returns:
1) The text pad object used to display the main text body.
2) (int) number of rows of the text pad, which may exceed screen size.
3) (int) number of columns of the text pad.
Raises:
ValueError: If input argument "output" is invalid.
"""
if not isinstance(output, debugger_cli_common.RichTextLines):
raise ValueError(
"Output is required to be an instance of RichTextLines, but is not.")
self._screen_refresh()
# Number of rows the output area will have.
rows = max(min_num_rows, len(output.lines))
# Size of the output pad, which may exceed screen size and require
# scrolling.
cols = self._max_x - 2
# Create new output pad.
pad = self._screen_new_output_pad(rows, cols)
for i in xrange(len(output.lines)):
if i in output.font_attr_segs:
self._screen_add_line_to_output_pad(
pad, i, output.lines[i], color_segments=output.font_attr_segs[i])
else:
self._screen_add_line_to_output_pad(pad, i, output.lines[i])
return pad, rows, cols
def _display_nav_bar(self):
nav_bar_width = self._max_x - 2
self._nav_bar_pad = self._screen_new_output_pad(1, nav_bar_width)
self._nav_bar = self._nav_history.render(
nav_bar_width,
self._NAVIGATION_BACK_COMMAND,
self._NAVIGATION_FORWARD_COMMAND)
self._screen_add_line_to_output_pad(
self._nav_bar_pad, 0, self._nav_bar.lines[0][:nav_bar_width - 1],
color_segments=(self._nav_bar.font_attr_segs[0]
if 0 in self._nav_bar.font_attr_segs else None))
def _display_main_menu(self, output):
"""Display main menu associated with screen output, if the menu exists.
Args:
      output: (debugger_cli_common.RichTextLines) The RichTextLines output,
        from whose annotations field the menu will be extracted and used (if
        the menu exists).
"""
if debugger_cli_common.MAIN_MENU_KEY in output.annotations:
self._main_menu = output.annotations[
debugger_cli_common.MAIN_MENU_KEY].format_as_single_line(
prefix="| ", divider=" | ", enabled_item_attrs=["underline"])
self._main_menu_pad = self._screen_new_output_pad(1, self._max_x - 2)
# The unwrapped menu line may exceed screen width, in which case it needs
# to be cut off.
wrapped_menu, _ = debugger_cli_common.wrap_rich_text_lines(
self._main_menu, self._max_x - 3)
self._screen_add_line_to_output_pad(
self._main_menu_pad,
0,
wrapped_menu.lines[0],
color_segments=(wrapped_menu.font_attr_segs[0]
if 0 in wrapped_menu.font_attr_segs else None))
else:
self._main_menu = None
self._main_menu_pad = None
def _pad_line_end_with_whitespace(self, pad, row, line_end_x):
"""Pad the whitespace at the end of a line with the default color pair.
Prevents spurious color pairs from appearing at the end of the lines in
    certain text terminals.
Args:
pad: The curses pad object to operate on.
row: (`int`) row index.
line_end_x: (`int`) column index of the end of the line (beginning of
the whitespace).
"""
if line_end_x < self._max_x - 2:
pad.addstr(row, line_end_x, " " * (self._max_x - 3 - line_end_x),
self._default_color_pair)
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
"""Render a line in a text pad.
Assumes: segments in color_segments are sorted in ascending order of the
beginning index.
    Note: Gaps between the segments are allowed and will be filled in with the
default color.
Args:
pad: The text pad to render the line in.
row: Row index, as an int.
txt: The text to be displayed on the specified row, as a str.
color_segments: A list of 3-tuples. Each tuple represents the beginning
and the end of a color segment, in the form of a right-open interval:
[start, end). The last element of the tuple is a color string, e.g.,
"red".
    Raises:
TypeError: If color_segments is not of type list.
"""
if not color_segments:
pad.addstr(row, 0, txt, self._default_color_pair)
self._pad_line_end_with_whitespace(pad, row, len(txt))
return
if not isinstance(color_segments, list):
raise TypeError("Input color_segments needs to be a list, but is not.")
all_segments = []
all_color_pairs = []
    # Process the beginning: if the first segment does not start at column 0,
    # fill the gap with the default color.
    if color_segments[0][0] != 0:
      all_segments.append((0, color_segments[0][0]))
      all_color_pairs.append(self._default_color_pair)
for (curr_start, curr_end, curr_attrs), (next_start, _, _) in zip(
color_segments, color_segments[1:] + [(len(txt), None, None)]):
all_segments.append((curr_start, curr_end))
if not isinstance(curr_attrs, list):
curr_attrs = [curr_attrs]
curses_attr = curses.A_NORMAL
for attr in curr_attrs:
if (self._mouse_enabled and
isinstance(attr, debugger_cli_common.MenuItem)):
curses_attr |= curses.A_UNDERLINE
else:
curses_attr |= self._color_pairs.get(attr, self._default_color_pair)
all_color_pairs.append(curses_attr)
if curr_end < next_start:
# Fill in the gap with the default color.
all_segments.append((curr_end, next_start))
all_color_pairs.append(self._default_color_pair)
# Finally, draw all the segments.
for segment, color_pair in zip(all_segments, all_color_pairs):
if segment[1] < self._max_x:
pad.addstr(row, segment[0], txt[segment[0]:segment[1]], color_pair)
if all_segments:
self._pad_line_end_with_whitespace(pad, row, all_segments[-1][1])
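  # Illustrative sketch of the color_segments format accepted above: given
  # txt = "Hello world" and color_segments = [(0, 5, "red"), (6, 11, "bold")],
  # txt[0:5] is rendered red, the gap txt[5:6] falls back to the default
  # color pair, and txt[6:11] is rendered bold (assuming the line fits the
  # screen width).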
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
self._refresh_pad(pad, viewport_top, viewport_left, screen_location_top,
screen_location_left, screen_location_bottom,
screen_location_right)
self._scroll_bar = ScrollBar(
self._max_x - 2,
3,
self._max_x - 1,
self._output_num_rows + 1,
self._output_pad_row,
self._output_pad_height - self._output_pad_screen_height)
(scroll_pad, _, _) = self._display_lines(
self._scroll_bar.layout(), self._output_num_rows - 1)
self._refresh_pad(scroll_pad, 0, 0, self._output_top_row + 1,
self._max_x - 2, self._output_num_rows + 1,
self._max_x - 1)
def _scroll_output(self, direction, line_index=None):
"""Scroll the output pad.
Args:
direction: _SCROLL_REFRESH, _SCROLL_UP, _SCROLL_DOWN, _SCROLL_UP_A_LINE,
_SCROLL_DOWN_A_LINE, _SCROLL_HOME, _SCROLL_END, _SCROLL_TO_LINE_INDEX
line_index: (int) Specifies the zero-based line index to scroll to.
Applicable only if direction is _SCROLL_TO_LINE_INDEX.
Raises:
ValueError: On invalid scroll direction.
TypeError: If line_index is not int and direction is
_SCROLL_TO_LINE_INDEX.
"""
if not self._output_pad:
# No output pad is present. Do nothing.
return
if direction == _SCROLL_REFRESH:
pass
elif direction == _SCROLL_UP:
# Scroll up.
self._output_pad_row -= int(self._output_num_rows / 3)
if self._output_pad_row < 0:
self._output_pad_row = 0
elif direction == _SCROLL_DOWN:
# Scroll down.
self._output_pad_row += int(self._output_num_rows / 3)
if (self._output_pad_row >
self._output_pad_height - self._output_pad_screen_height - 1):
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == _SCROLL_UP_A_LINE:
# Scroll up a line
if self._output_pad_row - 1 >= 0:
self._output_pad_row -= 1
elif direction == _SCROLL_DOWN_A_LINE:
# Scroll down a line
if self._output_pad_row + 1 < (
self._output_pad_height - self._output_pad_screen_height):
self._output_pad_row += 1
elif direction == _SCROLL_HOME:
# Scroll to top
self._output_pad_row = 0
elif direction == _SCROLL_END:
# Scroll to bottom
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == _SCROLL_TO_LINE_INDEX:
if not isinstance(line_index, int):
raise TypeError("Invalid line_index type (%s) under mode %s" %
(type(line_index), _SCROLL_TO_LINE_INDEX))
self._output_pad_row = line_index
else:
raise ValueError("Unsupported scroll mode: %s" % direction)
self._nav_history.update_scroll_position(self._output_pad_row)
# Actually scroll the output pad: refresh with new location.
output_pad_top = self._output_pad_screen_location.top
if self._main_menu_pad:
output_pad_top += 1
self._screen_scroll_output_pad(self._output_pad, self._output_pad_row, 0,
output_pad_top,
self._output_pad_screen_location.left,
self._output_pad_screen_location.bottom,
self._output_pad_screen_location.right)
self._screen_render_nav_bar()
self._screen_render_menu_pad()
self._scroll_info = self._compile_ui_status_summary()
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
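  # Sketch of the scroll step sizes used above: _SCROLL_UP and _SCROLL_DOWN
  # move the viewport by one third of the visible output rows, the *_A_LINE
  # variants move it by exactly one row, and all movements are clamped to the
  # boundaries of the output pad.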
def _screen_render_nav_bar(self):
if self._nav_bar_pad:
self._refresh_pad(self._nav_bar_pad, 0, 0, self._nav_bar_row, 0,
self._output_pad_screen_location.top, self._max_x)
def _screen_render_menu_pad(self):
if self._main_menu_pad:
self._refresh_pad(
self._main_menu_pad, 0, 0, self._output_pad_screen_location.top, 0,
self._output_pad_screen_location.top, self._max_x)
def _compile_ui_status_summary(self):
"""Compile status summary about this Curses UI instance.
The information includes: scroll status and mouse ON/OFF status.
Returns:
(str) A single text line summarizing the UI status, adapted to the
current screen width.
"""
info = ""
if self._output_pad_height > self._output_pad_screen_height + 1:
# Display information about the scrolling of tall screen output.
scroll_percentage = 100.0 * (min(
1.0,
float(self._output_pad_row) /
(self._output_pad_height - self._output_pad_screen_height - 1)))
if self._output_pad_row == 0:
scroll_directions = " (PgDn)"
elif self._output_pad_row >= (
self._output_pad_height - self._output_pad_screen_height - 1):
scroll_directions = " (PgUp)"
else:
scroll_directions = " (PgDn/PgUp)"
info += "--- Scroll%s: %.2f%% " % (scroll_directions, scroll_percentage)
self._output_array_pointer_indices = self._show_array_indices()
# Add array indices information to scroll message.
if self._output_array_pointer_indices:
if self._output_array_pointer_indices[0]:
info += self._format_indices(self._output_array_pointer_indices[0])
info += "-"
if self._output_array_pointer_indices[-1]:
info += self._format_indices(self._output_array_pointer_indices[-1])
info += " "
# Add mouse mode information.
mouse_mode_str = "Mouse: "
mouse_mode_str += "ON" if self._mouse_enabled else "OFF"
if len(info) + len(mouse_mode_str) + 5 < self._max_x:
info += "-" * (self._max_x - len(info) - len(mouse_mode_str) - 4)
info += " "
info += mouse_mode_str
info += " ---"
else:
info += "-" * (self._max_x - len(info))
return info
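  # For illustration, a compiled status line typically looks like:
  #   "--- Scroll (PgDn/PgUp): 16.67% -[2,0] ------------- Mouse: ON ---"
  # The scroll percentage and array indices appear only for tall outputs.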
def _format_indices(self, indices):
# Remove the spaces to make it compact.
return repr(indices).replace(" ", "")
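  # For example, _format_indices([1, 0]) returns "[1,0]".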
def _show_array_indices(self):
"""Show array indices for the lines at the top and bottom of the output.
For the top line and bottom line of the output display area, show the
element indices of the array being displayed.
Returns:
      If either the top or the bottom row has any matching array indices,
a dict from line index (0 being the top of the display area, -1
being the bottom of the display area) to array element indices. For
example:
{0: [0, 0], -1: [10, 0]}
Otherwise, None.
"""
indices_top = self._show_array_index_at_line(0)
output_top = self._output_top_row
if self._main_menu_pad:
output_top += 1
bottom_line_index = (
self._output_pad_screen_location.bottom - output_top - 1)
indices_bottom = self._show_array_index_at_line(bottom_line_index)
if indices_top or indices_bottom:
return {0: indices_top, -1: indices_bottom}
else:
return None
def _show_array_index_at_line(self, line_index):
"""Show array indices for the specified line in the display area.
Uses the line number to array indices map in the annotations field of the
RichTextLines object being displayed.
    If the displayed RichTextLines object does not contain such a mapping,
    this method will do nothing.
Args:
line_index: (int) 0-based line index from the top of the display area.
        For example, if line_index == 0, this method will display the array
indices for the line currently at the top of the display area.
Returns:
(list) The array indices at the specified line, if available. None, if
not available.
"""
# Examine whether the index information is available for the specified line
# number.
pointer = self._output_pad_row + line_index
if (pointer in self._curr_wrapped_output.annotations and
"i0" in self._curr_wrapped_output.annotations[pointer]):
indices = self._curr_wrapped_output.annotations[pointer]["i0"]
array_indices_str = self._format_indices(indices)
array_indices_info = "@" + array_indices_str
# TODO(cais): Determine line_index properly given menu pad status.
# Test coverage?
output_top = self._output_top_row
if self._main_menu_pad:
output_top += 1
self._toast(
array_indices_info,
color=self._ARRAY_INDICES_COLOR_PAIR,
line_index=output_top + line_index)
return indices
else:
return None
def _tab_complete(self, command_str):
"""Perform tab completion.
Obtains tab completion candidates.
If there are no candidates, return command_str and take no other actions.
If there are candidates, display the candidates on screen and return
command_str + (common prefix of the candidates).
Args:
command_str: (str) The str in the command input textbox when Tab key is
hit.
Returns:
(str) Completed string. Could be the same as command_str if no completion
      candidate is available. If candidate(s) are available, return command_str
      with the common prefix of the candidates appended.
"""
context, prefix, except_last_word = self._analyze_tab_complete_input(
command_str)
candidates, common_prefix = self._tab_completion_registry.get_completions(
context, prefix)
if candidates and len(candidates) > 1:
self._display_candidates(candidates)
else:
# In the case of len(candidates) == 1, the single completion will be
# entered to the textbox automatically. So there is no need to show any
# candidates.
self._display_candidates([])
if common_prefix:
# Common prefix is not None and non-empty. The completed string will
# incorporate the common prefix.
return except_last_word + common_prefix
else:
return except_last_word + prefix
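  # Sketch: with registered command prefixes "ba" and "babble", a command_str
  # of "b" yields candidates ["ba", "babble"] whose common prefix is "ba", so
  # both candidates are displayed and "ba" becomes the textbox content.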
def _display_candidates(self, candidates):
"""Show candidates (e.g., tab-completion candidates) on multiple lines.
Args:
candidates: (list of str) candidates.
"""
if self._curr_unwrapped_output:
# Force refresh screen output.
self._scroll_output(_SCROLL_REFRESH)
if not candidates:
return
candidates_prefix = "Candidates: "
candidates_line = candidates_prefix + " ".join(candidates)
candidates_output = debugger_cli_common.RichTextLines(
candidates_line,
font_attr_segs={
0: [(len(candidates_prefix), len(candidates_line), "yellow")]
})
candidates_output, _ = debugger_cli_common.wrap_rich_text_lines(
candidates_output, self._max_x - 3)
# Calculate how many lines the candidate text should occupy. Limit it to
# a maximum value.
candidates_num_rows = min(
len(candidates_output.lines), self._candidates_max_lines)
self._candidates_top_row = (
self._candidates_bottom_row - candidates_num_rows + 1)
# Render the candidate text on screen.
pad, _, _ = self._display_lines(candidates_output, 0)
self._screen_scroll_output_pad(
pad, 0, 0, self._candidates_top_row, 0,
self._candidates_top_row + candidates_num_rows - 1, self._max_x - 2)
def _toast(self, message, color=None, line_index=None):
"""Display a one-line message on the screen.
By default, the toast is displayed in the line right above the scroll bar.
But the line location can be overridden with the line_index arg.
Args:
message: (str) the message to display.
color: (str) optional color attribute for the message.
line_index: (int) line index.
"""
pad, _, _ = self._display_lines(
debugger_cli_common.RichTextLines(
message,
font_attr_segs={
0: [(0, len(message), color or cli_shared.COLOR_WHITE)]}),
0)
right_end = min(len(message), self._max_x - 2)
if line_index is None:
line_index = self._output_scroll_row - 1
self._screen_scroll_output_pad(pad, 0, 0, line_index, 0, line_index,
right_end)
def _error_toast(self, message):
"""Display a one-line error message on screen.
Args:
message: The error message, without the preceding "ERROR: " substring.
"""
self._toast(
self.ERROR_MESSAGE_PREFIX + message, color=self._ERROR_TOAST_COLOR_PAIR)
def _info_toast(self, message):
"""Display a one-line informational message on screen.
Args:
message: The informational message.
"""
self._toast(
self.INFO_MESSAGE_PREFIX + message, color=self._INFO_TOAST_COLOR_PAIR)
def _interrupt_handler(self, signal_num, frame):
del signal_num # Unused.
del frame # Unused.
if self._on_ui_exit:
self._on_ui_exit()
self._screen_terminate()
print("\ntfdbg: caught SIGINT; calling sys.exit(1).", file=sys.stderr)
sys.exit(1)
def _mouse_mode_command_handler(self, args, screen_info=None):
"""Handler for the command prefix 'mouse'.
Args:
args: (list of str) Arguments to the command prefix 'mouse'.
screen_info: (dict) Information about the screen, unused by this handler.
Returns:
None, as this command handler does not generate any screen outputs other
than toasts.
"""
del screen_info
if not args or len(args) == 1:
if args:
if args[0].lower() == "on":
enabled = True
elif args[0].lower() == "off":
enabled = False
else:
self._error_toast("Invalid mouse mode: %s" % args[0])
return None
self._set_mouse_enabled(enabled)
mode_str = "on" if self._mouse_enabled else "off"
self._info_toast("Mouse mode: %s" % mode_str)
else:
self._error_toast("mouse_mode: syntax error")
return None
def _set_mouse_enabled(self, enabled):
if self._mouse_enabled != enabled:
self._mouse_enabled = enabled
self._screen_set_mousemask()
self._redraw_output()
def _screen_set_mousemask(self):
if self._mouse_enabled:
curses.mousemask(curses.BUTTON1_RELEASED | curses.BUTTON1_PRESSED)
else:
curses.mousemask(0)
|
tensorflow-master
|
tensorflow/python/debug/cli/curses_ui.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared functions and classes for tfdbg command-line interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.debug.lib import common
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
RL = debugger_cli_common.RichLine
# Default threshold number of elements above which ellipses will be used
# when printing the value of the tensor.
DEFAULT_NDARRAY_DISPLAY_THRESHOLD = 2000
COLOR_BLACK = "black"
COLOR_BLUE = "blue"
COLOR_CYAN = "cyan"
COLOR_GRAY = "gray"
COLOR_GREEN = "green"
COLOR_MAGENTA = "magenta"
COLOR_RED = "red"
COLOR_WHITE = "white"
COLOR_YELLOW = "yellow"
TIME_UNIT_US = "us"
TIME_UNIT_MS = "ms"
TIME_UNIT_S = "s"
TIME_UNITS = [TIME_UNIT_US, TIME_UNIT_MS, TIME_UNIT_S]
def bytes_to_readable_str(num_bytes, include_b=False):
"""Generate a human-readable string representing number of bytes.
The units B, kB, MB and GB are used.
Args:
num_bytes: (`int` or None) Number of bytes.
include_b: (`bool`) Include the letter B at the end of the unit.
Returns:
(`str`) A string representing the number of bytes in a human-readable way,
including a unit at the end.
"""
if num_bytes is None:
return str(num_bytes)
if num_bytes < 1024:
result = "%d" % num_bytes
elif num_bytes < 1048576:
result = "%.2fk" % (num_bytes / 1024.0)
elif num_bytes < 1073741824:
result = "%.2fM" % (num_bytes / 1048576.0)
else:
result = "%.2fG" % (num_bytes / 1073741824.0)
if include_b:
result += "B"
return result
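# A minimal usage sketch of the thresholds above:
#   bytes_to_readable_str(512)                          -> "512"
#   bytes_to_readable_str(2048)                         -> "2.00k"
#   bytes_to_readable_str(2 * 1048576, include_b=True)  -> "2.00MB"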
def time_to_readable_str(value_us, force_time_unit=None):
"""Convert time value to human-readable string.
Args:
value_us: time value in microseconds.
force_time_unit: force the output to use the specified time unit. Must be
in TIME_UNITS.
Returns:
Human-readable string representation of the time value.
Raises:
ValueError: if force_time_unit value is not in TIME_UNITS.
"""
if not value_us:
return "0"
if force_time_unit:
if force_time_unit not in TIME_UNITS:
raise ValueError("Invalid time unit: %s" % force_time_unit)
order = TIME_UNITS.index(force_time_unit)
time_unit = force_time_unit
return "{:.10g}{}".format(value_us / math.pow(10.0, 3*order), time_unit)
else:
order = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))
time_unit = TIME_UNITS[order]
return "{:.3g}{}".format(value_us / math.pow(10.0, 3*order), time_unit)
def parse_ranges_highlight(ranges_string):
"""Process ranges highlight string.
Args:
    ranges_string: (str) A string representing a numerical range or a list of
      numerical ranges. See the help info of the -r flag of the print_tensor
command for more details.
Returns:
    An instance of tensor_format.HighlightOptions, if ranges_string is a valid
    representation of a range or a list of ranges.
"""
ranges = None
def ranges_filter(x):
r = np.zeros(x.shape, dtype=bool)
for range_start, range_end in ranges:
r = np.logical_or(r, np.logical_and(x >= range_start, x <= range_end))
return r
if ranges_string:
ranges = command_parser.parse_ranges(ranges_string)
return tensor_format.HighlightOptions(
ranges_filter, description=ranges_string)
else:
return None
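# Sketch, assuming command_parser.parse_ranges accepts strings such as
# "[0, 10]": parse_ranges_highlight("[0, 10]") returns a HighlightOptions
# whose filter marks exactly the elements x satisfying 0 <= x <= 10.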
def numpy_printoptions_from_screen_info(screen_info):
if screen_info and "cols" in screen_info:
return {"linewidth": screen_info["cols"]}
else:
return {}
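# For example, numpy_printoptions_from_screen_info({"cols": 80}) returns
# {"linewidth": 80}.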
def format_tensor(tensor,
tensor_name,
np_printoptions,
print_all=False,
tensor_slicing=None,
highlight_options=None,
include_numeric_summary=False,
write_path=None):
"""Generate formatted str to represent a tensor or its slices.
Args:
tensor: (numpy ndarray) The tensor value.
tensor_name: (str) Name of the tensor, e.g., the tensor's debug watch key.
np_printoptions: (dict) Numpy tensor formatting options.
print_all: (bool) Whether the tensor is to be displayed in its entirety,
instead of printing ellipses, even if its number of elements exceeds
the default numpy display threshold.
(Note: Even if this is set to true, the screen output can still be cut
      off by the UI frontend if it consists of more lines than the frontend
can handle.)
tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
None, no slicing will be performed on the tensor.
highlight_options: (tensor_format.HighlightOptions) options to highlight
elements of the tensor. See the doc of tensor_format.format_tensor()
for more details.
include_numeric_summary: Whether a text summary of the numeric values (if
applicable) will be included.
write_path: A path to save the tensor value (after any slicing) to
(optional). `numpy.save()` is used to save the value.
Returns:
An instance of `debugger_cli_common.RichTextLines` representing the
(potentially sliced) tensor.
"""
if tensor_slicing:
# Validate the indexing.
value = command_parser.evaluate_tensor_slice(tensor, tensor_slicing)
sliced_name = tensor_name + tensor_slicing
else:
value = tensor
sliced_name = tensor_name
auxiliary_message = None
if write_path:
with gfile.Open(write_path, "wb") as output_file:
np.save(output_file, value)
line = debugger_cli_common.RichLine("Saved value to: ")
line += debugger_cli_common.RichLine(write_path, font_attr="bold")
line += " (%sB)" % bytes_to_readable_str(gfile.Stat(write_path).length)
auxiliary_message = debugger_cli_common.rich_text_lines_from_rich_line_list(
[line, debugger_cli_common.RichLine("")])
if print_all:
np_printoptions["threshold"] = value.size
else:
np_printoptions["threshold"] = DEFAULT_NDARRAY_DISPLAY_THRESHOLD
return tensor_format.format_tensor(
value,
sliced_name,
include_metadata=True,
include_numeric_summary=include_numeric_summary,
auxiliary_message=auxiliary_message,
np_printoptions=np_printoptions,
highlight_options=highlight_options)
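# Sketch: a call such as format_tensor(np.ones([2, 2]), "m", {},
# write_path="/tmp/m.npy") (hypothetical path) saves the value via np.save
# and prepends a "Saved value to: ..." line to the rendered tensor text.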
def error(msg):
"""Generate a RichTextLines output for error.
Args:
msg: (str) The error message.
Returns:
(debugger_cli_common.RichTextLines) A representation of the error message
for screen output.
"""
return debugger_cli_common.rich_text_lines_from_rich_line_list([
RL("ERROR: " + msg, COLOR_RED)])
def _recommend_command(command, description, indent=2, create_link=False):
"""Generate a RichTextLines object that describes a recommended command.
Args:
command: (str) The command to recommend.
description: (str) A description of what the command does.
indent: (int) How many spaces to indent in the beginning.
create_link: (bool) Whether a command link is to be applied to the command
string.
Returns:
(RichTextLines) Formatted text (with font attributes) for recommending the
command.
"""
indent_str = " " * indent
if create_link:
font_attr = [debugger_cli_common.MenuItem("", command), "bold"]
else:
font_attr = "bold"
lines = [RL(indent_str) + RL(command, font_attr) + ":",
indent_str + " " + description]
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
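# For illustration, _recommend_command("run", "Execute the run() call")
# renders two lines, with the command string in bold:
#   "  run:"
#   "    Execute the run() call"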
def get_tfdbg_logo():
"""Make an ASCII representation of the tfdbg logo."""
lines = [
"",
"TTTTTT FFFF DDD BBBB GGG ",
" TT F D D B B G ",
" TT FFF D D BBBB G GG",
" TT F D D B B G G",
" TT F DDD BBBB GGG ",
"",
]
return debugger_cli_common.RichTextLines(lines)
_HORIZONTAL_BAR = "======================================"
def get_run_start_intro(run_call_count,
fetches,
feed_dict,
tensor_filters,
is_callable_runner=False):
"""Generate formatted intro for run-start UI.
Args:
run_call_count: (int) Run call counter.
fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
for more details.
feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
for more details.
tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
callable.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
Returns:
(RichTextLines) Formatted intro message about the `Session.run()` call.
"""
fetch_lines = common.get_flattened_names(fetches)
if not feed_dict:
feed_dict_lines = [debugger_cli_common.RichLine(" (Empty)")]
else:
feed_dict_lines = []
for feed_key in feed_dict:
feed_key_name = common.get_graph_element_name(feed_key)
feed_dict_line = debugger_cli_common.RichLine(" ")
feed_dict_line += debugger_cli_common.RichLine(
feed_key_name,
debugger_cli_common.MenuItem(None, "pf '%s'" % feed_key_name))
# Surround the name string with quotes, because feed_key_name may contain
# spaces in some cases, e.g., SparseTensors.
feed_dict_lines.append(feed_dict_line)
feed_dict_lines = debugger_cli_common.rich_text_lines_from_rich_line_list(
feed_dict_lines)
out = debugger_cli_common.RichTextLines(_HORIZONTAL_BAR)
if is_callable_runner:
out.append("Running a runner returned by Session.make_callable()")
else:
out.append("Session.run() call #%d:" % run_call_count)
out.append("")
out.append("Fetch(es):")
out.extend(debugger_cli_common.RichTextLines(
[" " + line for line in fetch_lines]))
out.append("")
out.append("Feed dict:")
out.extend(feed_dict_lines)
out.append(_HORIZONTAL_BAR)
out.append("")
out.append("Select one of the following commands to proceed ---->")
out.extend(
_recommend_command(
"run",
"Execute the run() call with debug tensor-watching",
create_link=True))
out.extend(
_recommend_command(
"run -n",
"Execute the run() call without debug tensor-watching",
create_link=True))
out.extend(
_recommend_command(
"run -t <T>",
"Execute run() calls (T - 1) times without debugging, then "
"execute run() once more with debugging and drop back to the CLI"))
out.extend(
_recommend_command(
"run -f <filter_name>",
"Keep executing run() calls until a dumped tensor passes a given, "
"registered filter (conditional breakpoint mode)"))
more_lines = [" Registered filter(s):"]
if tensor_filters:
filter_names = []
for filter_name in tensor_filters:
filter_names.append(filter_name)
command_menu_node = debugger_cli_common.MenuItem(
"", "run -f %s" % filter_name)
more_lines.append(RL(" * ") + RL(filter_name, command_menu_node))
else:
more_lines.append(" (None)")
out.extend(
debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))
out.append("")
out.append_rich_line(RL("For more details, see ") +
RL("help.", debugger_cli_common.MenuItem("", "help")) +
".")
out.append("")
# Make main menu for the run-start intro.
menu = debugger_cli_common.Menu()
menu.append(debugger_cli_common.MenuItem("run", "run"))
menu.append(debugger_cli_common.MenuItem("exit", "exit"))
out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
return out
def get_run_short_description(run_call_count,
fetches,
feed_dict,
is_callable_runner=False):
"""Get a short description of the run() call.
Args:
run_call_count: (int) Run call counter.
fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
for more details.
feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
for more details.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
Returns:
(str) A short description of the run() call, including information about
    the fetch(es) and feed(s).
"""
if is_callable_runner:
return "runner from make_callable()"
description = "run #%d: " % run_call_count
if isinstance(fetches, (ops.Tensor, ops.Operation, variables.Variable)):
description += "1 fetch (%s); " % common.get_graph_element_name(fetches)
else:
# Could be (nested) list, tuple, dict or namedtuple.
num_fetches = len(common.get_flattened_names(fetches))
if num_fetches > 1:
description += "%d fetches; " % num_fetches
else:
description += "%d fetch; " % num_fetches
if not feed_dict:
description += "0 feeds"
else:
if len(feed_dict) == 1:
for key in feed_dict:
description += "1 feed (%s)" % (
key if isinstance(key, six.string_types) or not hasattr(key, "name")
else key.name)
else:
description += "%d feeds" % len(feed_dict)
return description
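# For illustration, a single-fetch, single-feed call yields a description
# such as "run #1: 1 fetch (y:0); 1 feed (x:0)" (tensor names hypothetical).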
def get_error_intro(tf_error):
"""Generate formatted intro for TensorFlow run-time error.
Args:
tf_error: (errors.OpError) TensorFlow run-time error object.
Returns:
(RichTextLines) Formatted intro message about the run-time OpError, with
sample commands for debugging.
"""
if hasattr(tf_error, "op") and hasattr(tf_error.op, "name"):
op_name = tf_error.op.name
else:
op_name = None
intro_lines = [
"--------------------------------------",
RL("!!! An error occurred during the run !!!", "blink"),
"",
]
out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)
if op_name is not None:
out.extend(debugger_cli_common.RichTextLines(
["You may use the following commands to debug:"]))
out.extend(
_recommend_command("ni -a -d -t %s" % op_name,
"Inspect information about the failing op.",
create_link=True))
out.extend(
_recommend_command("li -r %s" % op_name,
"List inputs to the failing op, recursively.",
create_link=True))
out.extend(
_recommend_command(
"lt",
"List all tensors dumped during the failing run() call.",
create_link=True))
else:
out.extend(debugger_cli_common.RichTextLines([
"WARNING: Cannot determine the name of the op that caused the error."]))
more_lines = [
"",
"Op name: %s" % op_name,
"Error type: " + str(type(tf_error)),
"",
"Details:",
str(tf_error),
"",
"--------------------------------------",
"",
]
out.extend(debugger_cli_common.RichTextLines(more_lines))
return out
|
tensorflow-master
|
tensorflow/python/debug/cli/cli_shared.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import curses
import tempfile
import threading
import numpy as np
from six.moves import queue
from tensorflow.python.debug.cli import cli_test_utils
from tensorflow.python.debug.cli import curses_ui
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
def string_to_codes(cmd):
return [ord(c) for c in cmd]
def codes_to_string(cmd_code):
# Omit non-ASCII key codes.
return "".join([chr(code) for code in cmd_code if code < 256])
class MockCursesUI(curses_ui.CursesUI):
"""Mock subclass of CursesUI that bypasses actual terminal manipulations."""
def __init__(self,
height,
width,
command_sequence=None):
self._height = height
self._width = width
self._command_sequence = command_sequence
self._command_counter = 0
# The mock class has no actual textbox. So use this variable to keep
# track of what's entered in the textbox on creation.
self._curr_existing_command = ""
# Observers for test.
# Observers of screen output.
self.unwrapped_outputs = []
self.wrapped_outputs = []
self.scroll_messages = []
self.output_array_pointer_indices = []
self.output_pad_rows = []
# Observers of command textbox.
self.existing_commands = []
# Observer for tab-completion candidates.
self.candidates_lists = []
# Observer for the main menu.
self.main_menu_list = []
# Observer for toast messages.
self.toasts = []
curses_ui.CursesUI.__init__(self)
# Override the default path to the command history file to avoid test
# concurrency issues.
self._command_history_store = debugger_cli_common.CommandHistory(
history_file_path=tempfile.mktemp())
# Below, override the _screen_ prefixed member methods that interact with the
# actual terminal, so that the mock can run in a terminal-less environment.
# TODO(cais): Search for a way to have a mock terminal object that behaves
# like the actual terminal, so that we can test the terminal interaction
# parts of the CursesUI class.
def _screen_init(self):
pass
def _screen_refresh_size(self):
self._max_y = self._height
self._max_x = self._width
def _screen_launch(self, enable_mouse_on_start):
self._mouse_enabled = enable_mouse_on_start
def _screen_terminate(self):
pass
def _screen_refresh(self):
pass
def _screen_create_command_window(self):
pass
def _screen_create_command_textbox(self, existing_command=None):
"""Override to insert observer of existing commands.
Used in testing of history navigation and tab completion.
Args:
existing_command: Command string entered to the textbox at textbox
creation time. Note that the textbox does not actually exist in this
mock subclass. This method only keeps track of and records the state.
"""
self.existing_commands.append(existing_command)
self._curr_existing_command = existing_command
def _screen_new_output_pad(self, rows, cols):
return "mock_pad"
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
pass
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
pass
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pass
def _screen_get_user_command(self):
command = self._command_sequence[self._command_counter]
self._command_key_counter = 0
for c in command:
if c == curses.KEY_RESIZE:
# Special case for simulating a terminal resize event in curses.
self._height = command[1]
self._width = command[2]
self._on_textbox_keypress(c)
self._command_counter += 1
return ""
elif c == curses.KEY_MOUSE:
mouse_x = command[1]
mouse_y = command[2]
self._command_counter += 1
self._textbox_curr_terminator = c
return self._fetch_hyperlink_command(mouse_x, mouse_y)
else:
y = self._on_textbox_keypress(c)
self._command_key_counter += 1
if y == curses_ui.CursesUI.CLI_TERMINATOR_KEY:
break
self._command_counter += 1
# Take into account pre-existing string automatically entered on textbox
# creation.
return self._curr_existing_command + codes_to_string(command)
def _screen_getmouse(self):
output = (0, self._mouse_xy_sequence[self._mouse_counter][0],
self._mouse_xy_sequence[self._mouse_counter][1], 0,
curses.BUTTON1_CLICKED)
self._mouse_counter += 1
return output
def _screen_gather_textbox_str(self):
return codes_to_string(self._command_sequence[self._command_counter]
[:self._command_key_counter])
def _scroll_output(self, direction, line_index=None):
"""Override to observe screen output.
This method is invoked after every command that generates a new screen
output and after every keyboard triggered screen scrolling. Therefore
it is a good place to insert the observer.
Args:
direction: which direction to scroll.
line_index: (int or None) Optional line index to scroll to. See doc string
of the overridden method for more information.
"""
curses_ui.CursesUI._scroll_output(self, direction, line_index=line_index)
self.unwrapped_outputs.append(self._curr_unwrapped_output)
self.wrapped_outputs.append(self._curr_wrapped_output)
self.scroll_messages.append(self._scroll_info)
self.output_array_pointer_indices.append(self._output_array_pointer_indices)
self.output_pad_rows.append(self._output_pad_row)
def _display_main_menu(self, output):
curses_ui.CursesUI._display_main_menu(self, output)
self.main_menu_list.append(self._main_menu)
def _screen_render_nav_bar(self):
pass
def _screen_render_menu_pad(self):
pass
def _display_candidates(self, candidates):
curses_ui.CursesUI._display_candidates(self, candidates)
self.candidates_lists.append(candidates)
def _toast(self, message, color=None, line_index=None):
curses_ui.CursesUI._toast(self, message, color=color, line_index=line_index)
self.toasts.append(message)
class CursesTest(test_util.TensorFlowTestCase):
_EXIT = string_to_codes("exit\n")
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
ap.add_argument(
"-l",
"--line",
dest="line",
type=str,
default="bar",
help="The content of each line")
ap.add_argument(
"-k",
"--link",
dest="link",
action="store_true",
help="Create a command link on each line")
ap.add_argument(
"-m",
"--menu",
dest="menu",
action="store_true",
help="Create a menu for testing")
parsed = ap.parse_args(args)
lines = [parsed.line] * parsed.num_times
font_attr_segs = {}
if parsed.link:
for i in range(len(lines)):
font_attr_segs[i] = [(
0,
len(lines[i]),
debugger_cli_common.MenuItem("", "babble"),)]
annotations = {}
if parsed.menu:
menu = debugger_cli_common.Menu()
menu.append(
debugger_cli_common.MenuItem("babble again", "babble"))
menu.append(
debugger_cli_common.MenuItem("ahoy", "ahoy", enabled=False))
annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
return output
def _print_ones(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Print all-one matrix.", usage=argparse.SUPPRESS)
ap.add_argument(
"-s",
"--size",
dest="size",
type=int,
default=3,
help="Size of the matrix. For example, of the value is 3, "
"the matrix will have shape (3, 3)")
parsed = ap.parse_args(args)
m = np.ones([parsed.size, parsed.size])
return tensor_format.format_tensor(m, "m")
def testInitialization(self):
ui = MockCursesUI(40, 80)
self.assertEqual(0, ui._command_pointer)
self.assertEqual([], ui._active_command_history)
self.assertEqual("", ui._pending_command)
def testCursesUiInChildThreadStartsWithoutException(self):
result = queue.Queue()
def child_thread():
try:
MockCursesUI(40, 80)
except ValueError as e:
result.put(e)
t = threading.Thread(target=child_thread)
t.start()
t.join()
self.assertTrue(result.empty())
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockCursesUI(40, 80, command_sequence=[[], self._EXIT])
ui.run_ui()
# Empty command should not lead to any screen output.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIInvalidCommandPrefix(self):
"""Handle an unregistered command prefix."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("foo\n"), self._EXIT])
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.unwrapped_outputs[0].lines)
# TODO(cais): Add explanation for the 35 extra lines.
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.wrapped_outputs[0].lines[:1])
# A single line of output should not have caused scrolling.
self.assertNotIn("Scroll", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
def testRunUIInvalidCommandSyntax(self):
"""Handle a command with invalid syntax."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -z\n"), self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertIn("Mouse:", ui.scroll_messages[0])
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
def testRunUIScrollTallOutputPageDownUp(self):
"""Scroll tall output with PageDown and PageUp."""
# Use PageDown and PageUp to scroll back and forth a little before exiting.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"), [curses.KEY_NPAGE] * 2 +
[curses.KEY_PPAGE] + self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
    # Screen output/scrolling should have happened four times: once for the
    # initial output and once per scroll event.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
# After 1st scrolling (PageDown).
# The screen output shouldn't have changed. Only the viewport should.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[1])
self.assertIn("Mouse:", ui.scroll_messages[1])
# After 2nd scrolling (PageDown).
self.assertIn("Scroll (PgDn/PgUp): 3.39%", ui.scroll_messages[2])
self.assertIn("Mouse:", ui.scroll_messages[2])
# After 3rd scrolling (PageUp).
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[3])
self.assertIn("Mouse:", ui.scroll_messages[3])
def testCutOffTooManyOutputLines(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -n 20\n"), self._EXIT])
# Modify max_output_lines so that this test doesn't use too much time or
# memory.
ui.max_output_lines = 10
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["bar"] * 10 + ["Output cut off at 10 lines!"],
ui.wrapped_outputs[0].lines[:11])
def testRunUIScrollTallOutputEndHome(self):
"""Scroll tall output with PageDown and PageUp."""
# Use End and Home to scroll a little before exiting to test scrolling.
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble\n"),
[curses.KEY_END] * 2 + [curses.KEY_HOME] + self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
    # Screen output/scrolling should have happened four times: once for the
    # initial output and once per scroll event.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
# After 1st scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
# After 2nd scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[2])
    # After 3rd scrolling (Home).
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testCompileHelpWithoutHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
def testCompileHelpWithHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
help_intro = debugger_cli_common.RichTextLines(
["This is a curses UI.", "All it can do is 'babble'.", ""])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.set_help_intro(help_intro)
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
help_intro.lines + ["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:7])
def testCommandHistoryNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
[curses.KEY_UP], # Hit Up and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
for i in [0, 1]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
def testCommandHistoryNavBackwardTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up twice and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardOverLimit(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up three times and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardThenForward(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_DOWN], # Hit Up twice and Down once.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st output is for command "help".
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
# The 2nd and 3rd outputs are for command "babble".
for i in [1, 2]:
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[i].lines)
def testCommandHistoryPrefixNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\n"),
string_to_codes("babble -n 10\n"),
string_to_codes("help\n"),
string_to_codes("b") + [curses.KEY_UP], # Navigate with prefix.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["bar"], ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[1].lines)
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[2].lines[:4])
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[3].lines)
def testTerminalResize(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"),
[curses.KEY_RESIZE, 100, 85], # Resize to [100, 85]
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The resize event should have caused a second screen output event.
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(2, len(ui.wrapped_outputs))
self.assertEqual(2, len(ui.scroll_messages))
# The 1st and 2nd screen outputs should be identical (unwrapped).
self.assertEqual(ui.unwrapped_outputs[0], ui.unwrapped_outputs[1])
# The 1st scroll info should contain scrolling, because the screen size
# is less than the number of lines in the output.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testTabCompletionWithCommonPrefix(self):
# Type "b" and trigger tab completion.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["ba"])
ui.run_ui()
# The automatically registered exit commands "exit" and "quit" should not
# appear in the tab completion candidates because they don't start with
# "b".
self.assertEqual([["ba", "babble"]], ui.candidates_lists)
# "ba" is a common prefix of the two candidates. So the "ba" command should
# have been issued after the Enter.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionEmptyTriggerWithoutCommonPrefix(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
# Use a different alias "a" instead.
ui.run_ui()
# The manually registered command, along with the automatically registered
# exit commands should appear in the candidates.
self.assertEqual(
[["a", "babble", "cfg", "config", "exit", "h", "help", "m", "mouse",
"quit"]], ui.candidates_lists)
    # The candidates have no common prefix. So no command should have been
    # issued.
self.assertEqual(0, len(ui.unwrapped_outputs))
self.assertEqual(0, len(ui.wrapped_outputs))
self.assertEqual(0, len(ui.scroll_messages))
def testTabCompletionNonemptyTriggerSingleCandidate(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# There is only one candidate, so no candidates should have been displayed.
    # Instead, the completion should have been automatically keyed in, leading
    # to the "babble" command being issued.
self.assertEqual([[]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionNoMatch(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("c\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# Only the invalid command "c" should have been issued.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.unwrapped_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.wrapped_outputs[0].lines[:1])
def testTabCompletionOneWordContext(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "20", "30", "300"])
ui.run_ui()
self.assertEqual([["30", "300"]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 30, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 30, ui.wrapped_outputs[0].lines[:30])
def testTabCompletionTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\t"), # Trigger tab completion.
string_to_codes("2\t"), # With more prefix, tab again.
string_to_codes("3\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "120", "123"])
ui.run_ui()
# There should have been two different lists of candidates.
self.assertEqual([["10", "120", "123"], ["120", "123"]],
ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 123, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 123, ui.wrapped_outputs[0].lines[:123])
def testRegexSearch(self):
"""Test regex search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/a\n"), # Regex search and highlight.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The unwrapped (original) output should never have any highlighting.
self.assertEqual(3, len(ui.unwrapped_outputs))
for i in range(3):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
# The wrapped outputs should show highlighting depending on the regex.
self.assertEqual(3, len(ui.wrapped_outputs))
# The first output should have no highlighting.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
# The second output should have highlighting for "b" and "r".
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
# The third output should have highlighting for "a" only.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(1, 2, "black_on_white")],
ui.wrapped_outputs[2].font_attr_segs[i])
def testRegexSearchContinuation(self):
"""Test continuing scrolling down to next regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The 1st output is for the non-searched output. The other three are for
# the searched output. Even though continuation search "/" is performed
# four times, there should be only three searched outputs, because the
# last one has exceeded the end.
self.assertEqual(4, len(ui.unwrapped_outputs))
for i in range(4):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
for j in range(1, 4):
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[j].lines[:3])
self.assertEqual({
0: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
1: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
2: [(0, 1, "black_on_white"), (2, 3, "black_on_white")]
}, ui.wrapped_outputs[j].font_attr_segs)
self.assertEqual([0, 0, 1, 2], ui.output_pad_rows)
def testRegexSearchUnderLineWrapping(self):
ui = MockCursesUI(
40,
6, # Use a narrow window to trigger line wrapping
command_sequence=[
string_to_codes("babble -n 3 -l foo-bar-baz-qux\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some")
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
for wrapped_output in ui.wrapped_outputs:
self.assertEqual(["foo-", "bar-", "baz-", "qux"] * 3,
wrapped_output.lines[0 : 12])
# The scroll location should reflect the line wrapping.
self.assertEqual([0, 0, 4, 8], ui.output_pad_rows)
def testRegexSearchNoMatchContinuation(self):
"""Test continuing scrolling when there is no regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The regex search and continuation search in the 3rd command should not
# have produced any output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchContinuationWithoutSearch(self):
"""Test continuation scrolling when no regex search has been performed."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchWithInvalidRegex(self):
"""Test using invalid regex to search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/[\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# Invalid regex should not have led to a new screen of output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
# Invalid regex should have led to a toast error message.
self.assertEqual(
[MockCursesUI._UI_WAIT_MESSAGE,
"ERROR: Invalid regular expression: \"[\"",
MockCursesUI._UI_WAIT_MESSAGE],
ui.toasts)
def testRegexSearchFromCommandHistory(self):
"""Test regex search commands are recorded in command history."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("babble -n 4\n"),
[curses.KEY_UP],
[curses.KEY_UP],
string_to_codes("\n"), # Hit Up twice and Enter.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
self.assertEqual(["bar"] * 4, ui.wrapped_outputs[2].lines[:4])
self.assertEqual({}, ui.wrapped_outputs[2].font_attr_segs)
# The regex search command loaded from history should have worked on the
# new screen output.
self.assertEqual(["bar"] * 4, ui.wrapped_outputs[3].lines[:4])
for i in range(4):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[3].font_attr_segs[i])
def testDisplayTensorWithIndices(self):
"""Test displaying tensor with indices."""
ui = MockCursesUI(
9, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
[curses.KEY_NPAGE],
[curses.KEY_NPAGE],
[curses.KEY_NPAGE],
[curses.KEY_END],
[curses.KEY_NPAGE], # This PageDown goes over the bottom limit.
[curses.KEY_PPAGE],
[curses.KEY_PPAGE],
[curses.KEY_PPAGE],
[curses.KEY_HOME],
[curses.KEY_PPAGE], # This PageUp goes over the top limit.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
self.assertEqual(11, len(ui.unwrapped_outputs))
self.assertEqual(11, len(ui.output_array_pointer_indices))
self.assertEqual(11, len(ui.scroll_messages))
for i in range(11):
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"m\":", ""], ui.unwrapped_outputs[i].lines[:2])
self.assertEqual(
repr(np.ones([5, 5])).split("\n"), ui.unwrapped_outputs[i].lines[2:])
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[0])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[0])
# Scrolled down one line.
self.assertEqual({
0: None,
-1: [2, 0]
}, ui.output_array_pointer_indices[1])
self.assertIn(" Scroll (PgDn/PgUp): 16.67% -[2,0] ", ui.scroll_messages[1])
# Scrolled down one line.
self.assertEqual({
0: [0, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[2])
self.assertIn(" Scroll (PgDn/PgUp): 33.33% [0,0]-[3,0] ",
ui.scroll_messages[2])
# Scrolled down one line.
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[3])
self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
ui.scroll_messages[3])
# Scroll to the bottom.
self.assertEqual({
0: [4, 0],
-1: None
}, ui.output_array_pointer_indices[4])
self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[4])
# An attempt to scroll beyond the bottom should lead to no change.
self.assertEqual({
0: [4, 0],
-1: None
}, ui.output_array_pointer_indices[5])
self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[5])
# Scrolled up one line.
self.assertEqual({
0: [3, 0],
-1: None
}, ui.output_array_pointer_indices[6])
self.assertIn(" Scroll (PgDn/PgUp): 83.33% [3,0]- ", ui.scroll_messages[6])
# Scrolled up one line.
self.assertEqual({
0: [2, 0],
-1: None
}, ui.output_array_pointer_indices[7])
self.assertIn(" Scroll (PgDn/PgUp): 66.67% [2,0]- ", ui.scroll_messages[7])
# Scrolled up one line.
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[8])
self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
ui.scroll_messages[8])
# Scroll to the top.
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[9])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[9])
# An attempt to scroll past the top limit should lead to no change.
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[10])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[10])
def testScrollTensorByValidIndices(self):
"""Test scrolling to specified (valid) indices in a tensor."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
string_to_codes("@[0, 0]\n"), # Scroll to element [0, 0].
string_to_codes("@1,0\n"), # Scroll to element [3, 0].
string_to_codes("@[0,2]\n"), # Scroll back to line 0.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.output_array_pointer_indices))
for i in range(4):
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"m\":", ""], ui.unwrapped_outputs[i].lines[:2])
self.assertEqual(
repr(np.ones([5, 5])).split("\n"), ui.unwrapped_outputs[i].lines[2:])
self.assertEqual({
0: None,
-1: [0, 0]
}, ui.output_array_pointer_indices[0])
self.assertEqual({
0: [0, 0],
-1: [2, 0]
}, ui.output_array_pointer_indices[1])
self.assertEqual({
0: [1, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[2])
self.assertEqual({
0: [0, 0],
-1: [2, 0]
}, ui.output_array_pointer_indices[3])
def testScrollTensorByInvalidIndices(self):
"""Test scrolling to specified invalid indices in a tensor."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
string_to_codes("@[10, 0]\n"), # Scroll to invalid indices.
string_to_codes("@[]\n"), # Scroll to invalid indices.
string_to_codes("@\n"), # Scroll to invalid indices.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
# Because all scroll-by-indices commands are invalid, there should be only
# one output event.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.output_array_pointer_indices))
# Check error messages.
self.assertEqual("ERROR: Indices exceed tensor dimensions.", ui.toasts[2])
self.assertEqual("ERROR: invalid literal for int() with base 10: ''",
ui.toasts[4])
self.assertEqual("ERROR: Empty indices.", ui.toasts[6])
def testWriteScreenOutputToFileWorks(self):
output_path = tempfile.mktemp()
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2>%s\n" % output_path),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
with gfile.Open(output_path, "r") as f:
self.assertEqual("bar\nbar\n", f.read())
# Clean up output file.
gfile.Remove(output_path)
def testIncompleteRedirectErrors(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2 >\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["ERROR: Redirect file path is empty"], ui.toasts)
self.assertEqual(0, len(ui.unwrapped_outputs))
def testAppendingRedirectErrors(self):
output_path = tempfile.mktemp()
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2 >> %s\n" % output_path),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
# Clean up output file.
gfile.Remove(output_path)
def testMouseOffTakesEffect(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("mouse off\n"), string_to_codes("babble\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertFalse(ui._mouse_enabled)
self.assertIn("Mouse: OFF", ui.scroll_messages[-1])
def testMouseOffAndOnTakeEffect(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("mouse off\n"), string_to_codes("mouse on\n"),
string_to_codes("babble\n"), self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertTrue(ui._mouse_enabled)
self.assertIn("Mouse: ON", ui.scroll_messages[-1])
def testMouseClickOnLinkTriggersCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
[curses.KEY_MOUSE, 1, 4], # A click on a hyperlink.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testMouseClickOnLinkWithExistingTextTriggersCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
string_to_codes("foo"), # Enter some existing code in the textbox.
[curses.KEY_MOUSE, 1, 4], # A click on a hyperlink.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testMouseClickOffLinkDoesNotTriggerCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
# A click off a hyperlink (too much to the right).
[curses.KEY_MOUSE, 8, 4],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# The mouse click event should not have triggered any command.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
# This command should have generated no main menus.
self.assertEqual([None], ui.main_menu_list)
def testMouseClickOnEnabledMenuItemWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -m\n"),
# A click on the enabled menu item.
[curses.KEY_MOUSE, 3, 2],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
# Check the content of the menu.
self.assertEqual(["| babble again | ahoy | "], ui.main_menu_list[0].lines)
self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs))
self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs[0]))
item_annot = ui.main_menu_list[0].font_attr_segs[0][0]
self.assertEqual(2, item_annot[0])
self.assertEqual(14, item_annot[1])
self.assertEqual("babble", item_annot[2][0].content)
self.assertEqual("underline", item_annot[2][1])
# The output from the menu-triggered command does not have a menu.
self.assertIsNone(ui.main_menu_list[1])
def testMouseClickOnDisabledMenuItemTriggersNoCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -m\n"),
# A click on the disabled menu item.
[curses.KEY_MOUSE, 18, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
def testNavigationUsingCommandLineWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
string_to_codes("prev\n"),
string_to_codes("next\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
def testNavigationOverOldestLimitUsingCommandLineGivesCorrectWarning(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
string_to_codes("prev\n"),
string_to_codes("prev\n"), # Navigate over oldest limit.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual("At the OLDEST in navigation history!", ui.toasts[-2])
def testNavigationOverLatestLimitUsingCommandLineGivesCorrectWarning(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
string_to_codes("prev\n"),
string_to_codes("next\n"),
string_to_codes("next\n"), # Navigate over latest limit.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
self.assertEqual("At the LATEST in navigation history!", ui.toasts[-2])
def testMouseClicksOnNavBarWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
# A click on the back (prev) button of the nav bar.
[curses.KEY_MOUSE, 3, 1],
# A click on the forward (next) button of the nav bar.
[curses.KEY_MOUSE, 7, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
def testMouseClicksOnNavBarAfterPreviousScrollingWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
[curses.KEY_NPAGE], # Scroll down one line.
string_to_codes("babble -n 4\n"),
# A click on the back (prev) button of the nav bar.
[curses.KEY_MOUSE, 3, 1],
# A click on the forward (next) button of the nav bar.
[curses.KEY_MOUSE, 7, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(6, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
# From manual scroll.
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[2].lines)
# From history navigation.
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[3].lines)
# From history navigation's auto-scroll to history scroll position.
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[4].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[5].lines)
self.assertEqual(6, len(ui.scroll_messages))
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[2])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[4])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[5])
class ScrollBarTest(test_util.TensorFlowTestCase):
def testConstructorRaisesExceptionForNotEnoughHeight(self):
with self.assertRaisesRegexp(
ValueError, r"Insufficient height for ScrollBar \(2\)"):
curses_ui.ScrollBar(0, 0, 1, 1, 0, 0)
def testLayoutIsEmptyForZeroRow(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 0)
layout = scroll_bar.layout()
self.assertEqual([" "] * 8, layout.lines)
self.assertEqual({}, layout.font_attr_segs)
def testLayoutIsEmptyForOneRow(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 1)
layout = scroll_bar.layout()
self.assertEqual([" "] * 8, layout.lines)
self.assertEqual({}, layout.font_attr_segs)
def testClickCommandForOneRowIsNone(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 1)
self.assertIsNone(scroll_bar.get_click_command(0))
self.assertIsNone(scroll_bar.get_click_command(3))
self.assertIsNone(scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
def testLayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
self.assertEqual(
{0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testWidth1LayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 0, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual(["U"] + [" "] * 6 + ["D"], layout.lines)
self.assertEqual(
{0: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testWidth3LayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 2, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP "] + [" "] * 6 + ["DN "], layout.lines)
self.assertEqual(
{0: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testWidth4LayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 3, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual([" UP "] + [" "] * 6 + ["DOWN"], layout.lines)
self.assertEqual(
{0: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testLayoutIsCorrectForBottomPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 19, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
self.assertEqual(
{0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
6: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testLayoutIsCorrectForMiddlePosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 10, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
self.assertEqual(
{0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
3: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testClickCommandsAreCorrectForMiddlePosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 10, 20)
self.assertIsNone(scroll_bar.get_click_command(-1))
self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
scroll_bar.get_click_command(0))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(1))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(2))
self.assertIsNone(scroll_bar.get_click_command(3))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(5))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(6))
self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
def testClickCommandsAreCorrectForBottomPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 19, 20)
self.assertIsNone(scroll_bar.get_click_command(-1))
self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
scroll_bar.get_click_command(0))
for i in range(1, 6):
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(i))
self.assertIsNone(scroll_bar.get_click_command(6))
self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
def testClickCommandsAreCorrectForScrollBarNotAtZeroMinY(self):
scroll_bar = curses_ui.ScrollBar(0, 5, 1, 12, 10, 20)
self.assertIsNone(scroll_bar.get_click_command(0))
self.assertIsNone(scroll_bar.get_click_command(4))
self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
scroll_bar.get_click_command(5))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(6))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(10))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(11))
self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
scroll_bar.get_click_command(12))
self.assertIsNone(scroll_bar.get_click_command(13))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/curses_ui_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Format tensors (ndarrays) for screen display and navigation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
_NUMPY_OMISSION = "...,"
_NUMPY_DEFAULT_EDGE_ITEMS = 3
_NUMBER_REGEX = re.compile(r"[-+]?([0-9][-+0-9eE\.]+|nan|inf)(\s|,|\])")
BEGIN_INDICES_KEY = "i0"
OMITTED_INDICES_KEY = "omitted"
DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR = "bold"
class HighlightOptions(object):
"""Options for highlighting elements of a tensor."""
def __init__(self,
criterion,
description=None,
font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):
"""Constructor of HighlightOptions.
Args:
criterion: (callable) A callable of the following signature:
def to_highlight(X):
# Args:
# X: The tensor to highlight elements in.
#
# Returns:
# (boolean ndarray) A boolean ndarray of the same shape as X
# indicating which elements are to be highlighted (iff True).
This callable will be used as the argument of np.argwhere() to
determine which elements of the tensor are to be highlighted.
description: (str) Description of the highlight criterion embodied by
criterion.
font_attr: (str) Font attribute to be applied to the
highlighted elements.
"""
self.criterion = criterion
self.description = description
self.font_attr = font_attr
def format_tensor(tensor,
tensor_label,
include_metadata=False,
auxiliary_message=None,
include_numeric_summary=False,
np_printoptions=None,
highlight_options=None):
"""Generate a RichTextLines object showing a tensor in formatted style.
Args:
tensor: The tensor to be displayed, as a numpy ndarray or other
appropriate format (e.g., None representing uninitialized tensors).
tensor_label: A label for the tensor, as a string. If set to None, will
suppress the tensor name line in the return value.
include_metadata: Whether metadata such as dtype and shape are to be
included in the formatted text.
auxiliary_message: An auxiliary message to display under the tensor label,
dtype and shape information lines.
include_numeric_summary: Whether a text summary of the numeric values (if
applicable) will be included.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions() to set the text format for displaying numpy
ndarrays.
highlight_options: (HighlightOptions) options for highlighting elements
of the tensor.
Returns:
A RichTextLines object. Its annotation field has line-by-line markups to
indicate which indices in the array the first element of each line
corresponds to.
"""
lines = []
font_attr_segs = {}
if tensor_label is not None:
lines.append("Tensor \"%s\":" % tensor_label)
suffix = tensor_label.split(":")[-1]
if suffix.isdigit():
# Suffix is a number. Assume it is the output slot index.
font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
else:
# Suffix is not a number. It is auxiliary information such as the debug
# op type. In this case, highlight the suffix with a different color.
debug_op_len = len(suffix)
proper_len = len(tensor_label) - debug_op_len - 1
font_attr_segs[0] = [
(8, 8 + proper_len, "bold"),
(8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, "yellow")
]
if isinstance(tensor, debug_data.InconvertibleTensorProto):
if lines:
lines.append("")
lines.extend(str(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
elif not isinstance(tensor, np.ndarray):
# If tensor is not a np.ndarray, return simple text-line representation of
# the object without annotations.
if lines:
lines.append("")
lines.extend(repr(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
if include_metadata:
lines.append(" dtype: %s" % str(tensor.dtype))
lines.append(" shape: %s" % str(tensor.shape).replace("L", ""))
if lines:
lines.append("")
formatted = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
if auxiliary_message:
formatted.extend(auxiliary_message)
if include_numeric_summary:
formatted.append("Numeric summary:")
formatted.extend(numeric_summary(tensor))
formatted.append("")
# Apply custom string formatting options for numpy ndarray.
if np_printoptions is not None:
np.set_printoptions(**np_printoptions)
array_lines = repr(tensor).split("\n")
if tensor.dtype.type is not np.string_:
# Parse array lines to get beginning indices for each line.
# TODO(cais): Currently, we do not annotate string-type tensors due to
# difficulty in escaping sequences. Address this issue.
annotations = _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=np_printoptions)
else:
annotations = None
formatted_array = debugger_cli_common.RichTextLines(
array_lines, annotations=annotations)
formatted.extend(formatted_array)
# Perform optional highlighting.
if highlight_options is not None:
indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
total_elements = np.size(tensor)
highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
"(%s)" % highlight_options.description if highlight_options.description
else "", len(indices_list), total_elements,
len(indices_list) / float(total_elements) * 100.0)
formatted.lines[0] += " " + highlight_summary
if indices_list:
indices_list = [list(indices) for indices in indices_list]
are_omitted, rows, start_cols, end_cols = locate_tensor_element(
formatted, indices_list)
for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
start_cols, end_cols):
if is_omitted or start_col is None or end_col is None:
continue
if row in formatted.font_attr_segs:
formatted.font_attr_segs[row].append(
(start_col, end_col, highlight_options.font_attr))
else:
formatted.font_attr_segs[row] = [(start_col, end_col,
highlight_options.font_attr)]
return formatted
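# A minimal usage sketch of format_tensor() with element highlighting; the
# tensor value and label below are hypothetical, for illustration only.
def _example_format_tensor_with_highlights():
  tensor = np.linspace(-1.0, 1.0, 16).reshape([4, 4])
  highlight_negatives = HighlightOptions(
      lambda x: x < 0.0, description="negative")
  formatted = format_tensor(
      tensor,
      "dense/weights:0",
      include_metadata=True,
      include_numeric_summary=True,
      highlight_options=highlight_negatives)
  # formatted.lines holds the screen text; formatted.annotations maps line
  # numbers to the begin indices computed by _annotate_ndarray_lines().
  return formatted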
def _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=None, offset=0):
"""Generate annotations for line-by-line begin indices of tensor text.
Parse the numpy-generated text representation of a numpy ndarray to
determine the indices of the first element of each text line (if any
element is present in the line).
For example, given the following multi-line ndarray text representation:
["array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])"]
the generated annotation will be:
{0: {BEGIN_INDICES_KEY: [0, 0]},
1: {BEGIN_INDICES_KEY: [1, 0]},
2: {BEGIN_INDICES_KEY: [2, 0]},
3: {BEGIN_INDICES_KEY: [3, 0]}}
Args:
array_lines: Text lines representing the tensor, as a list of str.
tensor: The tensor being formatted as string.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions().
offset: Line number offset applied to the line indices in the returned
annotation.
Returns:
An annotation as a dict.
"""
if np_printoptions and "edgeitems" in np_printoptions:
edge_items = np_printoptions["edgeitems"]
else:
edge_items = _NUMPY_DEFAULT_EDGE_ITEMS
annotations = {}
# Put metadata about the tensor in the annotations["tensor_metadata"].
annotations["tensor_metadata"] = {
"dtype": tensor.dtype, "shape": tensor.shape}
dims = np.shape(tensor)
ndims = len(dims)
if ndims == 0:
# No indices for a 0D tensor.
return annotations
curr_indices = [0] * len(dims)
curr_dim = 0
for i in xrange(len(array_lines)):
line = array_lines[i].strip()
if not line:
# Skip empty lines, which can appear for >= 3D arrays.
continue
if line == _NUMPY_OMISSION:
annotations[offset + i] = {OMITTED_INDICES_KEY: copy.copy(curr_indices)}
curr_indices[curr_dim - 1] = dims[curr_dim - 1] - edge_items
else:
num_lbrackets = line.count("[") # TODO(cais): String array escaping.
num_rbrackets = line.count("]")
curr_dim += num_lbrackets - num_rbrackets
annotations[offset + i] = {BEGIN_INDICES_KEY: copy.copy(curr_indices)}
if num_rbrackets == 0:
line_content = line[line.rfind("[") + 1:]
num_elements = line_content.count(",")
curr_indices[curr_dim - 1] += num_elements
else:
if curr_dim > 0:
curr_indices[curr_dim - 1] += 1
for k in xrange(curr_dim, ndims):
curr_indices[k] = 0
return annotations
def locate_tensor_element(formatted, indices):
"""Locate a tensor element in formatted text lines, given element indices.
Given a RichTextLines object representing a tensor and indices of the sought
element, return the row number at which the element is located (if it exists).
Args:
formatted: A RichTextLines object containing formatted text lines
representing the tensor.
indices: Indices of the sought element, as a list of int or a list of list
of int. The former case is for a single set of indices to look up,
whereas the latter case is for looking up a batch of indices sets at once.
In the latter case, the indices must be in ascending order, or a
ValueError will be raised.
Returns:
1) A boolean indicating whether the element falls into an omitted line.
2) Row index.
3) Column start index, i.e., the first column in which the representation
of the specified tensor starts, if it can be determined. If it cannot
be determined (e.g., due to ellipsis), None.
4) Column end index, i.e., the column right after the last column that
represents the specified tensor. If it cannot be determined, None.
The return values described above are based on a single set of indices to
look up. In the case of batch mode (multiple sets of indices), the return
values will be lists of the types described above.
Raises:
AttributeError: If input argument "formatted" does not have the required
annotations.
ValueError: If:
1) Indices do not match the dimensions of the tensor, or
2) Indices exceed sizes of the tensor, or
3) Indices contain negative value(s), or
4) in batch mode, not all sets of indices are in ascending order.
"""
if isinstance(indices[0], list):
indices_list = indices
input_batch = True
else:
indices_list = [indices]
input_batch = False
# Check that tensor_metadata is available.
if "tensor_metadata" not in formatted.annotations:
raise AttributeError("tensor_metadata is not available in annotations.")
# Sanity check on input argument.
_validate_indices_list(indices_list, formatted)
dims = formatted.annotations["tensor_metadata"]["shape"]
batch_size = len(indices_list)
lines = formatted.lines
annot = formatted.annotations
prev_r = 0
prev_line = ""
prev_indices = [0] * len(dims)
# Initialize return values
are_omitted = [None] * batch_size
row_indices = [None] * batch_size
start_columns = [None] * batch_size
end_columns = [None] * batch_size
batch_pos = 0 # Current position in the batch.
for r in xrange(len(lines)):
if r not in annot:
continue
if BEGIN_INDICES_KEY in annot[r]:
indices_key = BEGIN_INDICES_KEY
elif OMITTED_INDICES_KEY in annot[r]:
indices_key = OMITTED_INDICES_KEY
matching_indices_list = [
ind for ind in indices_list[batch_pos:]
if prev_indices <= ind < annot[r][indices_key]
]
if matching_indices_list:
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
batch_pos += num_matches
if batch_pos >= batch_size:
break
prev_r = r
prev_line = lines[r]
prev_indices = annot[r][indices_key]
if batch_pos < batch_size:
matching_indices_list = indices_list[batch_pos:]
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
if input_batch:
return are_omitted, row_indices, start_columns, end_columns
else:
return are_omitted[0], row_indices[0], start_columns[0], end_columns[0]
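# An illustrative sketch of the two lookup modes of locate_tensor_element():
# a single indices set returns scalars, whereas a batch (a list of indices
# sets in ascending order) returns four parallel lists. The tensor label "m"
# is hypothetical.
def _example_locate_tensor_element():
  formatted = format_tensor(np.ones([4, 4]), "m")
  # Single lookup: (is_omitted, row, start_col, end_col).
  single = locate_tensor_element(formatted, [1, 0])
  # Batch lookup: four lists, one entry per indices set.
  batch = locate_tensor_element(formatted, [[0, 0], [2, 3]])
  return single, batch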
def _validate_indices_list(indices_list, formatted):
prev_ind = None
for ind in indices_list:
# Check indices match tensor dimensions.
dims = formatted.annotations["tensor_metadata"]["shape"]
if len(ind) != len(dims):
raise ValueError("Dimensions mismatch: requested: %d; actual: %d" %
(len(ind), len(dims)))
# Check indices are within size limits.
for req_idx, siz in zip(ind, dims):
if req_idx >= siz:
raise ValueError("Indices exceed tensor dimensions.")
if req_idx < 0:
raise ValueError("Indices contain negative value(s).")
# Check indices are in ascending order.
if prev_ind and ind < prev_ind:
raise ValueError("Input indices sets are not in ascending order.")
prev_ind = ind
def _locate_elements_in_line(line, indices_list, ref_indices):
"""Determine the start and end indices of an element in a line.
Args:
line: (str) the line in which the element is to be sought.
indices_list: (list of list of int) list of indices of the element to
search for. Assumes that the indices in the batch are unique and sorted
in ascending order.
ref_indices: (list of int) reference indices, i.e., the indices of the
first element represented in the line.
Returns:
start_columns: (list of int) start column indices, if found. If not found,
None.
end_columns: (list of int) end column indices, if found. If not found,
None.
If found, the element is represented in the left-closed-right-open interval
[start_column, end_column].
"""
batch_size = len(indices_list)
offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]
start_columns = [None] * batch_size
end_columns = [None] * batch_size
if _NUMPY_OMISSION in line:
ellipsis_index = line.find(_NUMPY_OMISSION)
else:
ellipsis_index = len(line)
matches_iter = re.finditer(_NUMBER_REGEX, line)
batch_pos = 0
offset_counter = 0
for match in matches_iter:
if match.start() > ellipsis_index:
# Do not attempt to search beyond ellipsis.
break
if offset_counter == offsets[batch_pos]:
start_columns[batch_pos] = match.start()
# Remove the final comma, right bracket, or whitespace.
end_columns[batch_pos] = match.end() - 1
batch_pos += 1
if batch_pos >= batch_size:
break
offset_counter += 1
return start_columns, end_columns
def _pad_string_to_length(string, length):
return " " * (length - len(string)) + string
def numeric_summary(tensor):
"""Get a text summary of a numeric tensor.
This summary is only available for numeric (int*, float*, complex*) and
Boolean tensors.
Args:
tensor: (`numpy.ndarray`) the tensor value object to be summarized.
Returns:
The summary text as a `RichTextLines` object. If the type of `tensor` is
not numeric or Boolean, a single-line `RichTextLines` object containing a
warning message is returned instead.
"""
def _counts_summary(counts, skip_zeros=True, total_count=None):
"""Format values as a two-row table."""
if skip_zeros:
counts = [(count_key, count_val) for count_key, count_val in counts
if count_val]
max_common_len = 0
for count_key, count_val in counts:
count_val_str = str(count_val)
common_len = max(len(count_key) + 1, len(count_val_str) + 1)
max_common_len = max(common_len, max_common_len)
key_line = debugger_cli_common.RichLine("|")
val_line = debugger_cli_common.RichLine("|")
for count_key, count_val in counts:
count_val_str = str(count_val)
key_line += _pad_string_to_length(count_key, max_common_len)
val_line += _pad_string_to_length(count_val_str, max_common_len)
key_line += " |"
val_line += " |"
if total_count is not None:
total_key_str = "total"
total_val_str = str(total_count)
max_common_len = max(len(total_key_str) + 1, len(total_val_str))
total_key_str = _pad_string_to_length(total_key_str, max_common_len)
total_val_str = _pad_string_to_length(total_val_str, max_common_len)
key_line += total_key_str + " |"
val_line += total_val_str + " |"
return debugger_cli_common.rich_text_lines_from_rich_line_list(
[key_line, val_line])
if not isinstance(tensor, np.ndarray) or not np.size(tensor):
return debugger_cli_common.RichTextLines([
"No numeric summary available due to empty tensor."])
elif (np.issubdtype(tensor.dtype, np.floating) or
np.issubdtype(tensor.dtype, np.complex) or
np.issubdtype(tensor.dtype, np.integer)):
counts = [
("nan", np.sum(np.isnan(tensor))),
("-inf", np.sum(np.isneginf(tensor))),
("-", np.sum(np.logical_and(
tensor < 0.0, np.logical_not(np.isneginf(tensor))))),
("0", np.sum(tensor == 0.0)),
("+", np.sum(np.logical_and(
tensor > 0.0, np.logical_not(np.isposinf(tensor))))),
("+inf", np.sum(np.isposinf(tensor)))]
output = _counts_summary(counts, total_count=np.size(tensor))
valid_array = tensor[
np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
if np.size(valid_array):
stats = [
("min", np.min(valid_array)),
("max", np.max(valid_array)),
("mean", np.mean(valid_array)),
("std", np.std(valid_array))]
output.extend(_counts_summary(stats, skip_zeros=False))
return output
elif tensor.dtype == np.bool:
counts = [
("False", np.sum(tensor == 0)),
("True", np.sum(tensor > 0)),]
return _counts_summary(counts, total_count=np.size(tensor))
else:
return debugger_cli_common.RichTextLines([
"No numeric summary available due to tensor dtype: %s." % tensor.dtype])
|
tensorflow-master
|
tensorflow/python/debug/cli/tensor_format.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger (tfdbg) User-Interface Factory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
SUPPORTED_UI_TYPES = ["curses", "readline"]
def get_ui(ui_type,
on_ui_exit=None,
available_ui_types=None,
config=None):
"""Create a `base_ui.BaseUI` subtype.
This factory method attempts to fall back to other available ui_types on
ImportError. For example, if `ui_type` is `curses` but `curses` cannot be
imported properly, e.g., on Windows, it will fall back to `readline`.
Args:
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
available_ui_types: (`None` or `list` of `str`) Manually-set available
ui_types.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations.
Returns:
A `base_ui.BaseUI` subtype object.
Raises:
ValueError: on an invalid ui_type, or on exhausting all fallback ui_types.
"""
if available_ui_types is None:
available_ui_types = copy.deepcopy(SUPPORTED_UI_TYPES)
if ui_type and (ui_type not in available_ui_types):
raise ValueError("Invalid ui_type: '%s'" % ui_type)
try:
# pylint: disable=g-import-not-at-top
if not ui_type or ui_type == "curses":
from tensorflow.python.debug.cli import curses_ui
return curses_ui.CursesUI(on_ui_exit=on_ui_exit, config=config)
elif ui_type == "readline":
from tensorflow.python.debug.cli import readline_ui
return readline_ui.ReadlineUI(on_ui_exit=on_ui_exit, config=config)
# pylint: enable=g-import-not-at-top
except ImportError:
available_ui_types.remove(ui_type)
if not available_ui_types:
raise ValueError("Exhausted all fallback ui_types.")
# Propagate on_ui_exit and config so the fallback UI behaves the same.
return get_ui(available_ui_types[0],
on_ui_exit=on_ui_exit,
available_ui_types=available_ui_types,
config=config)
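# A minimal usage sketch (hypothetical): request a curses UI and rely on the
# ImportError-driven fallback to readline, e.g., on Windows where the curses
# module may be unavailable.
def _example_get_ui():
  ui = get_ui("curses", on_ui_exit=lambda: None)
  return ui  # curses_ui.CursesUI, or readline_ui.ReadlineUI after fallback.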
|
tensorflow-master
|
tensorflow/python/debug/cli/ui_factory.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CLI Backend for the Analyzer Part of the Debugger.
The analyzer performs post hoc analysis of dumped intermediate tensors and
graph structure information from debugged Session.run() calls.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import evaluator
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import source_utils
RL = debugger_cli_common.RichLine
# String constants for the depth-dependent hanging indent at the beginning
# of each line.
HANG_UNFINISHED = "| " # Used for unfinished recursion depths.
HANG_FINISHED = " "
HANG_SUFFIX = "|- "
# String constant for displaying depth and op type.
DEPTH_TEMPLATE = "(%d) "
OP_TYPE_TEMPLATE = "[%s] "
# String constants for control inputs/outputs, etc.
CTRL_LABEL = "(Ctrl) "
ELLIPSIS = "..."
SORT_TENSORS_BY_TIMESTAMP = "timestamp"
SORT_TENSORS_BY_DUMP_SIZE = "dump_size"
SORT_TENSORS_BY_OP_TYPE = "op_type"
SORT_TENSORS_BY_TENSOR_NAME = "tensor_name"
def _add_main_menu(output,
node_name=None,
enable_list_tensors=True,
enable_node_info=True,
enable_print_tensor=True,
enable_list_inputs=True,
enable_list_outputs=True):
"""Generate main menu for the screen output from a command.
Args:
output: (debugger_cli_common.RichTextLines) the output object to modify.
node_name: (str or None) name of the node involved (if any). If None,
the menu items node_info, list_inputs and list_outputs will be
automatically disabled, overriding the values of arguments
enable_node_info, enable_list_inputs and enable_list_outputs.
enable_list_tensors: (bool) whether the list_tensor menu item will be
enabled.
enable_node_info: (bool) whether the node_info item will be enabled.
enable_print_tensor: (bool) whether the print_tensor item will be enabled.
enable_list_inputs: (bool) whether the item list_inputs will be enabled.
enable_list_outputs: (bool) whether the item list_outputs will be enabled.
"""
menu = debugger_cli_common.Menu()
menu.append(
debugger_cli_common.MenuItem(
"list_tensors", "list_tensors", enabled=enable_list_tensors))
if node_name:
menu.append(
debugger_cli_common.MenuItem(
"node_info",
"node_info -a -d -t %s" % node_name,
enabled=enable_node_info))
menu.append(
debugger_cli_common.MenuItem(
"print_tensor",
"print_tensor %s" % node_name,
enabled=enable_print_tensor))
menu.append(
debugger_cli_common.MenuItem(
"list_inputs",
"list_inputs -c -r %s" % node_name,
enabled=enable_list_inputs))
menu.append(
debugger_cli_common.MenuItem(
"list_outputs",
"list_outputs -c -r %s" % node_name,
enabled=enable_list_outputs))
else:
menu.append(
debugger_cli_common.MenuItem(
"node_info", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("print_tensor", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("list_inputs", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("list_outputs", None, enabled=False))
menu.append(
debugger_cli_common.MenuItem("run_info", "run_info"))
menu.append(
debugger_cli_common.MenuItem("help", "help"))
output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
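# An illustrative sketch (hypothetical) of a tensor filter with the signature
# that DebugAnalyzer.add_tensor_filter() below expects: it flags dumped
# floating-point tensors that contain any nan or inf value.
def _example_has_bad_value_filter(dump_datum, tensor):
  del dump_datum  # Metadata (tensor name, timestamp, etc.) is unused here.
  import numpy as np  # Local import; this module does not import numpy.
  if not isinstance(tensor, np.ndarray) or not np.issubdtype(
      tensor.dtype, np.floating):
    return False
  return not np.all(np.isfinite(tensor))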
class DebugAnalyzer(object):
"""Analyzer for debug data from dump directories."""
_TIMESTAMP_COLUMN_HEAD = "t (ms)"
_DUMP_SIZE_COLUMN_HEAD = "Size (B)"
_OP_TYPE_COLUMN_HEAD = "Op type"
_TENSOR_NAME_COLUMN_HEAD = "Tensor name"
# Op types to be omitted when generating descriptions of graph structure.
_GRAPH_STRUCT_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
def __init__(self, debug_dump, config):
"""DebugAnalyzer constructor.
Args:
debug_dump: A DebugDumpDir object.
config: A `cli_config.CLIConfig` object that carries user-facing
configurations.
"""
self._debug_dump = debug_dump
self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)
# Initialize tensor filters state.
self._tensor_filters = {}
self._build_argument_parsers(config)
config.set_callback("graph_recursion_depth",
self._build_argument_parsers)
# TODO(cais): Implement list_nodes.
def _build_argument_parsers(self, config):
"""Build argument parsers for DebugAnalayzer.
Args:
config: A `cli_config.CLIConfig` object.
The resulting parsers are stored in self._arg_parsers, a dict mapping
command handler name to `ArgumentParser` instance.
"""
# Argument parsers for command handlers.
self._arg_parsers = {}
# Parser for list_tensors.
ap = argparse.ArgumentParser(
description="List dumped intermediate tensors.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-f",
"--tensor_filter",
dest="tensor_filter",
type=str,
default="",
help="List only Tensors passing the filter of the specified name")
ap.add_argument(
"-fenn",
"--filter_exclude_node_names",
dest="filter_exclude_node_names",
type=str,
default="",
help="When applying the tensor filter, exclude node with names "
"matching the regular expression. Applicable only if --tensor_filter "
"or -f is used.")
ap.add_argument(
"-n",
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="filter op type by regex.")
ap.add_argument(
"-s",
"--sort_by",
dest="sort_by",
type=str,
default=SORT_TENSORS_BY_TIMESTAMP,
help=("the field to sort the data by: (%s | %s | %s | %s)" %
(SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,
SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))
ap.add_argument(
"-r",
"--reverse",
dest="reverse",
action="store_true",
help="sort the data in reverse (descending) order")
self._arg_parsers["list_tensors"] = ap
# Parser for node_info.
ap = argparse.ArgumentParser(
description="Show information about a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an associated tensor, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-a",
"--attributes",
dest="attributes",
action="store_true",
help="Also list attributes of the node.")
ap.add_argument(
"-d",
"--dumps",
dest="dumps",
action="store_true",
help="Also list dumps available from the node.")
ap.add_argument(
"-t",
"--traceback",
dest="traceback",
action="store_true",
help="Also include the traceback of the node's creation "
"(if available in Python).")
self._arg_parsers["node_info"] = ap
# Parser for list_inputs.
ap = argparse.ArgumentParser(
description="Show inputs to a node.", usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an output tensor from the node, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-c", "--control", action="store_true", help="Include control inputs.")
ap.add_argument(
"-d",
"--depth",
dest="depth",
type=int,
default=config.get("graph_recursion_depth"),
help="Maximum depth of recursion used when showing the input tree.")
ap.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Show inputs to the node recursively, i.e., the input tree.")
ap.add_argument(
"-t",
"--op_type",
action="store_true",
help="Show op types of input nodes.")
self._arg_parsers["list_inputs"] = ap
# Parser for list_outputs.
ap = argparse.ArgumentParser(
description="Show the nodes that receive the outputs of given node.",
usage=argparse.SUPPRESS)
ap.add_argument(
"node_name",
type=str,
help="Name of the node or an output tensor from the node, e.g., "
"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0")
ap.add_argument(
"-c", "--control", action="store_true", help="Include control inputs.")
ap.add_argument(
"-d",
"--depth",
dest="depth",
type=int,
default=config.get("graph_recursion_depth"),
help="Maximum depth of recursion used when showing the output tree.")
ap.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Show recipients of the node recursively, i.e., the output "
"tree.")
ap.add_argument(
"-t",
"--op_type",
action="store_true",
help="Show op types of recipient nodes.")
self._arg_parsers["list_outputs"] = ap
# Parser for print_tensor.
self._arg_parsers["print_tensor"] = (
command_parser.get_print_tensor_argparser(
"Print the value of a dumped tensor."))
# Parser for print_source.
ap = argparse.ArgumentParser(
description="Print a Python source file with overlaid debug "
"information, including the nodes (ops) or Tensors created at the "
"source lines.",
usage=argparse.SUPPRESS)
ap.add_argument(
"source_file_path",
type=str,
help="Path to the source file.")
ap.add_argument(
"-t",
"--tensors",
dest="tensors",
action="store_true",
help="Label lines with dumped Tensors, instead of ops.")
ap.add_argument(
"-m",
"--max_elements_per_line",
type=int,
default=10,
help="Maximum number of elements (ops or Tensors) to show per source "
"line.")
ap.add_argument(
"-b",
"--line_begin",
type=int,
default=1,
help="Print source beginning at line number (1-based.)")
self._arg_parsers["print_source"] = ap
# Parser for list_source.
ap = argparse.ArgumentParser(
description="List source files responsible for constructing nodes and "
"tensors present in the run().",
usage=argparse.SUPPRESS)
ap.add_argument(
"-p",
"--path_filter",
type=str,
default="",
help="Regular expression filter for file path.")
ap.add_argument(
"-n",
"--node_name_filter",
type=str,
default="",
help="Regular expression filter for node name.")
self._arg_parsers["list_source"] = ap
# Parser for eval.
ap = argparse.ArgumentParser(
description="""Evaluate an arbitrary expression. Can use tensor values
from the current debug dump. The debug tensor names should be enclosed
in pairs of backticks. Expressions with spaces should be enclosed in
a pair of double quotes or a pair of single quotes. By default, numpy
is imported as np and can be used in the expressions. E.g.,
1) eval np.argmax(`Softmax:0`),
2) eval 'np.sum(`Softmax:0`, axis=1)',
3) eval "np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)".
""",
usage=argparse.SUPPRESS)
ap.add_argument(
"expression",
type=str,
help="""Expression to be evaluated.
1) in the simplest case, use <node_name>:<output_slot>, e.g.,
hidden_0/MatMul:0.
2) if the default debug op "DebugIdentity" is to be overridden, use
<node_name>:<output_slot>:<debug_op>, e.g.,
hidden_0/MatMul:0:DebugNumericSummary.
3) if the tensor of the same name exists on more than one device, use
<device_name>:<node_name>:<output_slot>[:<debug_op>], e.g.,
/job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0
/job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount.
4) if the tensor is executed multiple times in a given `Session.run`
call, specify the execution index with a 0-based integer enclosed in a
pair of brackets at the end, e.g.,
RNN/tanh:0[0]
/job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].""")
ap.add_argument(
"-a",
"--all",
dest="print_all",
action="store_true",
help="Print the tensor in its entirety, i.e., do not use ellipses "
"(may be slow for large results).")
ap.add_argument(
"-w",
"--write_path",
default="",
help="Path of the numpy file to write the evaluation result to, "
"using numpy.save()")
self._arg_parsers["eval"] = ap
def add_tensor_filter(self, filter_name, filter_callable):
"""Add a tensor filter.
A tensor filter is a named callable of the signature:
filter_callable(dump_datum, tensor),
wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying
metadata about the dumped tensor, including tensor name, timestamps, etc.
tensor is the value of the dumped tensor as a numpy.ndarray object.
The return value of the function is a bool.
This is the same signature as the input argument to
debug_data.DebugDumpDir.find().
Args:
filter_name: (str) name of the filter. Cannot be empty.
filter_callable: (callable) a filter function of the signature described
as above.
Raises:
ValueError: If filter_name is an empty str.
TypeError: If filter_name is not a str, or if filter_callable is not
callable.
"""
if not isinstance(filter_name, str):
raise TypeError("Input argument filter_name is expected to be str, "
"but is not.")
# Check that filter_name is not an empty str.
if not filter_name:
raise ValueError("Input argument filter_name cannot be empty.")
# Check that filter_callable is callable.
if not callable(filter_callable):
raise TypeError(
"Input argument filter_callable is expected to be callable, "
"but is not.")
self._tensor_filters[filter_name] = filter_callable
def get_tensor_filter(self, filter_name):
"""Retrieve filter function by name.
Args:
filter_name: Name of the filter set during add_tensor_filter() call.
Returns:
The callable associated with the filter name.
Raises:
ValueError: If there is no tensor filter of the specified filter name.
"""
if filter_name not in self._tensor_filters:
raise ValueError("There is no tensor filter named \"%s\"" % filter_name)
return self._tensor_filters[filter_name]
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def list_tensors(self, args, screen_info=None):
"""Command handler for list_tensors.
List tensors dumped during the debugged Session.run() call.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
Raises:
ValueError: If `--filter_exclude_node_names` is used without `-f` or
`--tensor_filter` being used.
"""
# TODO(cais): Add annotations of substrings for dumped tensor names, to
# facilitate on-screen highlighting/selection of node names.
_ = screen_info
parsed = self._arg_parsers["list_tensors"].parse_args(args)
output = []
filter_strs = []
if parsed.op_type_filter:
op_type_regex = re.compile(parsed.op_type_filter)
filter_strs.append("Op type regex filter: \"%s\"" % parsed.op_type_filter)
else:
op_type_regex = None
if parsed.node_name_filter:
node_name_regex = re.compile(parsed.node_name_filter)
filter_strs.append("Node name regex filter: \"%s\"" %
parsed.node_name_filter)
else:
node_name_regex = None
output = debugger_cli_common.RichTextLines(filter_strs)
output.append("")
if parsed.tensor_filter:
try:
filter_callable = self.get_tensor_filter(parsed.tensor_filter)
except ValueError:
output = cli_shared.error("There is no tensor filter named \"%s\"." %
parsed.tensor_filter)
_add_main_menu(output, node_name=None, enable_list_tensors=False)
return output
data_to_show = self._debug_dump.find(
filter_callable,
exclude_node_names=parsed.filter_exclude_node_names)
else:
if parsed.filter_exclude_node_names:
raise ValueError(
"The flag --filter_exclude_node_names is valid only when "
"the flag -f or --tensor_filter is used.")
data_to_show = self._debug_dump.dumped_tensor_data
# TODO(cais): Implement filter by lambda on tensor value.
max_timestamp_width, max_dump_size_width, max_op_type_width = (
self._measure_tensor_list_column_widths(data_to_show))
# Sort the data.
data_to_show = self._sort_dump_data_by(
data_to_show, parsed.sort_by, parsed.reverse)
output.extend(
self._tensor_list_column_heads(parsed, max_timestamp_width,
max_dump_size_width, max_op_type_width))
dump_count = 0
for dump in data_to_show:
if node_name_regex and not node_name_regex.match(dump.node_name):
continue
if op_type_regex:
op_type = self._debug_dump.node_op_type(dump.node_name)
if not op_type_regex.match(op_type):
continue
rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0
dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
dumped_tensor_name = "%s:%d" % (dump.node_name, dump.output_slot)
op_type = self._debug_dump.node_op_type(dump.node_name)
line = "[%.3f]" % rel_time
line += " " * (max_timestamp_width - len(line))
line += dump_size_str
line += " " * (max_timestamp_width + max_dump_size_width - len(line))
line += op_type
line += " " * (max_timestamp_width + max_dump_size_width +
max_op_type_width - len(line))
line += dumped_tensor_name
output.append(
line,
font_attr_segs=[(
len(line) - len(dumped_tensor_name), len(line),
debugger_cli_common.MenuItem("", "pt %s" % dumped_tensor_name))])
dump_count += 1
if parsed.tensor_filter:
output.prepend([
"%d dumped tensor(s) passing filter \"%s\":" %
(dump_count, parsed.tensor_filter)
])
else:
output.prepend(["%d dumped tensor(s):" % dump_count])
_add_main_menu(output, node_name=None, enable_list_tensors=False)
return output
def _measure_tensor_list_column_widths(self, data):
"""Determine the maximum widths of the timestamp and op-type column.
This method assumes that data is sorted in the default order, i.e.,
by ascending timestamps.
Args:
data: (list of DebugTensorDatum) the data based on which the maximum
column widths will be determined.
Returns:
(int) maximum width of the timestamp column. 0 if data is empty.
(int) maximum width of the dump size column. 0 if data is empty.
(int) maximum width of the op type column. 0 if data is empty.
"""
max_timestamp_width = 0
if data:
max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0
max_timestamp_width = len("[%.3f] " % max_rel_time_ms) + 1
max_timestamp_width = max(max_timestamp_width,
len(self._TIMESTAMP_COLUMN_HEAD) + 1)
max_dump_size_width = 0
for dump in data:
dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
if len(dump_size_str) + 1 > max_dump_size_width:
max_dump_size_width = len(dump_size_str) + 1
max_dump_size_width = max(max_dump_size_width,
len(self._DUMP_SIZE_COLUMN_HEAD) + 1)
max_op_type_width = 0
for dump in data:
op_type = self._debug_dump.node_op_type(dump.node_name)
if len(op_type) + 1 > max_op_type_width:
max_op_type_width = len(op_type) + 1
max_op_type_width = max(max_op_type_width,
len(self._OP_TYPE_COLUMN_HEAD) + 1)
return max_timestamp_width, max_dump_size_width, max_op_type_width
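# Worked example (illustrative, not from the original source): if the latest
# dump arrives 12.345 ms after t0, the widest timestamp cell is "[12.345]"
# plus a trailing space, so len("[12.345] ") + 1 = 10 columns are reserved
# (before taking the max with the width of the column head).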
def _sort_dump_data_by(self, data, sort_by, reverse):
"""Sort a list of DebugTensorDatum in specified order.
Args:
data: (list of DebugTensorDatum) the data to be sorted.
sort_by: The field to sort data by.
reverse: (bool) Whether to use reversed (descending) order.
Returns:
(list of DebugTensorDatum) in sorted order.
Raises:
ValueError: given an invalid value of sort_by.
"""
if sort_by == SORT_TENSORS_BY_TIMESTAMP:
return sorted(
data,
reverse=reverse,
key=lambda x: x.timestamp)
elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:
return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)
elif sort_by == SORT_TENSORS_BY_OP_TYPE:
return sorted(
data,
reverse=reverse,
key=lambda x: self._debug_dump.node_op_type(x.node_name))
elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:
return sorted(
data,
reverse=reverse,
key=lambda x: "%s:%d" % (x.node_name, x.output_slot))
else:
raise ValueError("Unsupported key to sort tensors by: %s" % sort_by)
def _tensor_list_column_heads(self, parsed, max_timestamp_width,
max_dump_size_width, max_op_type_width):
"""Generate a line containing the column heads of the tensor list.
Args:
parsed: Parsed arguments (by argparse) of the list_tensors command.
max_timestamp_width: (int) maximum width of the timestamp column.
max_dump_size_width: (int) maximum width of the dump size column.
max_op_type_width: (int) maximum width of the op type column.
Returns:
A RichTextLines object.
"""
base_command = "list_tensors"
if parsed.tensor_filter:
base_command += " -f %s" % parsed.tensor_filter
if parsed.op_type_filter:
base_command += " -t %s" % parsed.op_type_filter
if parsed.node_name_filter:
base_command += " -n %s" % parsed.node_name_filter
attr_segs = {0: []}
row = self._TIMESTAMP_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TIMESTAMP)
if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse:
command += " -r"
attr_segs[0].append(
(0, len(row), [debugger_cli_common.MenuItem(None, command), "bold"]))
row += " " * (max_timestamp_width - len(row))
prev_len = len(row)
row += self._DUMP_SIZE_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_DUMP_SIZE)
if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse:
command += " -r"
attr_segs[0].append((prev_len, len(row),
[debugger_cli_common.MenuItem(None, command), "bold"]))
row += " " * (max_dump_size_width + max_timestamp_width - len(row))
prev_len = len(row)
row += self._OP_TYPE_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_OP_TYPE)
if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse:
command += " -r"
attr_segs[0].append((prev_len, len(row),
[debugger_cli_common.MenuItem(None, command), "bold"]))
row += " " * (
max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
)
prev_len = len(row)
row += self._TENSOR_NAME_COLUMN_HEAD
command = "%s -s %s" % (base_command, SORT_TENSORS_BY_TENSOR_NAME)
if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse:
command += " -r"
attr_segs[0].append((prev_len, len(row),
[debugger_cli_common.MenuItem("", command), "bold"]))
row += " " * (
max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)
)
return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)
def node_info(self, args, screen_info=None):
"""Command handler for node_info.
Query information about a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# TODO(cais): Add annotation of substrings for node names, to facilitate
# on-screen highlighting/selection of node names.
_ = screen_info
parsed = self._arg_parsers["node_info"].parse_args(args)
# Get a node name, regardless of whether the input is a node name (without
# output slot attached) or a tensor name (with output slot attached).
node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(
parsed.node_name)
if not self._debug_dump.node_exists(node_name):
output = cli_shared.error(
"There is no node named \"%s\" in the partition graphs" % node_name)
_add_main_menu(
output,
node_name=None,
enable_list_tensors=True,
enable_node_info=False,
enable_list_inputs=False,
enable_list_outputs=False)
return output
# TODO(cais): Provide UI glossary feature to explain to users what the
# term "partition graph" means and how it is related to TF graph objects
# in Python. The information can be along the line of:
# "A tensorflow graph defined in Python is stripped of unused ops
# according to the feeds and fetches and divided into a number of
# partition graphs that may be distributed among multiple devices and
# hosts. The partition graphs are what's actually executed by the C++
# runtime during a run() call."
lines = ["Node %s" % node_name]
font_attr_segs = {
0: [(len(lines[-1]) - len(node_name), len(lines[-1]), "bold")]
}
lines.append("")
lines.append(" Op: %s" % self._debug_dump.node_op_type(node_name))
lines.append(" Device: %s" % self._debug_dump.node_device(node_name))
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
# List node inputs (non-control and control).
inputs = self._exclude_blacklisted_ops(
self._debug_dump.node_inputs(node_name))
ctrl_inputs = self._exclude_blacklisted_ops(
self._debug_dump.node_inputs(node_name, is_control=True))
output.extend(self._format_neighbors("input", inputs, ctrl_inputs))
# List node output recipients (non-control and control).
recs = self._exclude_blacklisted_ops(
self._debug_dump.node_recipients(node_name))
ctrl_recs = self._exclude_blacklisted_ops(
self._debug_dump.node_recipients(node_name, is_control=True))
output.extend(self._format_neighbors("recipient", recs, ctrl_recs))
# Optional: List attributes of the node.
if parsed.attributes:
output.extend(self._list_node_attributes(node_name))
# Optional: List dumps available from the node.
if parsed.dumps:
output.extend(self._list_node_dumps(node_name))
if parsed.traceback:
output.extend(self._render_node_traceback(node_name))
_add_main_menu(output, node_name=node_name, enable_node_info=False)
return output
def _exclude_blacklisted_ops(self, node_names):
"""Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_BLACKLIST.
Args:
node_names: An iterable of node or graph element names.
Returns:
A list of node names that are not blacklisted.
"""
return [node_name for node_name in node_names
if self._debug_dump.node_op_type(
debug_graphs.get_node_name(node_name)) not in
self._GRAPH_STRUCT_OP_TYPE_BLACKLIST]
def _render_node_traceback(self, node_name):
"""Render traceback of a node's creation in Python, if available.
Args:
node_name: (str) name of the node.
Returns:
A RichTextLines object containing the stack trace of the node's
construction.
"""
lines = [RL(""), RL(""), RL("Traceback of node construction:", "bold")]
try:
node_stack = self._debug_dump.node_traceback(node_name)
for depth, (file_path, line, function_name, text) in enumerate(
node_stack):
lines.append("%d: %s" % (depth, file_path))
attribute = debugger_cli_common.MenuItem(
"", "ps %s -b %d" % (file_path, line)) if text else None
line_number_line = RL(" ")
line_number_line += RL("Line: %d" % line, attribute)
lines.append(line_number_line)
lines.append(" Function: %s" % function_name)
lines.append(" Text: " + (("\"%s\"" % text) if text else "None"))
lines.append("")
except KeyError:
lines.append("(Node unavailable in the loaded Python graph)")
except LookupError:
lines.append("(Unavailable because no Python graph has been loaded)")
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def list_inputs(self, args, screen_info=None):
"""Command handler for inputs.
Show inputs to a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# Screen info not currently used by this handler. Include this line to
# mute pylint.
_ = screen_info
# TODO(cais): Use screen info to format the output lines more prettily,
# e.g., hanging indent of long node names.
parsed = self._arg_parsers["list_inputs"].parse_args(args)
output = self._list_inputs_or_outputs(
parsed.recursive,
parsed.node_name,
parsed.depth,
parsed.control,
parsed.op_type,
do_outputs=False)
node_name = debug_graphs.get_node_name(parsed.node_name)
_add_main_menu(output, node_name=node_name, enable_list_inputs=False)
return output
def print_tensor(self, args, screen_info=None):
"""Command handler for print_tensor.
Print value of a given dumped tensor.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
parsed = self._arg_parsers["print_tensor"].parse_args(args)
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
# Determine if any range-highlighting is required.
highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)
tensor_name, tensor_slicing = (
command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))
node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)
if (self._debug_dump.loaded_partition_graphs() and
not self._debug_dump.node_exists(node_name)):
output = cli_shared.error(
"Node \"%s\" does not exist in partition graphs" % node_name)
_add_main_menu(
output,
node_name=None,
enable_list_tensors=True,
enable_print_tensor=False)
return output
watch_keys = self._debug_dump.debug_watch_keys(node_name)
if output_slot is None:
output_slots = set()
for watch_key in watch_keys:
output_slots.add(int(watch_key.split(":")[1]))
if len(output_slots) == 1:
# There is only one dumped tensor from this node, so there is no
# ambiguity. Proceed to show the only dumped tensor.
output_slot = list(output_slots)[0]
else:
# There is more than one dumped tensor from this node. Indicate as
# such.
# TODO(cais): Provide an output screen with command links for
# convenience.
lines = [
"Node \"%s\" generated debug dumps from %s output slots:" %
(node_name, len(output_slots)),
"Please specify the output slot: %s:x." % node_name
]
output = debugger_cli_common.RichTextLines(lines)
_add_main_menu(
output,
node_name=node_name,
enable_list_tensors=True,
enable_print_tensor=False)
return output
# Find debug dump data that match the tensor name (node name + output
# slot).
matching_data = []
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
if datum.output_slot == output_slot:
matching_data.append(datum)
if not matching_data:
# No dump for this tensor.
output = cli_shared.error("Tensor \"%s\" did not generate any dumps." %
parsed.tensor_name)
elif len(matching_data) == 1:
# There is only one dump for this tensor.
if parsed.number <= 0:
output = cli_shared.format_tensor(
matching_data[0].get_tensor(),
matching_data[0].watch_key,
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=highlight_options,
include_numeric_summary=parsed.numeric_summary,
write_path=parsed.write_path)
else:
output = cli_shared.error(
"Invalid number (%d) for tensor %s, which generated one dump." %
(parsed.number, parsed.tensor_name))
_add_main_menu(output, node_name=node_name, enable_print_tensor=False)
else:
# There is more than one dump for this tensor.
if parsed.number < 0:
lines = [
"Tensor \"%s\" generated %d dumps:" % (parsed.tensor_name,
len(matching_data))
]
font_attr_segs = {}
for i, datum in enumerate(matching_data):
rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0
lines.append("#%d [%.3f ms] %s" % (i, rel_time, datum.watch_key))
command = "print_tensor %s -n %d" % (parsed.tensor_name, i)
font_attr_segs[len(lines) - 1] = [(
len(lines[-1]) - len(datum.watch_key), len(lines[-1]),
debugger_cli_common.MenuItem(None, command))]
lines.append("")
lines.append(
"You can use the -n (--number) flag to specify which dump to "
"print.")
lines.append("For example:")
lines.append(" print_tensor %s -n 0" % parsed.tensor_name)
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
elif parsed.number >= len(matching_data):
output = cli_shared.error(
"Specified number (%d) exceeds the number of available dumps "
"(%d) for tensor %s" %
(parsed.number, len(matching_data), parsed.tensor_name))
else:
output = cli_shared.format_tensor(
matching_data[parsed.number].get_tensor(),
matching_data[parsed.number].watch_key + " (dump #%d)" %
parsed.number,
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=highlight_options,
write_path=parsed.write_path)
_add_main_menu(output, node_name=node_name, enable_print_tensor=False)
return output
def list_outputs(self, args, screen_info=None):
"""Command handler for inputs.
Show inputs to a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
# Screen info not currently used by this handler. Include this line to
# mute pylint.
_ = screen_info
# TODO(cais): Use screen info to format the output lines more prettily,
# e.g., hanging indent of long node names.
parsed = self._arg_parsers["list_outputs"].parse_args(args)
output = self._list_inputs_or_outputs(
parsed.recursive,
parsed.node_name,
parsed.depth,
parsed.control,
parsed.op_type,
do_outputs=True)
node_name = debug_graphs.get_node_name(parsed.node_name)
_add_main_menu(output, node_name=node_name, enable_list_outputs=False)
return output
def evaluate_expression(self, args, screen_info=None):
parsed = self._arg_parsers["eval"].parse_args(args)
eval_res = self._evaluator.evaluate(parsed.expression)
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
return cli_shared.format_tensor(
eval_res,
"from eval of expression '%s'" % parsed.expression,
np_printoptions,
print_all=parsed.print_all,
include_numeric_summary=True,
write_path=parsed.write_path)
def _reconstruct_print_source_command(self,
parsed,
line_begin,
max_elements_per_line_increase=0):
return "ps %s %s -b %d -m %d" % (
parsed.source_file_path, "-t" if parsed.tensors else "", line_begin,
parsed.max_elements_per_line + max_elements_per_line_increase)
def print_source(self, args, screen_info=None):
"""Print the content of a source file."""
del screen_info # Unused.
parsed = self._arg_parsers["print_source"].parse_args(args)
source_annotation = source_utils.annotate_source(
self._debug_dump,
parsed.source_file_path,
do_dumped_tensors=parsed.tensors)
source_lines, line_num_width = source_utils.load_source(
parsed.source_file_path)
labeled_source_lines = []
actual_initial_scroll_target = 0
for i, line in enumerate(source_lines):
annotated_line = RL("L%d" % (i + 1), cli_shared.COLOR_YELLOW)
annotated_line += " " * (line_num_width - len(annotated_line))
annotated_line += line
labeled_source_lines.append(annotated_line)
if i + 1 == parsed.line_begin:
actual_initial_scroll_target = len(labeled_source_lines) - 1
if i + 1 in source_annotation:
sorted_elements = sorted(source_annotation[i + 1])
for k, element in enumerate(sorted_elements):
if k >= parsed.max_elements_per_line:
omitted_info_line = RL(" (... Omitted %d of %d %s ...) " % (
len(sorted_elements) - parsed.max_elements_per_line,
len(sorted_elements),
"tensor(s)" if parsed.tensors else "op(s)"))
omitted_info_line += RL(
"+5",
debugger_cli_common.MenuItem(
None,
self._reconstruct_print_source_command(
parsed, i + 1, max_elements_per_line_increase=5)))
labeled_source_lines.append(omitted_info_line)
break
label = RL(" " * 4)
if self._debug_dump.debug_watch_keys(
debug_graphs.get_node_name(element)):
attribute = debugger_cli_common.MenuItem("", "pt %s" % element)
else:
attribute = cli_shared.COLOR_BLUE
label += RL(element, attribute)
labeled_source_lines.append(label)
output = debugger_cli_common.rich_text_lines_from_rich_line_list(
labeled_source_lines,
annotations={debugger_cli_common.INIT_SCROLL_POS_KEY:
actual_initial_scroll_target})
_add_main_menu(output, node_name=None)
return output
def _make_source_table(self, source_list, is_tf_py_library):
"""Make a table summarizing the source files that create nodes and tensors.
Args:
source_list: List of source files and related information as a list of
tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
first_line).
is_tf_py_library: (`bool`) whether this table is for files that belong
to the TensorFlow Python library.
Returns:
The table as a `debugger_cli_common.RichTextLines` object.
"""
path_head = "Source file path"
num_nodes_head = "#(nodes)"
num_tensors_head = "#(tensors)"
num_dumps_head = "#(tensor dumps)"
if is_tf_py_library:
# Use color to mark files that are guessed to belong to TensorFlow Python
# library.
color = cli_shared.COLOR_GRAY
lines = [RL("TensorFlow Python library file(s):", color)]
else:
color = cli_shared.COLOR_WHITE
lines = [RL("File(s) outside TensorFlow Python library:", color)]
if not source_list:
lines.append(RL("[No files.]"))
lines.append(RL())
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
path_column_width = max(
max([len(item[0]) for item in source_list]), len(path_head)) + 1
num_nodes_column_width = max(
max([len(str(item[2])) for item in source_list]),
len(num_nodes_head)) + 1
num_tensors_column_width = max(
max([len(str(item[3])) for item in source_list]),
len(num_tensors_head)) + 1
head = RL(path_head + " " * (path_column_width - len(path_head)), color)
head += RL(num_nodes_head + " " * (
num_nodes_column_width - len(num_nodes_head)), color)
head += RL(num_tensors_head + " " * (
num_tensors_column_width - len(num_tensors_head)), color)
head += RL(num_dumps_head, color)
lines.append(head)
for (file_path, _, num_nodes, num_tensors, num_dumps,
first_line_num) in source_list:
path_attributes = [color]
if source_utils.is_extension_uncompiled_python_source(file_path):
path_attributes.append(
debugger_cli_common.MenuItem(None, "ps %s -b %d" %
(file_path, first_line_num)))
line = RL(file_path, path_attributes)
line += " " * (path_column_width - len(line))
line += RL(
str(num_nodes) + " " * (num_nodes_column_width - len(str(num_nodes))),
color)
line += RL(
str(num_tensors) + " " *
(num_tensors_column_width - len(str(num_tensors))), color)
line += RL(str(num_dumps), color)
lines.append(line)
lines.append(RL())
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def list_source(self, args, screen_info=None):
"""List Python source files that constructed nodes and tensors."""
del screen_info # Unused.
parsed = self._arg_parsers["list_source"].parse_args(args)
source_list = source_utils.list_source_files_against_dump(
self._debug_dump,
path_regex_whitelist=parsed.path_filter,
node_name_regex_whitelist=parsed.node_name_filter)
top_lines = [
RL("List of source files that created nodes in this run", "bold")]
if parsed.path_filter:
top_lines.append(
RL("File path regex filter: \"%s\"" % parsed.path_filter))
if parsed.node_name_filter:
top_lines.append(
RL("Node name regex filter: \"%s\"" % parsed.node_name_filter))
top_lines.append(RL())
output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)
if not source_list:
output.append("[No source file information.]")
return output
output.extend(self._make_source_table(
[item for item in source_list if not item[1]], False))
output.extend(self._make_source_table(
[item for item in source_list if item[1]], True))
_add_main_menu(output, node_name=None)
return output
def _list_inputs_or_outputs(self,
recursive,
node_name,
depth,
control,
op_type,
do_outputs=False):
"""Helper function used by list_inputs and list_outputs.
Format a list of lines to display the inputs or output recipients of a
given node.
Args:
recursive: Whether the listing is to be done recursively, as a boolean.
node_name: The name of the node in question, as a str.
depth: Maximum recursion depth, applies only if recursive == True, as an
int.
control: Whether control inputs or control recipients are included, as a
boolean.
op_type: Whether the op types of the nodes are to be included, as a
boolean.
do_outputs: Whether recipients, instead of input nodes, are to be
listed, as a boolean.
Returns:
Input or recipient tree formatted as a RichTextLines object.
"""
if do_outputs:
tracker = self._debug_dump.node_recipients
type_str = "Recipients of"
short_type_str = "recipients"
else:
tracker = self._debug_dump.node_inputs
type_str = "Inputs to"
short_type_str = "inputs"
lines = []
font_attr_segs = {}
# Check if this is a tensor name, instead of a node name.
node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)
# Check if node exists.
if not self._debug_dump.node_exists(node_name):
return cli_shared.error(
"There is no node named \"%s\" in the partition graphs" % node_name)
if recursive:
max_depth = depth
else:
max_depth = 1
if control:
include_ctrls_str = ", control %s included" % short_type_str
else:
include_ctrls_str = ""
line = "%s node \"%s\"" % (type_str, node_name)
font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, "bold")
]
lines.append(line + " (Depth limit = %d%s):" % (max_depth, include_ctrls_str
))
command_template = "lo -c -r %s" if do_outputs else "li -c -r %s"
self._dfs_from_node(
lines,
font_attr_segs,
node_name,
tracker,
max_depth,
1, [],
control,
op_type,
command_template=command_template)
# Include legend.
lines.append("")
lines.append("Legend:")
lines.append(" (d): recursion depth = d.")
if control:
lines.append(" (Ctrl): Control input.")
if op_type:
lines.append(" [Op]: Input node has op type Op.")
# TODO(cais): Consider appending ":0" at the end of 1st outputs of nodes.
return debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
def _dfs_from_node(self,
lines,
attr_segs,
node_name,
tracker,
max_depth,
depth,
unfinished,
include_control=False,
show_op_type=False,
command_template=None):
"""Perform depth-first search (DFS) traversal of a node's input tree.
It recursively tracks the inputs (or output recipients) of the node called
node_name, and appends these inputs (or output recipients) to a list of text
lines (lines) with proper indentation that reflects the recursion depth,
together with some formatting attributes (to attr_segs). The formatting
attributes can include command shortcuts, for example.
Args:
lines: Text lines to append to, as a list of str.
attr_segs: (dict) Attribute segments dictionary to append to.
node_name: Name of the node, as a str. This arg is updated during the
recursion.
tracker: A callable that takes one str as the node name input and
returns a list of str as the inputs/outputs.
This makes this function general enough to be used with both
node-input and node-output tracking.
max_depth: Maximum recursion depth, as an int.
depth: Current recursion depth. This arg is updated during the
recursion.
unfinished: A stack of unfinished recursion depths, as a list of int.
include_control: Whether control dependencies are to be included as
inputs (and marked as such).
show_op_type: Whether the op types of the input nodes are to be displayed
alongside the nodes' names.
command_template: (str) Template for command shortcut of the node names.
"""
# Make a shallow copy of the list because it may be extended later.
all_inputs = self._exclude_blacklisted_ops(
copy.copy(tracker(node_name, is_control=False)))
is_ctrl = [False] * len(all_inputs)
if include_control:
# Sort control inputs or recipients in alphabetical order of the node
# names.
ctrl_inputs = self._exclude_blacklisted_ops(
sorted(tracker(node_name, is_control=True)))
all_inputs.extend(ctrl_inputs)
is_ctrl.extend([True] * len(ctrl_inputs))
if not all_inputs:
if depth == 1:
lines.append(" [None]")
return
unfinished.append(depth)
# Create depth-dependent hanging indent for the line.
hang = ""
for k in xrange(depth):
if k < depth - 1:
if k + 1 in unfinished:
hang += HANG_UNFINISHED
else:
hang += HANG_FINISHED
else:
hang += HANG_SUFFIX
if all_inputs and depth > max_depth:
lines.append(hang + ELLIPSIS)
unfinished.pop()
return
hang += DEPTH_TEMPLATE % depth
for i in xrange(len(all_inputs)):
inp = all_inputs[i]
op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))
if op_type in self._GRAPH_STRUCT_OP_TYPE_BLACKLIST:
continue
if is_ctrl[i]:
ctrl_str = CTRL_LABEL
else:
ctrl_str = ""
op_type_str = ""
if show_op_type:
op_type_str = OP_TYPE_TEMPLATE % op_type
if i == len(all_inputs) - 1:
unfinished.pop()
line = hang + ctrl_str + op_type_str + inp
lines.append(line)
if command_template:
attr_segs[len(lines) - 1] = [(
len(line) - len(inp), len(line),
debugger_cli_common.MenuItem(None, command_template % inp))]
# Recursive call.
# The input's/output's name can be a tensor name, in the case of node
# with >1 output slots.
inp_node_name, _ = debug_graphs.parse_node_or_tensor_name(inp)
self._dfs_from_node(
lines,
attr_segs,
inp_node_name,
tracker,
max_depth,
depth + 1,
unfinished,
include_control=include_control,
show_op_type=show_op_type,
command_template=command_template)
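# Rendering sketch (illustrative; assumes the usual values of the hanging-
# indent constants, e.g. HANG_SUFFIX = "|- " and DEPTH_TEMPLATE = "(%d) "):
#   |- (1) hidden/MatMul
#   |  |- (2) hidden/weights
#   |  |- (2) input/x
# Each "(d)" marks the recursion depth, and the node names carry MenuItem
# shortcuts built from command_template (e.g. "li -c -r hidden/weights").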
def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):
"""List neighbors (inputs or recipients) of a node.
Args:
neighbor_type: ("input" | "recipient")
non_ctrls: Non-control neighbor node names, as a list of str.
ctrls: Control neighbor node names, as a list of str.
Returns:
A RichTextLines object.
"""
# TODO(cais): Return RichTextLines instead, to allow annotation of node
# names.
lines = []
font_attr_segs = {}
lines.append("")
lines.append(" %d %s(s) + %d control %s(s):" %
(len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))
lines.append(" %d %s(s):" % (len(non_ctrls), neighbor_type))
for non_ctrl in non_ctrls:
line = " [%s] %s" % (self._debug_dump.node_op_type(non_ctrl),
non_ctrl)
lines.append(line)
font_attr_segs[len(lines) - 1] = [(
len(line) - len(non_ctrl), len(line),
debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % non_ctrl))]
if ctrls:
lines.append("")
lines.append(" %d control %s(s):" % (len(ctrls), neighbor_type))
for ctrl in ctrls:
line = " [%s] %s" % (self._debug_dump.node_op_type(ctrl), ctrl)
lines.append(line)
font_attr_segs[len(lines) - 1] = [(
len(line) - len(ctrl), len(line),
debugger_cli_common.MenuItem(None, "ni -a -d -t %s" % ctrl))]
return debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
def _list_node_attributes(self, node_name):
"""List neighbors (inputs or recipients) of a node.
Args:
node_name: Name of the node of which the attributes are to be listed.
Returns:
A RichTextLines object.
"""
lines = []
lines.append("")
lines.append("Node attributes:")
attrs = self._debug_dump.node_attributes(node_name)
for attr_key in attrs:
lines.append(" %s:" % attr_key)
attr_val_str = repr(attrs[attr_key]).strip().replace("\n", " ")
lines.append(" %s" % attr_val_str)
lines.append("")
return debugger_cli_common.RichTextLines(lines)
def _list_node_dumps(self, node_name):
"""List dumped tensor data from a node.
Args:
node_name: Name of the node whose dumped tensors are to be listed.
Returns:
A RichTextLines object.
"""
lines = []
font_attr_segs = {}
watch_keys = self._debug_dump.debug_watch_keys(node_name)
dump_count = 0
for watch_key in watch_keys:
debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)
for datum in debug_tensor_data:
line = " Slot %d @ %s @ %.3f ms" % (
datum.output_slot, datum.debug_op,
(datum.timestamp - self._debug_dump.t0) / 1000.0)
lines.append(line)
command = "pt %s:%d -n %d" % (node_name, datum.output_slot, dump_count)
font_attr_segs[len(lines) - 1] = [(
2, len(line), debugger_cli_common.MenuItem(None, command))]
dump_count += 1
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
output_with_header = debugger_cli_common.RichTextLines(
["%d dumped tensor(s):" % dump_count, ""])
output_with_header.extend(output)
return output_with_header
def create_analyzer_ui(debug_dump,
tensor_filters=None,
ui_type="curses",
on_ui_exit=None,
config=None):
"""Create an instance of CursesUI based on a DebugDumpDir object.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump to use.
tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor
filter (Callable).
ui_type: (str) requested UI type, e.g., "curses", "readline".
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: A `cli_config.CLIConfig` object.
Returns:
(base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
commands and tab-completions registered.
"""
if config is None:
config = cli_config.CLIConfig()
analyzer = DebugAnalyzer(debug_dump, config=config)
if tensor_filters:
for tensor_filter_name in tensor_filters:
analyzer.add_tensor_filter(
tensor_filter_name, tensor_filters[tensor_filter_name])
cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)
cli.register_command_handler(
"list_tensors",
analyzer.list_tensors,
analyzer.get_help("list_tensors"),
prefix_aliases=["lt"])
cli.register_command_handler(
"node_info",
analyzer.node_info,
analyzer.get_help("node_info"),
prefix_aliases=["ni"])
cli.register_command_handler(
"list_inputs",
analyzer.list_inputs,
analyzer.get_help("list_inputs"),
prefix_aliases=["li"])
cli.register_command_handler(
"list_outputs",
analyzer.list_outputs,
analyzer.get_help("list_outputs"),
prefix_aliases=["lo"])
cli.register_command_handler(
"print_tensor",
analyzer.print_tensor,
analyzer.get_help("print_tensor"),
prefix_aliases=["pt"])
cli.register_command_handler(
"print_source",
analyzer.print_source,
analyzer.get_help("print_source"),
prefix_aliases=["ps"])
cli.register_command_handler(
"list_source",
analyzer.list_source,
analyzer.get_help("list_source"),
prefix_aliases=["ls"])
cli.register_command_handler(
"eval",
analyzer.evaluate_expression,
analyzer.get_help("eval"),
prefix_aliases=["ev"])
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" % (datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names)
return cli
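# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of loading a previously written tfdbg dump directory and
# launching the analyzer CLI on it. The helper name and the local import are
# assumptions for illustration; `debug_data.DebugDumpDir` and
# `create_analyzer_ui` are the real entry points.
def _example_launch_analyzer_ui(dump_root):
  from tensorflow.python.debug.lib import debug_data  # local import for the sketch
  debug_dump = debug_data.DebugDumpDir(dump_root)  # parse the dump directory
  cli = create_analyzer_ui(debug_dump, ui_type="readline")
  cli.run_ui()  # blocks until the user exits the CLI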
|
tensorflow-master
|
tensorflow/python/debug/cli/analyzer_cli.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Formats and displays profiling information."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import numpy as np
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import profiling
from tensorflow.python.debug.lib import source_utils
RL = debugger_cli_common.RichLine
SORT_OPS_BY_OP_NAME = "node"
SORT_OPS_BY_OP_TYPE = "op_type"
SORT_OPS_BY_OP_TIME = "op_time"
SORT_OPS_BY_EXEC_TIME = "exec_time"
SORT_OPS_BY_START_TIME = "start_time"
SORT_OPS_BY_LINE = "line"
_DEVICE_NAME_FILTER_FLAG = "device_name_filter"
_NODE_NAME_FILTER_FLAG = "node_name_filter"
_OP_TYPE_FILTER_FLAG = "op_type_filter"
class ProfileDataTableView(object):
"""Table View of profiling data."""
def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US):
"""Constructor.
Args:
profile_datum_list: List of `ProfileDatum` objects.
time_unit: must be in cli_shared.TIME_UNITS.
"""
self._profile_datum_list = profile_datum_list
self.formatted_start_time = [
datum.start_time for datum in profile_datum_list]
self.formatted_op_time = [
cli_shared.time_to_readable_str(datum.op_time,
force_time_unit=time_unit)
for datum in profile_datum_list]
self.formatted_exec_time = [
cli_shared.time_to_readable_str(
datum.node_exec_stats.all_end_rel_micros,
force_time_unit=time_unit)
for datum in profile_datum_list]
self._column_names = ["Node",
"Op Type",
"Start Time (us)",
"Op Time (%s)" % time_unit,
"Exec Time (%s)" % time_unit,
"Filename:Lineno(function)"]
self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]
def value(self,
row,
col,
device_name_filter=None,
node_name_filter=None,
op_type_filter=None):
"""Get the content of a cell of the table.
Args:
row: (int) row index.
col: (int) column index.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
Returns:
A debugger_cli_common.RichLine object representing the content of the
cell, potentially with a clickable MenuItem.
Raises:
IndexError: if row index is out of range.
"""
menu_item = None
if col == 0:
text = self._profile_datum_list[row].node_exec_stats.node_name
elif col == 1:
text = self._profile_datum_list[row].op_type
elif col == 2:
text = str(self.formatted_start_time[row])
elif col == 3:
text = str(self.formatted_op_time[row])
elif col == 4:
text = str(self.formatted_exec_time[row])
elif col == 5:
command = "ps"
if device_name_filter:
command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
device_name_filter)
if node_name_filter:
command += " --%s %s" % (_NODE_NAME_FILTER_FLAG, node_name_filter)
if op_type_filter:
command += " --%s %s" % (_OP_TYPE_FILTER_FLAG, op_type_filter)
command += " %s --init_line %d" % (
self._profile_datum_list[row].file_path,
self._profile_datum_list[row].line_number)
menu_item = debugger_cli_common.MenuItem(None, command)
text = self._profile_datum_list[row].file_line_func
else:
raise IndexError("Invalid column index %d." % col)
return RL(text, font_attr=menu_item)
def row_count(self):
return len(self._profile_datum_list)
def column_count(self):
return len(self._column_names)
def column_names(self):
return self._column_names
def column_sort_id(self, col):
return self._column_sort_ids[col]
def _list_profile_filter(
profile_datum,
node_name_regex,
file_path_regex,
op_type_regex,
op_time_interval,
exec_time_interval,
min_lineno=-1,
max_lineno=-1):
"""Filter function for list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
node_name_regex: Regular expression pattern object to filter by name.
file_path_regex: Regular expression pattern object to filter by file path.
op_type_regex: Regular expression pattern object to filter by op type.
op_time_interval: `Interval` for filtering op time.
exec_time_interval: `Interval` for filtering exec time.
min_lineno: Lower bound for 1-based line number, inclusive.
If <= 0, has no effect.
max_lineno: Upper bound for 1-based line number, exclusive.
If <= 0, has no effect.
# TODO(cais): Maybe filter by function name.
Returns:
True iff profile_datum should be included.
"""
if node_name_regex and not node_name_regex.match(
profile_datum.node_exec_stats.node_name):
return False
if file_path_regex:
if (not profile_datum.file_path or
not file_path_regex.match(profile_datum.file_path)):
return False
if (min_lineno > 0 and profile_datum.line_number and
profile_datum.line_number < min_lineno):
return False
if (max_lineno > 0 and profile_datum.line_number and
profile_datum.line_number >= max_lineno):
return False
if (profile_datum.op_type is not None and op_type_regex and
not op_type_regex.match(profile_datum.op_type)):
return False
if op_time_interval is not None and not op_time_interval.contains(
profile_datum.op_time):
return False
if exec_time_interval and not exec_time_interval.contains(
profile_datum.node_exec_stats.all_end_rel_micros):
return False
return True
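# Example (illustrative): the `*_time_interval` arguments are typically built
# with command_parser.parse_time_interval(), e.g.
#   op_time_interval = command_parser.parse_time_interval("[100us,1000ms]")
# which yields an Interval whose contains() accepts op times between
# 100 and 1,000,000 microseconds.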
def _list_profile_sort_key(profile_datum, sort_by):
"""Get a profile_datum property to sort by in list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
sort_by: (string) indicates a value to sort by.
Must be one of SORT_BY* constants.
Returns:
profile_datum property to sort by.
"""
if sort_by == SORT_OPS_BY_OP_NAME:
return profile_datum.node_exec_stats.node_name
elif sort_by == SORT_OPS_BY_OP_TYPE:
return profile_datum.op_type
elif sort_by == SORT_OPS_BY_LINE:
return profile_datum.file_line_func
elif sort_by == SORT_OPS_BY_OP_TIME:
return profile_datum.op_time
elif sort_by == SORT_OPS_BY_EXEC_TIME:
return profile_datum.node_exec_stats.all_end_rel_micros
else: # sort by start time
return profile_datum.node_exec_stats.all_start_micros
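# Sketch (illustrative, not part of the original module) of how the sort key
# composes with the built-in sorted(), mirroring the call in
# ProfileAnalyzer.list_profile() below. `_example_profile_data` is an empty
# stand-in; real entries would be `profiling.ProfileDatum` objects.
_example_profile_data = []
_example_sorted_data = sorted(
    _example_profile_data,
    key=lambda datum: _list_profile_sort_key(datum, SORT_OPS_BY_OP_TIME),
    reverse=True)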
class ProfileAnalyzer(object):
"""Analyzer for profiling data."""
def __init__(self, graph, run_metadata):
"""ProfileAnalyzer constructor.
Args:
graph: (tf.Graph) Python graph object.
run_metadata: A `RunMetadata` protobuf object.
Raises:
ValueError: If run_metadata is None.
"""
self._graph = graph
if not run_metadata:
raise ValueError("No RunMetadata passed for profile analysis.")
self._run_metadata = run_metadata
self._arg_parsers = {}
ap = argparse.ArgumentParser(
description="List nodes profile information.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-d",
"--%s" % _DEVICE_NAME_FILTER_FLAG,
dest=_DEVICE_NAME_FILTER_FLAG,
type=str,
default="",
help="filter device name by regex.")
ap.add_argument(
"-n",
"--%s" % _NODE_NAME_FILTER_FLAG,
dest=_NODE_NAME_FILTER_FLAG,
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--%s" % _OP_TYPE_FILTER_FLAG,
dest=_OP_TYPE_FILTER_FLAG,
type=str,
default="",
help="filter op type by regex.")
# TODO(annarev): allow file filtering at non-stack top position.
ap.add_argument(
"-f",
"--file_path_filter",
dest="file_path_filter",
type=str,
default="",
help="filter by file name at the top position of node's creation "
"stack that does not belong to TensorFlow library.")
ap.add_argument(
"--min_lineno",
dest="min_lineno",
type=int,
default=-1,
help="(Inclusive) lower bound for 1-based line number in source file. "
"If <= 0, has no effect.")
ap.add_argument(
"--max_lineno",
dest="max_lineno",
type=int,
default=-1,
help="(Exclusive) upper bound for 1-based line number in source file. "
"If <= 0, has no effect.")
ap.add_argument(
"-e",
"--execution_time",
dest="execution_time",
type=str,
default="",
help="Filter by execution time interval "
"(includes compute plus pre- and post -processing time). "
"Supported units are s, ms and us (default). "
"E.g. -e >100s, -e <100, -e [100us,1000ms]")
ap.add_argument(
"-o",
"--op_time",
dest="op_time",
type=str,
default="",
help="Filter by op time interval (only includes compute time). "
"Supported units are s, ms and us (default). "
"E.g. -e >100s, -e <100, -e [100us,1000ms]")
ap.add_argument(
"-s",
"--sort_by",
dest="sort_by",
type=str,
default=SORT_OPS_BY_START_TIME,
help=("the field to sort the data by: (%s)" %
" | ".join([SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE])))
ap.add_argument(
"-r",
"--reverse",
dest="reverse",
action="store_true",
help="sort the data in reverse (descending) order")
ap.add_argument(
"--time_unit",
dest="time_unit",
type=str,
default=cli_shared.TIME_UNIT_US,
help="Time unit (" + " | ".join(cli_shared.TIME_UNITS) + ")")
self._arg_parsers["list_profile"] = ap
ap = argparse.ArgumentParser(
description="Print a Python source file with line-level profile "
"information",
usage=argparse.SUPPRESS)
ap.add_argument(
"source_file_path",
type=str,
help="Path to the source_file_path")
ap.add_argument(
"--cost_type",
type=str,
choices=["exec_time", "op_time"],
default="exec_time",
help="Type of cost to display")
ap.add_argument(
"--time_unit",
dest="time_unit",
type=str,
default=cli_shared.TIME_UNIT_US,
help="Time unit (" + " | ".join(cli_shared.TIME_UNITS) + ")")
ap.add_argument(
"-d",
"--%s" % _DEVICE_NAME_FILTER_FLAG,
dest=_DEVICE_NAME_FILTER_FLAG,
type=str,
default="",
help="Filter device name by regex.")
ap.add_argument(
"-n",
"--%s" % _NODE_NAME_FILTER_FLAG,
dest=_NODE_NAME_FILTER_FLAG,
type=str,
default="",
help="Filter node name by regex.")
ap.add_argument(
"-t",
"--%s" % _OP_TYPE_FILTER_FLAG,
dest=_OP_TYPE_FILTER_FLAG,
type=str,
default="",
help="Filter op type by regex.")
ap.add_argument(
"--init_line",
dest="init_line",
type=int,
default=0,
help="The 1-based line number to scroll to initially.")
self._arg_parsers["print_source"] = ap
def list_profile(self, args, screen_info=None):
"""Command handler for list_profile.
List per-operation profile information.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
screen_cols = 80
if screen_info and "cols" in screen_info:
screen_cols = screen_info["cols"]
parsed = self._arg_parsers["list_profile"].parse_args(args)
op_time_interval = (command_parser.parse_time_interval(parsed.op_time)
if parsed.op_time else None)
exec_time_interval = (
command_parser.parse_time_interval(parsed.execution_time)
if parsed.execution_time else None)
node_name_regex = (re.compile(parsed.node_name_filter)
if parsed.node_name_filter else None)
file_path_regex = (re.compile(parsed.file_path_filter)
if parsed.file_path_filter else None)
op_type_regex = (re.compile(parsed.op_type_filter)
if parsed.op_type_filter else None)
output = debugger_cli_common.RichTextLines([""])
device_name_regex = (re.compile(parsed.device_name_filter)
if parsed.device_name_filter else None)
data_generator = self._get_profile_data_generator()
device_count = len(self._run_metadata.step_stats.dev_stats)
for index in range(device_count):
device_stats = self._run_metadata.step_stats.dev_stats[index]
if not device_name_regex or device_name_regex.match(device_stats.device):
profile_data = [
datum for datum in data_generator(device_stats)
if _list_profile_filter(
datum, node_name_regex, file_path_regex, op_type_regex,
op_time_interval, exec_time_interval,
min_lineno=parsed.min_lineno, max_lineno=parsed.max_lineno)]
profile_data = sorted(
profile_data,
key=lambda datum: _list_profile_sort_key(datum, parsed.sort_by),
reverse=parsed.reverse)
output.extend(
self._get_list_profile_lines(
device_stats.device, index, device_count,
profile_data, parsed.sort_by, parsed.reverse, parsed.time_unit,
device_name_filter=parsed.device_name_filter,
node_name_filter=parsed.node_name_filter,
op_type_filter=parsed.op_type_filter,
screen_cols=screen_cols))
return output
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_file_path = {}
node_to_line_number = {}
node_to_func_name = {}
node_to_op_type = {}
for op in self._graph.get_operations():
for trace_entry in reversed(op.traceback):
file_path = trace_entry[0]
line_num = trace_entry[1]
func_name = trace_entry[2]
if not source_utils.guess_is_tensorflow_py_library(file_path):
break
node_to_file_path[op.name] = file_path
node_to_line_number[op.name] = line_num
node_to_func_name[op.name] = func_name
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == "_SOURCE" or node_stats.node_name == "_SINK":
continue
yield profiling.ProfileDatum(
device_step_stats.device,
node_stats,
node_to_file_path.get(node_stats.node_name, ""),
node_to_line_number.get(node_stats.node_name, 0),
node_to_func_name.get(node_stats.node_name, ""),
node_to_op_type.get(node_stats.node_name, ""))
return profile_data_generator
def _get_list_profile_lines(
self, device_name, device_index, device_count,
profile_datum_list, sort_by, sort_reverse, time_unit,
device_name_filter=None, node_name_filter=None, op_type_filter=None,
screen_cols=80):
"""Get `RichTextLines` object for list_profile command for a given device.
Args:
device_name: (string) Device name.
device_index: (int) Device index.
device_count: (int) Number of devices.
profile_datum_list: List of `ProfileDatum` objects.
sort_by: (string) Identifier of column to sort. Sort identifier
must be one of SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME
or SORT_OPS_BY_LINE.
sort_reverse: (bool) Whether to sort in descending instead of default
(ascending) order.
time_unit: time unit, must be in cli_shared.TIME_UNITS.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
screen_cols: (int) Number of columns available on the screen (i.e.,
available screen width).
Returns:
`RichTextLines` object containing a table that displays profiling
information for each op.
"""
profile_data = ProfileDataTableView(profile_datum_list, time_unit=time_unit)
# Calculate total time early to calculate column widths.
total_op_time = sum(datum.op_time for datum in profile_datum_list)
total_exec_time = sum(datum.node_exec_stats.all_end_rel_micros
for datum in profile_datum_list)
device_total_row = [
"Device Total", "",
cli_shared.time_to_readable_str(total_op_time,
force_time_unit=time_unit),
cli_shared.time_to_readable_str(total_exec_time,
force_time_unit=time_unit)]
# Calculate column widths.
column_widths = [
len(column_name) for column_name in profile_data.column_names()]
for col in range(len(device_total_row)):
column_widths[col] = max(column_widths[col], len(device_total_row[col]))
for col in range(len(column_widths)):
for row in range(profile_data.row_count()):
column_widths[col] = max(
column_widths[col], len(profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)))
column_widths[col] += 2 # add margin between columns
# Add device name.
output = [RL("-" * screen_cols)]
device_row = "Device %d of %d: %s" % (
device_index + 1, device_count, device_name)
output.append(RL(device_row))
output.append(RL())
# Add headers.
base_command = "list_profile"
row = RL()
for col in range(profile_data.column_count()):
column_name = profile_data.column_names()[col]
sort_id = profile_data.column_sort_id(col)
command = "%s -s %s" % (base_command, sort_id)
if sort_by == sort_id and not sort_reverse:
command += " -r"
head_menu_item = debugger_cli_common.MenuItem(None, command)
row += RL(column_name, font_attr=[head_menu_item, "bold"])
row += RL(" " * (column_widths[col] - len(column_name)))
output.append(row)
# Add data rows.
for row in range(profile_data.row_count()):
new_row = RL()
for col in range(profile_data.column_count()):
new_cell = profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)
new_row += new_cell
new_row += RL(" " * (column_widths[col] - len(new_cell)))
output.append(new_row)
# Add stat totals.
row_str = ""
for col in range(len(device_total_row)):
row_str += ("{:<%d}" % column_widths[col]).format(device_total_row[col])
output.append(RL())
output.append(RL(row_str))
return debugger_cli_common.rich_text_lines_from_rich_line_list(output)
def _measure_list_profile_column_widths(self, profile_data):
"""Determine the maximum column widths for each data list.
Args:
profile_data: A ProfileDataTableView object wrapping the profile data.
Returns:
List of column widths in the same order as columns in data.
"""
num_columns = len(profile_data.column_names())
widths = [len(column_name) for column_name in profile_data.column_names()]
for row in range(profile_data.row_count()):
for col in range(num_columns):
widths[col] = max(
widths[col], len(str(profile_data.row_values(row)[col])) + 2)
return widths
_LINE_COST_ATTR = cli_shared.COLOR_CYAN
_LINE_NUM_ATTR = cli_shared.COLOR_YELLOW
_NUM_NODES_HEAD = "#nodes"
_NUM_EXECS_SUB_HEAD = "(#execs)"
_LINENO_HEAD = "lineno"
_SOURCE_HEAD = "source"
def print_source(self, args, screen_info=None):
"""Print a Python source file with line-level profile information.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
del screen_info
parsed = self._arg_parsers["print_source"].parse_args(args)
device_name_regex = (re.compile(parsed.device_name_filter)
if parsed.device_name_filter else None)
profile_data = []
data_generator = self._get_profile_data_generator()
device_count = len(self._run_metadata.step_stats.dev_stats)
for index in range(device_count):
device_stats = self._run_metadata.step_stats.dev_stats[index]
if device_name_regex and not device_name_regex.match(device_stats.device):
continue
profile_data.extend([datum for datum in data_generator(device_stats)])
source_annotation = source_utils.annotate_source_against_profile(
profile_data,
os.path.expanduser(parsed.source_file_path),
node_name_filter=parsed.node_name_filter,
op_type_filter=parsed.op_type_filter)
if not source_annotation:
return debugger_cli_common.RichTextLines(
["The source file %s does not contain any profile information for "
"the previous Session run under the following "
"filters:" % parsed.source_file_path,
" --%s: %s" % (_DEVICE_NAME_FILTER_FLAG, parsed.device_name_filter),
" --%s: %s" % (_NODE_NAME_FILTER_FLAG, parsed.node_name_filter),
" --%s: %s" % (_OP_TYPE_FILTER_FLAG, parsed.op_type_filter)])
max_total_cost = 0
for line_index in source_annotation:
total_cost = self._get_total_cost(source_annotation[line_index],
parsed.cost_type)
max_total_cost = max(max_total_cost, total_cost)
source_lines, line_num_width = source_utils.load_source(
parsed.source_file_path)
cost_bar_max_length = 10
total_cost_head = parsed.cost_type
column_widths = {
"cost_bar": cost_bar_max_length + 3,
"total_cost": len(total_cost_head) + 3,
"num_nodes_execs": len(self._NUM_EXECS_SUB_HEAD) + 1,
"line_number": line_num_width,
}
head = RL(
" " * column_widths["cost_bar"] +
total_cost_head +
" " * (column_widths["total_cost"] - len(total_cost_head)) +
self._NUM_NODES_HEAD +
" " * (column_widths["num_nodes_execs"] - len(self._NUM_NODES_HEAD)),
font_attr=self._LINE_COST_ATTR)
head += RL(self._LINENO_HEAD, font_attr=self._LINE_NUM_ATTR)
sub_head = RL(
" " * (column_widths["cost_bar"] +
column_widths["total_cost"]) +
self._NUM_EXECS_SUB_HEAD +
" " * (column_widths["num_nodes_execs"] -
len(self._NUM_EXECS_SUB_HEAD)) +
" " * column_widths["line_number"],
font_attr=self._LINE_COST_ATTR)
sub_head += RL(self._SOURCE_HEAD, font_attr="bold")
lines = [head, sub_head]
output_annotations = {}
for i, line in enumerate(source_lines):
lineno = i + 1
if lineno in source_annotation:
annotation = source_annotation[lineno]
cost_bar = self._render_normalized_cost_bar(
self._get_total_cost(annotation, parsed.cost_type), max_total_cost,
cost_bar_max_length)
annotated_line = cost_bar
annotated_line += " " * (column_widths["cost_bar"] - len(cost_bar))
total_cost = RL(cli_shared.time_to_readable_str(
self._get_total_cost(annotation, parsed.cost_type),
force_time_unit=parsed.time_unit),
font_attr=self._LINE_COST_ATTR)
total_cost += " " * (column_widths["total_cost"] - len(total_cost))
annotated_line += total_cost
file_path_filter = re.escape(parsed.source_file_path) + "$"
command = "lp --file_path_filter %s --min_lineno %d --max_lineno %d" % (
file_path_filter, lineno, lineno + 1)
if parsed.device_name_filter:
command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
parsed.device_name_filter)
if parsed.node_name_filter:
command += " --%s %s" % (_NODE_NAME_FILTER_FLAG,
parsed.node_name_filter)
if parsed.op_type_filter:
command += " --%s %s" % (_OP_TYPE_FILTER_FLAG,
parsed.op_type_filter)
menu_item = debugger_cli_common.MenuItem(None, command)
num_nodes_execs = RL("%d(%d)" % (annotation.node_count,
annotation.node_exec_count),
font_attr=[self._LINE_COST_ATTR, menu_item])
num_nodes_execs += " " * (
column_widths["num_nodes_execs"] - len(num_nodes_execs))
annotated_line += num_nodes_execs
else:
annotated_line = RL(
" " * sum(column_widths[col_name] for col_name in column_widths
if col_name != "line_number"))
line_num_column = RL(" L%d" % (lineno), self._LINE_NUM_ATTR)
line_num_column += " " * (
column_widths["line_number"] - len(line_num_column))
annotated_line += line_num_column
annotated_line += line
lines.append(annotated_line)
if parsed.init_line == lineno:
output_annotations[
debugger_cli_common.INIT_SCROLL_POS_KEY] = len(lines) - 1
return debugger_cli_common.rich_text_lines_from_rich_line_list(
lines, annotations=output_annotations)
def _get_total_cost(self, aggregated_profile, cost_type):
if cost_type == "exec_time":
return aggregated_profile.total_exec_time
elif cost_type == "op_time":
return aggregated_profile.total_op_time
else:
raise ValueError("Unsupported cost type: %s" % cost_type)
def _render_normalized_cost_bar(self, cost, max_cost, length):
"""Render a text bar representing a normalized cost.
Args:
cost: the absolute value of the cost.
max_cost: the maximum cost value to normalize the absolute cost with.
length: (int) length of the cost bar, in number of characters, excluding
the brackets on the two ends.
Returns:
An instance of debugger_cli_common.RichTextLine.
"""
num_ticks = int(np.ceil(float(cost) / max_cost * length))
num_ticks = num_ticks or 1 # Minimum is 1 tick.
output = RL("[", font_attr=self._LINE_COST_ATTR)
output += RL("|" * num_ticks + " " * (length - num_ticks),
font_attr=["bold", self._LINE_COST_ATTR])
output += RL("]", font_attr=self._LINE_COST_ATTR)
return output
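# Worked example (illustrative): with cost=3, max_cost=10 and length=10,
# num_ticks = ceil(3 / 10 * 10) = 3, so the bar renders as "[|||       ]"
# (three ticks followed by seven spaces, bracketed on both ends).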
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def create_profiler_ui(graph,
run_metadata,
ui_type="curses",
on_ui_exit=None,
config=None):
"""Create an instance of CursesUI based on a `tf.Graph` and `RunMetadata`.
Args:
graph: Python `Graph` object.
run_metadata: A `RunMetadata` protobuf object.
ui_type: (str) requested UI type, e.g., "curses", "readline".
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig`.
Returns:
(base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
commands and tab-completions registered.
"""
del config # Currently unused.
analyzer = ProfileAnalyzer(graph, run_metadata)
cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)
cli.register_command_handler(
"list_profile",
analyzer.list_profile,
analyzer.get_help("list_profile"),
prefix_aliases=["lp"])
cli.register_command_handler(
"print_source",
analyzer.print_source,
analyzer.get_help("print_source"),
prefix_aliases=["ps"])
return cli
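# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of collecting a RunMetadata proto with full tracing and
# handing it to the profiler CLI. The helper name is an assumption; `sess`
# and `fetches` are caller-supplied, and the RunOptions/RunMetadata usage
# follows the standard tf.compat.v1 Session API.
def _example_launch_profiler_ui(sess, fetches):
  from tensorflow.core.protobuf import config_pb2  # local import for the sketch
  run_options = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  run_metadata = config_pb2.RunMetadata()
  sess.run(fetches, options=run_options, run_metadata=run_metadata)
  cli = create_profiler_ui(sess.graph, run_metadata, ui_type="readline")
  cli.run_ui()  # blocks until the user exits the CLI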
|
tensorflow-master
|
tensorflow/python/debug/cli/profile_analyzer_cli.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tensor formatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.debug.cli import cli_test_utils
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
np.set_printoptions(
precision=8, threshold=1000, edgeitems=3, linewidth=75)
def _checkTensorMetadata(self, tensor, annotations):
self.assertEqual(
{"dtype": tensor.dtype, "shape": tensor.shape},
annotations["tensor_metadata"])
# Regular expression for the text representation of float numbers, possibly
# in scientific notation.
_ELEMENT_REGEX = re.compile(
r"([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?|nan|inf|-inf)")
def _checkBeginIndicesAnnotations(self, out, a):
"""Check the beginning-index annotations of an ndarray representation.
Args:
out: An instance of RichTextLines representing a numpy.ndarray.
a: The numpy.ndarray being represented.
Raises:
ValueError: if any ellipses ("...") are found in the lines representing
the array.
"""
begin_line_num = 0
while not out.lines[begin_line_num].startswith("array"):
begin_line_num += 1
element_index = 0
for line_num in range(begin_line_num, len(out.lines)):
line = out.lines[line_num]
if "..." in line:
raise ValueError("Unexpected found ellipses in line representing array")
matches = re.finditer(self._ELEMENT_REGEX, line)
for line_item_index, _ in enumerate(matches):
subscripts = list(np.unravel_index(element_index, a.shape))
if line_item_index == 0:
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: subscripts},
out.annotations[line_num])
element_index += 1
self.assertEqual(element_index, np.size(a))
def _checkTensorElementLocations(self, out, a):
"""Check the results of locate_tensor_element on an ndarray representation.
that represents a numpy.ndaray.
Args:
out: An instance of RichTextLines representing a numpy.ndarray.
a: The numpy.ndarray being represented.
Raises:
ValueError: if any ellipses ("...") are found in the lines representing
the array.
"""
# First, locate the beginning of the tensor value section.
begin_line_num = 0
while not out.lines[begin_line_num].startswith("array"):
begin_line_num += 1
# Second, find all matches to tensor-value regex.
element_index = 0
for line_num in range(begin_line_num, len(out.lines)):
line = out.lines[line_num]
if "..." in line:
raise ValueError("Unexpected found ellipses in line representing array")
matches = re.finditer(self._ELEMENT_REGEX, line)
for match in matches:
subscripts = list(np.unravel_index(element_index, a.shape))
is_omitted, row, start_col, end_col = (
tensor_format.locate_tensor_element(out, subscripts))
self.assertFalse(is_omitted)
self.assertEqual(line_num, row)
self.assertEqual(match.start(), start_col)
self.assertEqual(match.end(), end_col)
element_index += 1
self.assertEqual(element_index, np.size(a))
def _findFirst(self, lines, string):
"""Find first occurrence of a string in a list of strings."""
for i, line in enumerate(lines):
find_index = line.find(string)
if find_index >= 0:
return i, find_index
def _extractBoldNumbers(self, out, start_line):
"""Extract all numbers that have the bold font attribute.
Args:
out: An instance of RichTextLines.
start_line: 0-based index to start from.
Returns:
A list of floats.
"""
floats = []
for i in range(start_line, len(out.lines)):
if i not in out.font_attr_segs:
continue
line_attrs = out.font_attr_segs[i]
for begin, end, attr_value in line_attrs:
if attr_value == "bold":
floats.append(float(out.lines[i][begin:end]))
return floats
def testFormatZeroDimensionTensor(self):
a = np.array(42, dtype=np.int32)
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertTrue(out.lines[2].startswith("array(42"))
self._checkTensorMetadata(a, out.annotations)
def testFormatTensorHighlightsTensorNameWithoutDebugOp(self):
tensor_name = "a_tensor:0"
a = np.zeros(2)
out = tensor_format.format_tensor(
a, tensor_name, np_printoptions={"linewidth": 40})
self.assertEqual([(8, 8 + len(tensor_name), "bold")], out.font_attr_segs[0])
def testFormatTensorHighlightsTensorNameWithDebugOp(self):
tensor_name = "a_tensor:0"
debug_op = "DebugIdentity"
a = np.zeros(2)
out = tensor_format.format_tensor(
a, "%s:%s" % (tensor_name, debug_op), np_printoptions={"linewidth": 40})
self.assertEqual([(8, 8 + len(tensor_name), "bold"),
(8 + len(tensor_name) + 1,
8 + len(tensor_name) + 1 + len(debug_op), "yellow")],
out.font_attr_segs[0])
def testFormatTensor1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor2DNoEllipsisNoRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensorSuppressingTensorName(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, None)
self.assertEqual(repr(a).split("\n"), out.lines)
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensorWithMetadata(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a", include_metadata=True)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\":",
" dtype: float64",
" shape: (4, 4)",
""], out.lines[:4])
self.assertEqual(repr(a).split("\n"), out.lines[4:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor2DNoEllipsisWithRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 40.0, 40).reshape([2, 20])
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 50})
self.assertEqual(
{"dtype": a.dtype, "shape": a.shape},
out.annotations["tensor_metadata"])
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor3DNoEllipsis(self):
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor3DNoEllipsisWithArgwhereHighlightWithMatches(self):
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
lower_bound = 0.26
upper_bound = 0.5
def highlight_filter(x):
return np.logical_and(x > lower_bound, x < upper_bound)
highlight_options = tensor_format.HighlightOptions(
highlight_filter, description="between 0.26 and 0.5")
out = tensor_format.format_tensor(
a, "a", highlight_options=highlight_options)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\": "
"Highlighted(between 0.26 and 0.5): 5 of 24 element(s) (20.83%)",
""],
out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndicesAnnotations(out, a)
self.assertAllClose(
[0.29166667, 0.33333333, 0.375, 0.41666667, 0.45833333],
self._extractBoldNumbers(out, 2))
def testFormatTensor3DNoEllipsisWithArgwhereHighlightWithNoMatches(self):
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
def highlight_filter(x):
return x > 10.0
highlight_options = tensor_format.HighlightOptions(highlight_filter)
out = tensor_format.format_tensor(
a, "a", highlight_options=highlight_options)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\": Highlighted: 0 of 24 element(s) (0.00%)", ""],
out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
# Check font attribute segments for highlighted elements.
for i in range(2, len(out.lines)):
self.assertNotIn(i, out.font_attr_segs)
def testFormatTensorWithEllipses(self):
a = (np.arange(11 * 11 * 11) + 1000).reshape([11, 11, 11]).astype(np.int32)
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
actual_row_0_0_0, _ = self._findFirst(out.lines, "1000")
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: [0, 0, 0]},
out.annotations[actual_row_0_0_0])
actual_row_0_1_0, _ = self._findFirst(out.lines, "1011")
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: [0, 1, 0]},
out.annotations[actual_row_0_1_0])
# Find the first line that is completely omitted.
omitted_line = 2
while not out.lines[omitted_line].strip().startswith("..."):
omitted_line += 1
self.assertEqual({tensor_format.OMITTED_INDICES_KEY: [0, 2, 0]},
out.annotations[omitted_line])
actual_row_10_10_0, _ = self._findFirst(out.lines, "2320")
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: [10, 10, 0]},
out.annotations[actual_row_10_10_0])
# Find the last line that is completely omitted.
omitted_line = len(out.lines) - 1
while not out.lines[omitted_line].strip().startswith("..."):
omitted_line -= 1
self.assertEqual({tensor_format.OMITTED_INDICES_KEY: [10, 2, 0]},
out.annotations[omitted_line])
def testFormatUninitializedTensor(self):
tensor_proto = tensor_pb2.TensorProto(
dtype=types_pb2.DataType.Value("DT_FLOAT"),
tensor_shape=tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]))
out = tensor_format.format_tensor(
debug_data.InconvertibleTensorProto(tensor_proto, False), "a")
self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor:"],
out.lines[:3])
self.assertEqual(str(tensor_proto).split("\n"), out.lines[3:])
def testFormatResourceTypeTensor(self):
tensor_proto = tensor_pb2.TensorProto(
dtype=types_pb2.DataType.Value("DT_RESOURCE"),
tensor_shape=tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]))
out = tensor_format.format_tensor(
debug_data.InconvertibleTensorProto(tensor_proto), "a")
self.assertEqual(["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(str(tensor_proto).split("\n"), out.lines[2:])
def testLocateTensorElement1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [20])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0, 0])
def testLocateTensorElement1DNoEllipsisBatchMode(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
def testBatchModeWithErrors(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
with self.assertRaisesRegexp(ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [[0, 0], [0]])
with self.assertRaisesRegexp(ValueError,
"Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [[0], [20]])
with self.assertRaisesRegexp(ValueError,
r"Indices contain negative value\(s\)"):
tensor_format.locate_tensor_element(out, [[0], [-1]])
with self.assertRaisesRegexp(
ValueError, "Input indices sets are not in ascending order"):
tensor_format.locate_tensor_element(out, [[5], [0]])
def testLocateTensorElement1DTinyAndNanValues(self):
a = np.ones([3, 3]) * 1e-8
a[1, 0] = np.nan
a[1, 2] = np.inf
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 100})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
def testLocateTensorElement2DNoEllipsis(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [1, 4])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 2])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0])
def testLocateTensorElement2DNoEllipsisWithNumericSummary(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a", include_numeric_summary=True)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\":",
"",
"Numeric summary:",
"| 0 + | total |",
"| 1 15 | 16 |",
"| min max mean std |"],
out.lines[:6])
cli_test_utils.assert_array_lines_close(
self, [0.0, 0.9375, 0.46875, 0.28811076429], out.lines[6:7])
cli_test_utils.assert_array_lines_close(self, a, out.lines[8:])
self._checkTensorElementLocations(out, a)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [1, 4])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 2])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0])
def testLocateTensorElement3DWithEllipses(self):
a = (np.arange(11 * 11 * 11) + 1000).reshape([11, 11, 11]).astype(np.int32)
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
actual_row_0_0_0, actual_col_0_0_0 = self._findFirst(out.lines, "1000")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 0, 0])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_0_0, row)
self.assertEqual(actual_col_0_0_0, start_col)
self.assertEqual(actual_col_0_0_0 + 4, end_col)
actual_row_0_0_10, _ = self._findFirst(out.lines, "1010")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 0, 10])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_0_10, row)
self.assertIsNone(start_col) # Passes ellipsis.
self.assertIsNone(end_col)
actual_row_0_1_0, actual_col_0_1_0 = self._findFirst(out.lines, "1011")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 1, 0])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_1_0, row)
self.assertEqual(actual_col_0_1_0, start_col)
self.assertEqual(actual_col_0_1_0 + 4, end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 2, 0])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 2, 10])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 8, 10])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
actual_row_0_10_1, actual_col_0_10_1 = self._findFirst(out.lines, "1111")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 10, 1])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_10_1, row)
self.assertEqual(actual_col_0_10_1, start_col)
self.assertEqual(actual_col_0_10_1 + 4, end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [5, 1, 1])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
actual_row_10_10_10, _ = self._findFirst(out.lines, "2330")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [10, 10, 10])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_10_10_10, row)
self.assertIsNone(start_col) # Past ellipsis.
self.assertIsNone(end_col)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [11, 5, 5])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 5, 5])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [5, 5])
def testLocateTensorElement3DWithEllipsesBatchMode(self):
a = (np.arange(11 * 11 * 11) + 1000).reshape([11, 11, 11]).astype(np.int32)
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100,
"edgeitems": 2})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
actual_row_0_0_0, actual_col_0_0_0 = self._findFirst(out.lines, "1000")
actual_row_0_0_10, _ = self._findFirst(out.lines, "1010")
actual_row_10_10_10, _ = self._findFirst(out.lines, "2330")
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out, [[0, 0, 0]])
self.assertEqual([False], are_omitted)
self.assertEqual([actual_row_0_0_0], rows)
self.assertEqual([actual_col_0_0_0], start_cols)
self.assertEqual([actual_col_0_0_0 + 4], end_cols)
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out,
[[0, 0, 0], [0, 0, 10]])
self.assertEqual([False, False], are_omitted)
self.assertEqual([actual_row_0_0_0, actual_row_0_0_10], rows)
self.assertEqual([actual_col_0_0_0, None], start_cols)
self.assertEqual([actual_col_0_0_0 + 4, None], end_cols)
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out,
[[0, 0, 0], [0, 2, 0]])
self.assertEqual([False, True], are_omitted)
self.assertEqual([2, 4], rows)
self.assertEqual(2, len(start_cols))
self.assertEqual(2, len(end_cols))
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out,
[[0, 0, 0], [10, 10, 10]])
self.assertEqual([False, False], are_omitted)
self.assertEqual([actual_row_0_0_0, actual_row_10_10_10], rows)
self.assertEqual([actual_col_0_0_0, None], start_cols)
self.assertEqual([actual_col_0_0_0 + 4, None], end_cols)
def testLocateTensorElementAnnotationsUnavailable(self):
tensor_proto = tensor_pb2.TensorProto(
dtype=types_pb2.DataType.Value("DT_FLOAT"),
tensor_shape=tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]))
out = tensor_format.format_tensor(
debug_data.InconvertibleTensorProto(tensor_proto, False), "a")
self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor:"],
out.lines[:3])
with self.assertRaisesRegexp(
AttributeError, "tensor_metadata is not available in annotations"):
tensor_format.locate_tensor_element(out, [0])
class NumericSummaryTest(test_util.TensorFlowTestCase):
def testNumericSummaryOnFloatFullHouse(self):
x = np.array([np.nan, np.nan, -np.inf, np.inf, np.inf, np.inf, -2, -3, -4,
0, 1, 2, 2, 2, 2, 0, 0, 0, np.inf, np.inf, np.inf])
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["| nan -inf - 0 + +inf | total |",
"| 2 1 3 4 5 6 | 21 |",
"| min max mean std |"], out.lines[:3])
cli_test_utils.assert_array_lines_close(
self, [-4.0, 2.0, 0.0, 1.95789002075], out.lines[3:4])
def testNumericSummaryOnFloatMissingCategories(self):
x = np.array([np.nan, np.nan])
out = tensor_format.numeric_summary(x)
self.assertEqual(2, len(out.lines))
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["| nan | total |", "| 2 | 2 |"], out.lines[:2])
x = np.array([-np.inf, np.inf, 0, 0, np.inf, np.inf])
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["| -inf 0 +inf | total |",
"| 1 2 3 | 6 |",
"| min max mean std |"], out.lines[:3])
cli_test_utils.assert_array_lines_close(
self, [0.0, 0.0, 0.0, 0.0], out.lines[3:4])
x = np.array([-120, 120, 130])
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["| - + | total |",
"| 1 2 | 3 |",
"| min max mean std |"],
out.lines[:3])
cli_test_utils.assert_array_lines_close(
self, [-120, 130, 43.3333333333, 115.566238822], out.lines[3:4])
def testNumericSummaryOnEmptyFloat(self):
x = np.array([], dtype=np.float32)
out = tensor_format.numeric_summary(x)
self.assertEqual(["No numeric summary available due to empty tensor."],
out.lines)
def testNumericSummaryOnInt(self):
x = np.array([-3] * 50 + [3] * 200 + [0], dtype=np.int32)
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["| - 0 + | total |",
"| 50 1 200 | 251 |",
"| min max mean std |"],
out.lines[:3])
cli_test_utils.assert_array_lines_close(
self, [-3, 3, 1.79282868526, 2.39789673081], out.lines[3:4])
def testNumericSummaryOnBool(self):
x = np.array([False, True, True, False], dtype=np.bool)
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["| False True | total |", "| 2 2 | 4 |"], out.lines)
x = np.array([True] * 10, dtype=np.bool)
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["| True | total |", "| 10 | 10 |"], out.lines)
x = np.array([False] * 10, dtype=np.bool)
out = tensor_format.numeric_summary(x)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["| False | total |", "| 10 | 10 |"], out.lines)
x = np.array([], dtype=np.bool)
out = tensor_format.numeric_summary(x)
self.assertEqual(["No numeric summary available due to empty tensor."],
out.lines)
def testNumericSummaryOnStrTensor(self):
x = np.array(["spam", "egg"], dtype=np.object)
out = tensor_format.numeric_summary(x)
self.assertEqual(
["No numeric summary available due to tensor dtype: object."],
out.lines)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/tensor_format_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Class of TensorFlow Debugger (tfdbg) Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
class BaseUI(object):
"""Base class of tfdbg user interface."""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
ERROR_MESSAGE_PREFIX = "ERROR: "
INFO_MESSAGE_PREFIX = "INFO: "
def __init__(self, on_ui_exit=None, config=None):
"""Constructor of the base class.
Args:
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations.
"""
self._on_ui_exit = on_ui_exit
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._config = config or cli_config.CLIConfig()
self._config_argparser = argparse.ArgumentParser(
description="config command", usage=argparse.SUPPRESS)
subparsers = self._config_argparser.add_subparsers()
set_parser = subparsers.add_parser("set")
set_parser.add_argument("property_name", type=str)
set_parser.add_argument("property_value", type=str)
set_parser = subparsers.add_parser("show")
self.register_command_handler(
"config",
self._config_command_handler,
self._config_argparser.format_help(),
prefix_aliases=["cfg"])
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
help_intro: (RichTextLines) Rich text lines appended to the beginning of
the output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the UI until user- or command- triggered exit.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
enable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on
start-up.
Returns:
An exit token of arbitrary type. Can be None.
"""
raise NotImplementedError("run_ui() is not implemented in BaseUI")
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
output_file_path: (str or None) The path to save the screen output
to (if any).
"""
command = command.strip()
if not command:
return "", [], None
command_items = command_parser.parse_command(command)
command_items, output_file_path = command_parser.extract_output_file_path(
command_items)
return command_items[0], command_items[1:], output_file_path
def _analyze_tab_complete_input(self, text):
"""Analyze raw input to tab-completer.
Args:
text: (str) the full, raw input text to be tab-completed.
Returns:
context: (str) the context str. For example,
If text == "print_tensor softmax", returns "print_tensor".
If text == "print", returns "".
If text == "", returns "".
prefix: (str) the prefix to be tab-completed, from the last word.
For example, if text == "print_tensor softmax", returns "softmax".
If text == "print", returns "print".
If text == "", returns "".
except_last_word: (str) the input text, except the last word.
For example, if text == "print_tensor softmax", returns "print_tensor".
If text == "print_tensor -a softmax", returns "print_tensor -a".
If text == "print", returns "".
If text == "", returns "".
"""
text = text.lstrip()
if not text:
# Empty (top-level) context.
context = ""
prefix = ""
except_last_word = ""
else:
items = text.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
except_last_word = ""
else:
# Multiple words.
context = items[0]
prefix = items[-1]
except_last_word = " ".join(items[:-1]) + " "
return context, prefix, except_last_word
@property
def config(self):
"""Obtain the CLIConfig of this `BaseUI` instance."""
return self._config
def _config_command_handler(self, args, screen_info=None):
"""Command handler for the "config" command."""
del screen_info # Currently unused.
parsed = self._config_argparser.parse_args(args)
if hasattr(parsed, "property_name") and hasattr(parsed, "property_value"):
# set.
self._config.set(parsed.property_name, parsed.property_value)
return self._config.summarize(highlight=parsed.property_name)
else:
# show.
return self._config.summarize()
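# A small sketch exercising the parsing helpers above. run_ui() is left
# unimplemented in BaseUI, but the command- and tab-completion parsing can be
# demonstrated directly on an instance:
def _example_base_ui_parsing():
  ui = BaseUI()
  # pylint: disable=protected-access
  # No output redirection in the command, so the file path is None.
  prefix, args, output_path = ui._parse_command("pt tensor_a -a")
  assert (prefix, args, output_path) == ("pt", ["tensor_a", "-a"], None)
  # Multi-word input: the first word is the completion context and the last
  # word is the prefix to be completed.
  context, comp_prefix, except_last = ui._analyze_tab_complete_input(
      "print_tensor soft")
  # pylint: enable=protected-access
  assert (context, comp_prefix, except_last) == (
      "print_tensor", "soft", "print_tensor ")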
|
tensorflow-master
|
tensorflow/python/debug/cli/base_ui.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for arbitrary expression evaluation based on a debugger data dump."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np # pylint: disable=unused-import
from tensorflow.python.debug.lib import debug_data
_DUMP_TENSOR_PATTERN = re.compile(r"`.*?`")
_DEVICE_NAME_PREFIX_PATTERN = re.compile(
r"/job:(\w)+/replica:(\d)+/task:(\d)+/(\w)+:(\d)+:")
_EXEC_INDEX_SUFFIX_PATTERN = re.compile(r"\[(\d)*\]$")
_DEFAULT_DEBUG_OP = "DebugIdentity"
def _parse_debug_tensor_name(debug_tensor_name):
# pylint: disable=line-too-long
"""Parse a debug tensor name in a to-be-evaluated expression.
Args:
debug_tensor_name: name of the debug tensor, with or without
device name as a prefix, with or without debug op, with or
without '[<exec_index>]' as a suffix.
E.g., without device name prefix, without debug op suffix:
"hidden_0/MatMul:0"
E.g., with device name prefix:
"/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0"
E.g., with debug op suffix:
"hidden_0/MatMul:0:DebugNumericSummary"
E.g., with device name prefix and debug op suffix:
"/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary"
E.g., with device name prefix, debug op and an exec index:
"/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary[1]"
Returns:
device_name: If device name prefix exists, the device name; otherwise,
`None`.
node_name: Name of the node.
output_slot: Output slot index as an `int`.
debug_op: If the debug op suffix exists, the debug op name; otherwise,
`None`.
exec_index: Execution index (applicable to cases in which a debug tensor
is computed multiple times in a `tf.Session.run` call, e.g., due to
`tf.while_loop`). If the exec_index suffix does not exist, this value
defaults to `0`.
Raises:
ValueError: If the input `debug_tensor_name` is malformed.
"""
# pylint: enable=line-too-long
device_prefix_match = re.match(_DEVICE_NAME_PREFIX_PATTERN, debug_tensor_name)
if device_prefix_match:
device_name = debug_tensor_name[
device_prefix_match.start() : device_prefix_match.end() - 1]
debug_tensor_name = debug_tensor_name[device_prefix_match.end():]
else:
device_name = None
split_items = debug_tensor_name.split(":")
if len(split_items) not in (2, 3):
raise ValueError(
"The debug tensor name in the to-be-evaluated expression is malformed: "
"'%s'" % debug_tensor_name)
# TODO(cais): Provide examples of good debug tensor names in the error
# message.
exec_index_match = re.search(_EXEC_INDEX_SUFFIX_PATTERN, split_items[-1])
if exec_index_match:
exec_index = int(split_items[-1][
exec_index_match.start() + 1 : exec_index_match.end() - 1])
split_items[-1] = split_items[-1][:exec_index_match.start()]
else:
exec_index = 0
if len(split_items) == 2:
node_name = split_items[0]
output_slot = int(split_items[1])
debug_op = _DEFAULT_DEBUG_OP
  else:
    # `split_items` already has any "[<exec_index>]" suffix stripped from its
    # last element, so it can be used directly here.
    node_name = split_items[0]
    output_slot = int(split_items[1])
    debug_op = split_items[2]
return device_name, node_name, output_slot, debug_op, exec_index
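# A short sketch (for exposition) walking _parse_debug_tensor_name through two
# of the name forms enumerated in its docstring:
def _example_parse_debug_tensor_names():
  # Bare tensor name: no device, default debug op, exec index 0.
  assert _parse_debug_tensor_name("hidden_0/MatMul:0") == (
      None, "hidden_0/MatMul", 0, "DebugIdentity", 0)
  # Device prefix, explicit debug op and an exec-index suffix.
  assert _parse_debug_tensor_name(
      "/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:"
      "DebugNumericSummary[1]") == (
          "/job:worker/replica:0/task:1/gpu:0", "hidden_0/MatMul", 0,
          "DebugNumericSummary", 1)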
class ExpressionEvaluator(object):
"""Evaluates Python expressions using debug tensor values from a dump."""
def __init__(self, dump):
"""Constructor of ExpressionEvaluator.
Args:
dump: an instance of `DebugDumpDir`.
"""
self._dump = dump
self._cached_tensor_values = {}
def evaluate(self, expression):
"""Parse an expression.
Args:
expression: the expression to be parsed.
Returns:
The result of the evaluation.
Raises:
ValueError: If the value of one or more of the debug tensors in the
expression are not available.
"""
dump_tensors_iter = re.finditer(_DUMP_TENSOR_PATTERN, expression)
rewritten_expression = expression
for match in reversed(list(dump_tensors_iter)):
tensor_name = match.group(0)[1:-1].strip()
device_name, node_name, output_slot, debug_op, exec_index = (
_parse_debug_tensor_name(tensor_name))
if tensor_name not in self._cached_tensor_values:
try:
value = self._dump.get_tensors(
node_name, output_slot, debug_op,
device_name=device_name)[exec_index]
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
raise ValueError(
"Eval failed due to the value of %s:%d:DebugIdentity being "
"unavailable" % (node_name, output_slot))
self._cached_tensor_values[tensor_name] = value
rewritten_expression = (
rewritten_expression[:match.start(0)] +
"self._cached_tensor_values['" + tensor_name + "']" +
rewritten_expression[match.end(0):])
return eval(rewritten_expression) # pylint: disable=eval-used
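# A minimal usage sketch. "/tmp/tfdbg_dump" is a hypothetical dump root
# directory written by a tfdbg-wrapped Session.run() call.
def _example_evaluate_expression():
  dump = debug_data.DebugDumpDir("/tmp/tfdbg_dump")
  evaluator = ExpressionEvaluator(dump)
  # Backtick-enclosed names are replaced by the corresponding dumped tensor
  # values (numpy arrays) before the expression is evaluated.
  return evaluator.evaluate("np.mean(`hidden_0/MatMul:0`)")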
|
tensorflow-master
|
tensorflow/python/debug/cli/evaluator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Widgets for Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.debug.cli import debugger_cli_common
RL = debugger_cli_common.RichLine
class NavigationHistoryItem(object):
"""Individual item in navigation history."""
def __init__(self, command, screen_output, scroll_position):
"""Constructor of NavigationHistoryItem.
Args:
command: (`str`) the command line text.
screen_output: the screen output of the command.
scroll_position: (`int`) scroll position in the screen output.
"""
self.command = command
self.screen_output = screen_output
self.scroll_position = scroll_position
class CursesNavigationHistory(object):
"""Navigation history containing commands, outputs and scroll info."""
BACK_ARROW_TEXT = "<--"
FORWARD_ARROW_TEXT = "-->"
def __init__(self, capacity):
"""Constructor of CursesNavigationHistory.
Args:
capacity: (`int`) How many items this object can hold. Each item consists
of a command string, an output RichTextLines object and a scroll
position.
Raises:
ValueError: If capacity is not a positive number.
"""
if capacity <= 0:
raise ValueError("In valid capacity value: %d" % capacity)
self._capacity = capacity
self._items = []
self._pointer = -1
def add_item(self, command, screen_output, scroll_position):
"""Add an item to the navigation histoyr.
Args:
command: command line text.
screen_output: screen output produced for the command.
scroll_position: (`int`) scroll position in the screen output.
"""
if self._pointer + 1 < len(self._items):
self._items = self._items[:self._pointer + 1]
self._items.append(
NavigationHistoryItem(command, screen_output, scroll_position))
if len(self._items) > self._capacity:
self._items = self._items[-self._capacity:]
self._pointer = len(self._items) - 1
def update_scroll_position(self, new_scroll_position):
"""Update the scroll position of the currently-pointed-to history item.
Args:
new_scroll_position: (`int`) new scroll-position value.
Raises:
ValueError: If the history is empty.
"""
if not self._items:
raise ValueError("Empty navigation history")
self._items[self._pointer].scroll_position = new_scroll_position
def size(self):
return len(self._items)
def pointer(self):
return self._pointer
def go_back(self):
"""Go back one place in the history, if possible.
Decrease the pointer value by 1, if possible. Otherwise, the pointer value
will be unchanged.
Returns:
The `NavigationHistoryItem` at the updated pointer position.
Raises:
ValueError: If history is empty.
"""
if not self._items:
raise ValueError("Empty navigation history")
if self.can_go_back():
self._pointer -= 1
return self._items[self._pointer]
def go_forward(self):
"""Go forward one place in the history, if possible.
Increase the pointer value by 1, if possible. Otherwise, the pointer value
will be unchanged.
Returns:
The `NavigationHistoryItem` at the updated pointer position.
Raises:
ValueError: If history is empty.
"""
if not self._items:
raise ValueError("Empty navigation history")
if self.can_go_forward():
self._pointer += 1
return self._items[self._pointer]
def can_go_back(self):
"""Test whether client can go back one place.
Returns:
(`bool`) Whether going back one place is possible.
"""
return self._pointer >= 1
def can_go_forward(self):
"""Test whether client can go forward one place.
Returns:
(`bool`) Whether going forward one place is possible.
"""
return self._pointer + 1 < len(self._items)
def render(self,
max_length,
backward_command,
forward_command,
latest_command_attribute="black_on_white",
old_command_attribute="magenta_on_white"):
"""Render the rich text content of the single-line navigation bar.
Args:
max_length: (`int`) Maximum length of the navigation bar, in characters.
backward_command: (`str`) command for going backward. Used to construct
the shortcut menu item.
forward_command: (`str`) command for going forward. Used to construct the
shortcut menu item.
latest_command_attribute: font attribute for the latest command.
old_command_attribute: font attribute for old (non-latest) command.
Returns:
(`debugger_cli_common.RichTextLines`) the navigation bar text with
attributes.
"""
output = RL("| ")
output += RL(
self.BACK_ARROW_TEXT,
(debugger_cli_common.MenuItem(None, backward_command)
if self.can_go_back() else None))
output += RL(" ")
output += RL(
self.FORWARD_ARROW_TEXT,
(debugger_cli_common.MenuItem(None, forward_command)
if self.can_go_forward() else None))
if self._items:
command_attribute = (latest_command_attribute
if (self._pointer == (len(self._items) - 1))
else old_command_attribute)
output += RL(" | ")
if self._pointer != len(self._items) - 1:
output += RL("(-%d) " % (len(self._items) - 1 - self._pointer),
command_attribute)
if len(output) < max_length:
maybe_truncated_command = self._items[self._pointer].command[
:(max_length - len(output))]
output += RL(maybe_truncated_command, command_attribute)
return debugger_cli_common.rich_text_lines_from_rich_line_list([output])
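# A small sketch of the navigation history in action (for exposition; the
# commands and outputs below are made up):
def _example_navigation_history():
  history = CursesNavigationHistory(capacity=10)
  history.add_item("lt", debugger_cli_common.RichTextLines(["op_a", "op_b"]), 0)
  history.add_item("pt op_a:0", debugger_cli_common.RichTextLines(["0.5"]), 0)
  assert history.can_go_back() and not history.can_go_forward()
  item = history.go_back()
  assert item.command == "lt"
  # Render a one-line navigation bar whose arrows carry shortcut menu items.
  return history.render(
      max_length=40, backward_command="prev", forward_command="next")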
|
tensorflow-master
|
tensorflow/python/debug/cli/curses_widgets.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Building Blocks of TensorFlow Debugger Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import sre_constants
import traceback
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow_internal
from tensorflow.python.platform import gfile
HELP_INDENT = " "
EXPLICIT_USER_EXIT = "explicit_user_exit"
REGEX_MATCH_LINES_KEY = "regex_match_lines"
INIT_SCROLL_POS_KEY = "init_scroll_pos"
MAIN_MENU_KEY = "mm:"
class CommandLineExit(Exception):
def __init__(self, exit_token=None):
Exception.__init__(self)
self._exit_token = exit_token
@property
def exit_token(self):
return self._exit_token
class RichLine(object):
"""Rich single-line text.
Attributes:
text: A plain string, the raw text represented by this object. Should not
contain newlines.
font_attr_segs: A list of (start, end, font attribute) triples, representing
richness information applied to substrings of text.
"""
def __init__(self, text="", font_attr=None):
"""Construct a RichLine with no rich attributes or a single attribute.
Args:
text: Raw text string
font_attr: If specified, a single font attribute to be applied to the
entire text. Extending this object via concatenation allows creation
of text with varying attributes.
"""
# TODO(ebreck) Make .text and .font_attr_segs protected members when we no
# longer need public access.
self.text = text
if font_attr:
self.font_attr_segs = [(0, len(text), font_attr)]
else:
self.font_attr_segs = []
def __add__(self, other):
"""Concatenate two chunks of maybe rich text to make a longer rich line.
Does not modify self.
Args:
other: Another piece of text to concatenate with this one.
If it is a plain str, it will be appended to this string with no
attributes. If it is a RichLine, it will be appended to this string
with its attributes preserved.
Returns:
A new RichLine comprising both chunks of text, with appropriate
attributes applied to the corresponding substrings.
"""
ret = RichLine()
if isinstance(other, six.string_types):
ret.text = self.text + other
ret.font_attr_segs = self.font_attr_segs[:]
return ret
elif isinstance(other, RichLine):
ret.text = self.text + other.text
ret.font_attr_segs = self.font_attr_segs[:]
old_len = len(self.text)
for start, end, font_attr in other.font_attr_segs:
ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))
return ret
else:
raise TypeError("%r cannot be concatenated with a RichLine" % other)
def __len__(self):
return len(self.text)
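# A brief sketch of RichLine concatenation (for exposition): attributes of
# each chunk are preserved and offset into the combined string.
def _example_rich_line_concat():
  line = RichLine("Error: ", font_attr="red") + RichLine(
      "NaN", font_attr="bold")
  line += " in tensor"  # Plain str concatenation adds no attributes.
  assert line.text == "Error: NaN in tensor"
  assert line.font_attr_segs == [(0, 7, "red"), (7, 10, "bold")]
  return line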
def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):
"""Convert a list of RichLine objects or strings to a RichTextLines object.
Args:
rich_text_list: a list of RichLine objects or strings
annotations: annotations for the resultant RichTextLines object.
Returns:
A corresponding RichTextLines object.
"""
lines = []
font_attr_segs = {}
for i, rl in enumerate(rich_text_list):
if isinstance(rl, RichLine):
lines.append(rl.text)
if rl.font_attr_segs:
font_attr_segs[i] = rl.font_attr_segs
else:
lines.append(rl)
return RichTextLines(lines, font_attr_segs, annotations=annotations)
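# A quick sketch (for exposition): plain strings and RichLine objects can be
# mixed in the input list; only RichLine rows contribute font attributes.
def _example_rich_line_list_conversion():
  out = rich_text_lines_from_rich_line_list(
      ["plain line", RichLine("highlighted", font_attr="yellow")])
  assert out.lines == ["plain line", "highlighted"]
  assert out.font_attr_segs == {1: [(0, 11, "yellow")]}
  return out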
def get_tensorflow_version_lines(include_dependency_versions=False):
"""Generate RichTextLines with TensorFlow version info.
Args:
include_dependency_versions: Include the version of TensorFlow's key
dependencies, such as numpy.
Returns:
A formatted, multi-line `RichTextLines` object.
"""
lines = ["TensorFlow version: %s" % pywrap_tensorflow_internal.__version__]
lines.append("")
if include_dependency_versions:
lines.append("Dependency version(s):")
lines.append(" numpy: %s" % np.__version__)
lines.append("")
return RichTextLines(lines)
class RichTextLines(object):
"""Rich multi-line text.
Line-by-line text output, with font attributes (e.g., color) and annotations
(e.g., indices in a multi-dimensional tensor). Used as the text output of CLI
commands. Can be rendered on terminal environments such as curses.
This is not to be confused with Rich Text Format (RTF). This class is for text
lines only.
"""
def __init__(self, lines, font_attr_segs=None, annotations=None):
"""Constructor of RichTextLines.
Args:
lines: A list of str or a single str, representing text output to
screen. The latter case is for convenience when the text output is
single-line.
font_attr_segs: A map from 0-based row index to a list of 3-tuples.
It lists segments in each row that have special font attributes, such
as colors, that are not the default attribute. For example:
{1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]}
In each tuple, the 1st element is the start index of the segment. The
2nd element is the end index, in an "open interval" fashion. The 3rd
element is an object or a list of objects that represents the font
attribute. Colors are represented as strings as in the examples above.
annotations: A map from 0-based row index to any object for annotating
the row. A typical use example is annotating rows of the output as
indices in a multi-dimensional tensor. For example, consider the
following text representation of a 3x2x2 tensor:
[[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]]
The annotation can indicate the indices of the first element shown in
each row, i.e.,
{0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]}
This information can make display of tensors on screen clearer and can
help the user navigate (scroll) to the desired location in a large
tensor.
Raises:
ValueError: If lines is of invalid type.
"""
if isinstance(lines, list):
self._lines = lines
elif isinstance(lines, six.string_types):
self._lines = [lines]
else:
raise ValueError("Unexpected type in lines: %s" % type(lines))
self._font_attr_segs = font_attr_segs
if not self._font_attr_segs:
self._font_attr_segs = {}
# TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
self._annotations = annotations
if not self._annotations:
self._annotations = {}
# TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
@property
def lines(self):
return self._lines
@property
def font_attr_segs(self):
return self._font_attr_segs
@property
def annotations(self):
return self._annotations
def num_lines(self):
return len(self._lines)
def slice(self, begin, end):
"""Slice a RichTextLines object.
The object itself is not changed. A sliced instance is returned.
Args:
begin: (int) Beginning line index (inclusive). Must be >= 0.
end: (int) Ending line index (exclusive). Must be >= 0.
Returns:
(RichTextLines) Sliced output instance of RichTextLines.
Raises:
ValueError: If begin or end is negative.
"""
if begin < 0 or end < 0:
raise ValueError("Encountered negative index.")
# Copy lines.
lines = self.lines[begin:end]
# Slice font attribute segments.
font_attr_segs = {}
for key in self.font_attr_segs:
if key >= begin and key < end:
font_attr_segs[key - begin] = self.font_attr_segs[key]
# Slice annotations.
annotations = {}
for key in self.annotations:
if not isinstance(key, int):
# Annotations can contain keys that are not line numbers.
annotations[key] = self.annotations[key]
elif key >= begin and key < end:
annotations[key - begin] = self.annotations[key]
return RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
def extend(self, other):
"""Extend this instance of RichTextLines with another instance.
The extension takes effect on the text lines, the font attribute segments,
as well as the annotations. The line indices in the font attribute
segments and the annotations are adjusted to account for the existing
lines. If there are duplicate, non-line-index fields in the annotations,
the value from the input argument "other" will override that in this
instance.
Args:
other: (RichTextLines) The other RichTextLines instance to be appended at
the end of this instance.
"""
orig_num_lines = self.num_lines() # Record original number of lines.
# Merge the lines.
self._lines.extend(other.lines)
# Merge the font_attr_segs.
for line_index in other.font_attr_segs:
self._font_attr_segs[orig_num_lines + line_index] = (
other.font_attr_segs[line_index])
# Merge the annotations.
for key in other.annotations:
if isinstance(key, int):
self._annotations[orig_num_lines + key] = (other.annotations[key])
else:
self._annotations[key] = other.annotations[key]
def _extend_before(self, other):
"""Add another RichTextLines object to the front.
Args:
other: (RichTextLines) The other object to add to the front of this
  object.
"""
other_num_lines = other.num_lines() # Record original number of lines.
# Merge the lines.
self._lines = other.lines + self._lines
# Merge the font_attr_segs.
new_font_attr_segs = {}
for line_index in self.font_attr_segs:
new_font_attr_segs[other_num_lines + line_index] = (
self.font_attr_segs[line_index])
new_font_attr_segs.update(other.font_attr_segs)
self._font_attr_segs = new_font_attr_segs
# Merge the annotations.
new_annotations = {}
for key in self._annotations:
if isinstance(key, int):
new_annotations[other_num_lines + key] = (self.annotations[key])
else:
new_annotations[key] = other.annotations[key]
new_annotations.update(other.annotations)
self._annotations = new_annotations
def append(self, line, font_attr_segs=None):
"""Append a single line of text.
Args:
line: (str) The text to be added to the end.
font_attr_segs: (list of tuples) Font attribute segments of the appended
line.
"""
self._lines.append(line)
if font_attr_segs:
self._font_attr_segs[len(self._lines) - 1] = font_attr_segs
def append_rich_line(self, rich_line):
self.append(rich_line.text, rich_line.font_attr_segs)
def prepend(self, line, font_attr_segs=None):
"""Prepend (i.e., add to the front) a single line of text.
Args:
line: (str) The text to be added to the front.
font_attr_segs: (list of tuples) Font attribute segments of the prepended
line.
"""
other = RichTextLines(line)
if font_attr_segs:
other.font_attr_segs[0] = font_attr_segs
self._extend_before(other)
def write_to_file(self, file_path):
"""Write the object itself to file, in a plain format.
The font_attr_segs and annotations are ignored.
Args:
file_path: (str) path of the file to write to.
"""
with gfile.Open(file_path, "w") as f:
for line in self._lines:
f.write(line + "\n")
# TODO(cais): Add a method to allow appending to a line in RichTextLines with
# both text and font_attr_segs.
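# A short sketch (for exposition) of how integer keys in font_attr_segs and
# annotations are re-based by slice(), while non-integer keys are kept:
def _example_rich_text_lines_slice():
  out = RichTextLines(
      ["line 0", "line 1", "line 2"],
      font_attr_segs={1: [(0, 4, "bold")]},
      annotations={2: [2, 0, 0], "meta": "kept"})
  sliced = out.slice(1, 3)
  assert sliced.lines == ["line 1", "line 2"]
  assert sliced.font_attr_segs == {0: [(0, 4, "bold")]}
  assert sliced.annotations == {1: [2, 0, 0], "meta": "kept"}
  return sliced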
def regex_find(orig_screen_output, regex, font_attr):
"""Perform regex match in rich text lines.
Produces a new RichTextLines object with font_attr_segs containing highlighted
regex matches.
Example use cases include:
1) search for specific items in a large list of items, and
2) search for specific numerical values in a large tensor.
Args:
orig_screen_output: The original RichTextLines, in which the regex find
is to be performed.
regex: The regex used for matching.
font_attr: Font attribute used for highlighting the found result.
Returns:
A modified copy of orig_screen_output.
Raises:
ValueError: If input str regex is not a valid regular expression.
"""
new_screen_output = RichTextLines(
orig_screen_output.lines,
font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs),
annotations=orig_screen_output.annotations)
try:
re_prog = re.compile(regex)
except sre_constants.error:
raise ValueError("Invalid regular expression: \"%s\"" % regex)
regex_match_lines = []
for i in xrange(len(new_screen_output.lines)):
line = new_screen_output.lines[i]
find_it = re_prog.finditer(line)
match_segs = []
for match in find_it:
match_segs.append((match.start(), match.end(), font_attr))
if match_segs:
if i not in new_screen_output.font_attr_segs:
new_screen_output.font_attr_segs[i] = match_segs
else:
new_screen_output.font_attr_segs[i].extend(match_segs)
new_screen_output.font_attr_segs[i] = sorted(
new_screen_output.font_attr_segs[i], key=lambda x: x[0])
regex_match_lines.append(i)
new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines
return new_screen_output
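# A compact sketch (for exposition) of regex_find highlighting and the
# annotation it adds:
def _example_regex_find():
  screen = RichTextLines(["Tensor a: inf", "Tensor b: 0.5"])
  highlighted = regex_find(screen, r"inf", font_attr="red")
  # "inf" occupies columns 10-13 of line 0.
  assert highlighted.font_attr_segs == {0: [(10, 13, "red")]}
  assert highlighted.annotations[REGEX_MATCH_LINES_KEY] == [0]
  return highlighted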
def wrap_rich_text_lines(inp, cols):
"""Wrap RichTextLines according to maximum number of columns.
Produces a new RichTextLines object with the text lines, font_attr_segs and
annotations properly wrapped. This ought to be used sparingly, as in most
cases, command handlers producing RichTextLines outputs should know the
screen/panel width via the screen_info kwarg and should produce properly
length-limited lines in the output accordingly.
Args:
inp: Input RichTextLines object.
cols: Number of columns, as an int.
Returns:
1) A new instance of RichTextLines, with line lengths limited to cols.
2) A list of new (wrapped) line indices. For example, if the original input
consists of three lines and only the second line is wrapped, and it's
wrapped into two lines, this return value will be: [0, 1, 3].
Raises:
ValueError: If inputs have invalid types.
"""
new_line_indices = []
if not isinstance(inp, RichTextLines):
raise ValueError("Invalid type of input screen_output")
if not isinstance(cols, int):
raise ValueError("Invalid type of input cols")
out = RichTextLines([])
row_counter = 0 # Counter for new row index
for i in xrange(len(inp.lines)):
new_line_indices.append(out.num_lines())
line = inp.lines[i]
if i in inp.annotations:
out.annotations[row_counter] = inp.annotations[i]
if len(line) <= cols:
# No wrapping.
out.lines.append(line)
if i in inp.font_attr_segs:
out.font_attr_segs[row_counter] = inp.font_attr_segs[i]
row_counter += 1
else:
# Wrap.
wlines = [] # Wrapped lines.
osegs = []
if i in inp.font_attr_segs:
osegs = inp.font_attr_segs[i]
idx = 0
while idx < len(line):
if idx + cols > len(line):
rlim = len(line)
else:
rlim = idx + cols
wlines.append(line[idx:rlim])
for seg in osegs:
if (seg[0] < rlim) and (seg[1] >= idx):
# Calculate left bound within wrapped line.
if seg[0] >= idx:
lb = seg[0] - idx
else:
lb = 0
# Calculate right bound within wrapped line.
if seg[1] < rlim:
rb = seg[1] - idx
else:
rb = rlim - idx
if rb > lb: # Omit zero-length segments.
wseg = (lb, rb, seg[2])
if row_counter not in out.font_attr_segs:
out.font_attr_segs[row_counter] = [wseg]
else:
out.font_attr_segs[row_counter].append(wseg)
idx += cols
row_counter += 1
out.lines.extend(wlines)
# Copy over keys of annotation that are not row indices.
for key in inp.annotations:
if not isinstance(key, int):
out.annotations[key] = inp.annotations[key]
return out, new_line_indices
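# A small sketch (for exposition): wrapping splits both the text and any
# font attribute segments that straddle the wrap boundary.
def _example_wrap_rich_text_lines():
  inp = RichTextLines(["0123456789"], font_attr_segs={0: [(2, 8, "bold")]})
  out, new_line_indices = wrap_rich_text_lines(inp, 6)
  assert out.lines == ["012345", "6789"]
  assert out.font_attr_segs == {0: [(2, 6, "bold")], 1: [(0, 2, "bold")]}
  assert new_line_indices == [0]
  return out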
class CommandHandlerRegistry(object):
"""Registry of command handlers for CLI.
Handler methods (callables) for user commands can be registered with this
class, which then is able to dispatch commands to the correct handlers and
retrieve the RichTextLines output.
For example, suppose you have the following handler defined:
def echo(argv, screen_info=None):
return RichTextLines(["arguments = %s" % " ".join(argv),
"screen_info = " + repr(screen_info)])
you can register the handler with the command prefix "echo" and alias "e":
registry = CommandHandlerRegistry()
registry.register_command_handler("echo", echo,
"Echo arguments, along with screen info", prefix_aliases=["e"])
then to invoke this command handler with some arguments and screen_info, do:
registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80})
or with the prefix alias:
registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80})
The call will return a RichTextLines object which can be rendered by a CLI.
"""
HELP_COMMAND = "help"
HELP_COMMAND_ALIASES = ["h"]
VERSION_COMMAND = "version"
VERSION_COMMAND_ALIASES = ["ver"]
def __init__(self):
# A dictionary from command prefix to handler.
self._handlers = {}
# A dictionary from prefix alias to prefix.
self._alias_to_prefix = {}
# A dictionary from prefix to aliases.
self._prefix_to_aliases = {}
# A dictionary from command prefix to help string.
self._prefix_to_help = {}
# Introductory text to help information.
self._help_intro = None
# Register a default handler for the command "help".
self.register_command_handler(
self.HELP_COMMAND,
self._help_handler,
"Print this help message.",
prefix_aliases=self.HELP_COMMAND_ALIASES)
# Register a default handler for the command "version".
self.register_command_handler(
self.VERSION_COMMAND,
self._version_handler,
"Print the versions of TensorFlow and its key dependencies.",
prefix_aliases=self.VERSION_COMMAND_ALIASES)
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""Register a callable as a command handler.
Args:
prefix: Command prefix, i.e., the first word in a command, e.g.,
"print" as in "print tensor_1".
handler: A callable of the following signature:
foo_handler(argv, screen_info=None),
where argv is the argument vector (excluding the command prefix) and
screen_info is a dictionary containing information about the screen,
such as number of columns, e.g., {"cols": 100}.
The callable should return:
1) a RichTextLines object representing the screen output.
          The callable can also raise an exception of the type CommandLineExit,
          which, if caught by the command-line interface, will lead to its
          exit. The exception can optionally carry an exit token of arbitrary
          type.
help_info: A help string.
prefix_aliases: Aliases for the command prefix, as a list of str. E.g.,
shorthands for the command prefix: ["p", "pr"]
Raises:
ValueError: If
1) the prefix is empty, or
2) handler is not callable, or
3) a handler is already registered for the prefix, or
        4) elements in prefix_aliases clash with existing aliases, or
        5) help_info is not a str.
"""
if not prefix:
raise ValueError("Empty command prefix")
if prefix in self._handlers:
raise ValueError(
"A handler is already registered for command prefix \"%s\"" % prefix)
# Make sure handler is callable.
if not callable(handler):
raise ValueError("handler is not callable")
# Make sure that help info is a string.
if not isinstance(help_info, six.string_types):
raise ValueError("help_info is not a str")
# Process prefix aliases.
if prefix_aliases:
for alias in prefix_aliases:
if self._resolve_prefix(alias):
raise ValueError(
"The prefix alias \"%s\" clashes with existing prefixes or "
"aliases." % alias)
self._alias_to_prefix[alias] = prefix
self._prefix_to_aliases[prefix] = prefix_aliases
# Store handler.
self._handlers[prefix] = handler
# Store help info.
self._prefix_to_help[prefix] = help_info
def dispatch_command(self, prefix, argv, screen_info=None):
"""Handles a command by dispatching it to a registered command handler.
Args:
prefix: Command prefix, as a str, e.g., "print".
argv: Command argument vector, excluding the command prefix, represented
as a list of str, e.g.,
["tensor_1"]
screen_info: A dictionary containing screen info, e.g., {"cols": 100}.
Returns:
An instance of RichTextLines or None. If any exception is caught during
the invocation of the command handler, the RichTextLines will wrap the
error type and message.
Raises:
ValueError: If
1) prefix is empty, or
2) no command handler is registered for the command prefix, or
        3) the handler is found for the prefix, but it returns a value that is
           neither a RichTextLines instance nor None.
CommandLineExit:
If the command handler raises this type of exception, this method will
simply pass it along.
"""
if not prefix:
raise ValueError("Prefix is empty")
resolved_prefix = self._resolve_prefix(prefix)
if not resolved_prefix:
raise ValueError("No handler is registered for command prefix \"%s\"" %
prefix)
handler = self._handlers[resolved_prefix]
try:
output = handler(argv, screen_info=screen_info)
except CommandLineExit as e:
raise e
except SystemExit as e:
# Special case for syntax errors caught by argparse.
lines = ["Syntax error for command: %s" % prefix,
"For help, do \"help %s\"" % prefix]
output = RichTextLines(lines)
except BaseException as e: # pylint: disable=broad-except
lines = ["Error occurred during handling of command: %s %s:" %
(resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))]
# Include traceback of the exception.
lines.append("")
lines.extend(traceback.format_exc().split("\n"))
output = RichTextLines(lines)
if not isinstance(output, RichTextLines) and output is not None:
raise ValueError(
"Return value from command handler %s is not None or a RichTextLines "
"instance" % str(handler))
return output
def is_registered(self, prefix):
"""Test if a command prefix or its alias is has a registered handler.
Args:
prefix: A prefix or its alias, as a str.
Returns:
True iff a handler is registered for prefix.
"""
return self._resolve_prefix(prefix) is not None
def get_help(self, cmd_prefix=None):
"""Compile help information into a RichTextLines object.
Args:
cmd_prefix: Optional command prefix. As the prefix itself or one of its
aliases.
Returns:
A RichTextLines object containing the help information. If cmd_prefix
is None, the return value will be the full command-line help. Otherwise,
it will be the help information for the specified command.
"""
if not cmd_prefix:
# Print full help information, in sorted order of the command prefixes.
help_info = RichTextLines([])
if self._help_intro:
# If help intro is available, show it at the beginning.
help_info.extend(self._help_intro)
sorted_prefixes = sorted(self._handlers)
for cmd_prefix in sorted_prefixes:
lines = self._get_help_for_command_prefix(cmd_prefix)
lines.append("")
lines.append("")
help_info.extend(RichTextLines(lines))
return help_info
else:
return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))
def set_help_intro(self, help_intro):
"""Set an introductory message to help output.
Args:
help_intro: (RichTextLines) Rich text lines appended to the
beginning of the output of the command "help", as introductory
information.
"""
self._help_intro = help_intro
def _help_handler(self, args, screen_info=None):
"""Command handler for "help".
"help" is a common command that merits built-in support from this class.
Args:
args: Command line arguments to "help" (not including "help" itself).
screen_info: (dict) Information regarding the screen, e.g., the screen
width in characters: {"cols": 80}
Returns:
(RichTextLines) Screen text output.
"""
_ = screen_info # Unused currently.
if not args:
return self.get_help()
elif len(args) == 1:
return self.get_help(args[0])
else:
return RichTextLines(["ERROR: help takes only 0 or 1 input argument."])
def _version_handler(self, args, screen_info=None):
del args # Unused currently.
del screen_info # Unused currently.
return get_tensorflow_version_lines(include_dependency_versions=True)
def _resolve_prefix(self, token):
"""Resolve command prefix from the prefix itself or its alias.
Args:
token: a str to be resolved.
Returns:
If resolvable, the resolved command prefix.
If not resolvable, None.
"""
if token in self._handlers:
return token
elif token in self._alias_to_prefix:
return self._alias_to_prefix[token]
else:
return None
def _get_help_for_command_prefix(self, cmd_prefix):
"""Compile the help information for a given command prefix.
Args:
cmd_prefix: Command prefix, as the prefix itself or one of its
aliases.
Returns:
      A list of str as the help information for cmd_prefix. If the cmd_prefix
does not exist, the returned list of str will indicate that.
"""
lines = []
resolved_prefix = self._resolve_prefix(cmd_prefix)
if not resolved_prefix:
lines.append("Invalid command prefix: \"%s\"" % cmd_prefix)
return lines
lines.append(resolved_prefix)
if resolved_prefix in self._prefix_to_aliases:
lines.append(HELP_INDENT + "Aliases: " + ", ".join(
self._prefix_to_aliases[resolved_prefix]))
lines.append("")
help_lines = self._prefix_to_help[resolved_prefix].split("\n")
for line in help_lines:
lines.append(HELP_INDENT + line)
return lines
class TabCompletionRegistry(object):
"""Registry for tab completion responses."""
def __init__(self):
self._comp_dict = {}
# TODO(cais): Rename method names with "comp" to "*completion*" to avoid
# confusion.
def register_tab_comp_context(self, context_words, comp_items):
"""Register a tab-completion context.
Register that, for each word in context_words, the potential tab-completions
are the words in comp_items.
A context word is a pre-existing, completed word in the command line that
determines how tab-completion works for another, incomplete word in the same
command line.
Completion items consist of potential candidates for the incomplete word.
To give a general example, a context word can be "drink", and the completion
items can be ["coffee", "tea", "water"]
Note: A context word can be empty, in which case the context is for the
top-level commands.
Args:
context_words: A list of context words belonging to the context being
registered. It is a list of str, instead of a single string, to support
synonym words triggering the same tab-completion context, e.g.,
both "drink" and the short-hand "dr" can trigger the same context.
comp_items: A list of completion items, as a list of str.
Raises:
TypeError: if the input arguments are not all of the correct types.
"""
if not isinstance(context_words, list):
raise TypeError("Incorrect type in context_list: Expected list, got %s" %
type(context_words))
if not isinstance(comp_items, list):
raise TypeError("Incorrect type in comp_items: Expected list, got %s" %
type(comp_items))
# Sort the completion items on registration, so that later during
# get_completions calls, no sorting will be necessary.
sorted_comp_items = sorted(comp_items)
for context_word in context_words:
self._comp_dict[context_word] = sorted_comp_items
def deregister_context(self, context_words):
"""Deregister a list of context words.
Args:
context_words: A list of context words to deregister, as a list of str.
Raises:
KeyError: if there are word(s) in context_words that do not correspond
to any registered contexts.
"""
for context_word in context_words:
if context_word not in self._comp_dict:
raise KeyError("Cannot deregister unregistered context word \"%s\"" %
context_word)
for context_word in context_words:
del self._comp_dict[context_word]
def extend_comp_items(self, context_word, new_comp_items):
"""Add a list of completion items to a completion context.
Args:
context_word: A single completion word as a string. The extension will
also apply to all other context words of the same context.
new_comp_items: (list of str) New completion items to add.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
self._comp_dict[context_word].extend(new_comp_items)
self._comp_dict[context_word] = sorted(self._comp_dict[context_word])
def remove_comp_items(self, context_word, comp_items):
"""Remove a list of completion items from a completion context.
Args:
context_word: A single completion word as a string. The removal will
also apply to all other context words of the same context.
comp_items: Completion items to remove.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
for item in comp_items:
self._comp_dict[context_word].remove(item)
def get_completions(self, context_word, prefix):
"""Get the tab completions given a context word and a prefix.
Args:
context_word: The context word.
prefix: The prefix of the incomplete word.
Returns:
(1) None if no registered context matches the context_word.
          A list of str for the matching completion items. Can be an empty
          list if a matching context exists but no completion item matches
          the prefix.
(2) Common prefix of all the words in the first return value. If the
first return value is None, this return value will be None, too. If
the first return value is not None, i.e., a list, this return value
will be a str, which can be an empty str if there is no common
prefix among the items of the list.
"""
if context_word not in self._comp_dict:
return None, None
comp_items = self._comp_dict[context_word]
comp_items = sorted(
[item for item in comp_items if item.startswith(prefix)])
return comp_items, self._common_prefix(comp_items)
def _common_prefix(self, m):
"""Given a list of str, returns the longest common prefix.
Args:
m: (list of str) A list of strings.
Returns:
(str) The longest common prefix.
"""
if not m:
return ""
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
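# Illustrative usage sketch (not part of the original module), following the
# "drink" example in the register_tab_comp_context docstring above:
#
#   registry = TabCompletionRegistry()
#   registry.register_tab_comp_context(["drink", "dr"],
#                                      ["coffee", "tea", "water"])
#   registry.get_completions("drink", "co")  # -> (["coffee"], "coffee")
#   registry.get_completions("dr", "")       # -> (["coffee", "tea", "water"], "")
#   registry.get_completions("eat", "")      # -> (None, None)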
class CommandHistory(object):
"""Keeps command history and supports lookup."""
_HISTORY_FILE_NAME = ".tfdbg_history"
def __init__(self, limit=100, history_file_path=None):
"""CommandHistory constructor.
Args:
limit: Maximum number of the most recent commands that this instance
keeps track of, as an int.
history_file_path: (str) Manually specified path to history file. Used in
testing.
"""
self._commands = []
self._limit = limit
self._history_file_path = (
history_file_path or self._get_default_history_file_path())
self._load_history_from_file()
def _load_history_from_file(self):
if os.path.isfile(self._history_file_path):
try:
with open(self._history_file_path, "rt") as history_file:
commands = history_file.readlines()
self._commands = [command.strip() for command in commands
if command.strip()]
# Limit the size of the history file.
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
with open(self._history_file_path, "wt") as history_file:
for command in self._commands:
history_file.write(command + "\n")
    except IOError:
      print("WARNING: reading or writing history file failed.")
def _add_command_to_history_file(self, command):
try:
with open(self._history_file_path, "at") as history_file:
history_file.write(command + "\n")
except IOError:
pass
@classmethod
def _get_default_history_file_path(cls):
return os.path.join(os.path.expanduser("~"), cls._HISTORY_FILE_NAME)
def add_command(self, command):
"""Add a command to the command history.
Args:
command: The history command, as a str.
Raises:
TypeError: if command is not a str.
"""
if self._commands and command == self._commands[-1]:
# Ignore repeating commands in a row.
return
if not isinstance(command, six.string_types):
raise TypeError("Attempt to enter non-str entry to command history")
self._commands.append(command)
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
self._add_command_to_history_file(command)
def most_recent_n(self, n):
"""Look up the n most recent commands.
Args:
n: Number of most recent commands to look up.
Returns:
A list of n most recent commands, or all available most recent commands,
if n exceeds size of the command history, in chronological order.
"""
return self._commands[-n:]
def lookup_prefix(self, prefix, n):
"""Look up the n most recent commands that starts with prefix.
Args:
prefix: The prefix to lookup.
n: Number of most recent commands to look up.
Returns:
A list of n most recent commands that have the specified prefix, or all
available most recent commands that have the prefix, if n exceeds the
number of history commands with the prefix.
"""
commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]
return commands[-n:]
# TODO(cais): Lookup by regex.
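# Illustrative usage sketch (not part of the original module). The history
# file path below is hypothetical, used only to avoid touching the real
# ~/.tfdbg_history file.
#
#   history = CommandHistory(limit=3, history_file_path="/tmp/tfdbg_hist_demo")
#   history.add_command("print_tensor a:0")
#   history.add_command("print_tensor b:0")
#   history.most_recent_n(1)           # -> ["print_tensor b:0"]
#   history.lookup_prefix("print", 5)  # -> ["print_tensor a:0",
#                                      #     "print_tensor b:0"]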
class MenuItem(object):
"""A class for an item in a text-based menu."""
def __init__(self, caption, content, enabled=True):
"""Menu constructor.
TODO(cais): Nested menu is currently not supported. Support it.
Args:
caption: (str) caption of the menu item.
content: Content of the menu item. For a menu item that triggers
a command, for example, content is the command string.
enabled: (bool) whether this menu item is enabled.
"""
self._caption = caption
self._content = content
self._enabled = enabled
@property
def caption(self):
return self._caption
  @property
  def type(self):
    # NOTE: `_node_type` is never assigned anywhere in this class, so
    # accessing this property raises AttributeError. This appears to be
    # dead code.
    return self._node_type
@property
def content(self):
return self._content
def is_enabled(self):
return self._enabled
def disable(self):
self._enabled = False
def enable(self):
self._enabled = True
class Menu(object):
"""A class for text-based menu."""
def __init__(self, name=None):
"""Menu constructor.
Args:
name: (str or None) name of this menu.
"""
self._name = name
self._items = []
def append(self, item):
"""Append an item to the Menu.
Args:
item: (MenuItem) the item to be appended.
"""
self._items.append(item)
def insert(self, index, item):
self._items.insert(index, item)
def num_items(self):
return len(self._items)
def captions(self):
return [item.caption for item in self._items]
def caption_to_item(self, caption):
"""Get a MenuItem from the caption.
Args:
caption: (str) The caption to look up.
Returns:
(MenuItem) The first-match menu item with the caption, if any.
Raises:
LookupError: If a menu item with the caption does not exist.
"""
captions = self.captions()
if caption not in captions:
raise LookupError("There is no menu item with the caption \"%s\"" %
caption)
return self._items[captions.index(caption)]
def format_as_single_line(self,
prefix=None,
divider=" | ",
enabled_item_attrs=None,
disabled_item_attrs=None):
"""Format the menu as a single-line RichTextLines object.
Args:
prefix: (str) String added to the beginning of the line.
divider: (str) The dividing string between the menu items.
enabled_item_attrs: (list or str) Attributes applied to each enabled
menu item, e.g., ["bold", "underline"].
disabled_item_attrs: (list or str) Attributes applied to each
disabled menu item, e.g., ["red"].
Returns:
(RichTextLines) A single-line output representing the menu, with
font_attr_segs marking the individual menu items.
"""
if (enabled_item_attrs is not None and
not isinstance(enabled_item_attrs, list)):
enabled_item_attrs = [enabled_item_attrs]
if (disabled_item_attrs is not None and
not isinstance(disabled_item_attrs, list)):
disabled_item_attrs = [disabled_item_attrs]
menu_line = prefix if prefix is not None else ""
attr_segs = []
for item in self._items:
menu_line += item.caption
item_name_begin = len(menu_line) - len(item.caption)
      if item.is_enabled():
        # Include the MenuItem object itself among the font attributes, so
        # that the UI can recognize this segment as a clickable menu item.
        final_attrs = [item]
if enabled_item_attrs:
final_attrs.extend(enabled_item_attrs)
attr_segs.append((item_name_begin, len(menu_line), final_attrs))
else:
if disabled_item_attrs:
attr_segs.append(
(item_name_begin, len(menu_line), disabled_item_attrs))
menu_line += divider
return RichTextLines(menu_line, font_attr_segs={0: attr_segs})
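# Illustrative usage sketch (not part of the original module): a two-item menu
# rendered as a single line, with the enabled item marked "bold" and the
# disabled item marked "red" in font_attr_segs.
#
#   menu = Menu(name="demo")
#   menu.append(MenuItem("home", "lt"))
#   menu.append(MenuItem("exit", "exit", enabled=False))
#   rtl = menu.format_as_single_line(
#       prefix="| ", enabled_item_attrs="bold", disabled_item_attrs="red")
#   rtl.lines  # -> ["| home | exit | "]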
|
tensorflow-master
|
tensorflow/python/debug/cli/debugger_cli_common.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command parsing module for TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import re
import sys
_BRACKETS_PATTERN = re.compile(r"\[[^\]]*\]")
_QUOTES_PATTERN = re.compile(r"(\"[^\"]*\"|\'[^\']*\')")
_WHITESPACE_PATTERN = re.compile(r"\s+")
_NUMBER_PATTERN = re.compile(r"[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?")
class Interval(object):
"""Represents an interval between a start and end value."""
def __init__(self, start, start_included, end, end_included):
self.start = start
self.start_included = start_included
self.end = end
self.end_included = end_included
def contains(self, value):
if value < self.start or value == self.start and not self.start_included:
return False
if value > self.end or value == self.end and not self.end_included:
return False
return True
def __eq__(self, other):
return (self.start == other.start and
self.start_included == other.start_included and
self.end == other.end and
self.end_included == other.end_included)
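# Example (illustrative, not from the original source): the half-open interval
# (0, 10] contains 10 but not 0.
#
#   iv = Interval(0, False, 10, True)
#   iv.contains(0)    # -> False
#   iv.contains(10)   # -> True
#   iv.contains(5)    # -> True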
def parse_command(command):
"""Parse command string into a list of arguments.
- Disregards whitespace inside double quotes and brackets.
- Strips paired leading and trailing double quotes in arguments.
- Splits the command at whitespace.
Nested double quotes and brackets are not handled.
Args:
command: (str) Input command.
Returns:
(list of str) List of arguments.
"""
command = command.strip()
if not command:
return []
brackets_intervals = [f.span() for f in _BRACKETS_PATTERN.finditer(command)]
quotes_intervals = [f.span() for f in _QUOTES_PATTERN.finditer(command)]
whitespaces_intervals = [
f.span() for f in _WHITESPACE_PATTERN.finditer(command)
]
if not whitespaces_intervals:
return [command]
arguments = []
idx0 = 0
for start, end in whitespaces_intervals + [(len(command), None)]:
# Skip whitespace stretches enclosed in brackets or double quotes.
if not any(interval[0] < start < interval[1]
for interval in brackets_intervals + quotes_intervals):
argument = command[idx0:start]
# Strip leading and trailing double quote if they are paired.
if (argument.startswith("\"") and argument.endswith("\"") or
argument.startswith("'") and argument.endswith("'")):
argument = argument[1:-1]
arguments.append(argument)
idx0 = end
return arguments
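# Illustrative examples (not part of the original module): whitespace inside
# double quotes or brackets does not split arguments, and paired quotes are
# stripped.
#
#   parse_command('print_tensor "hidden layer/weights:0" -a')
#   # -> ['print_tensor', 'hidden layer/weights:0', '-a']
#   parse_command("pt tensor_a[0, :]")
#   # -> ['pt', 'tensor_a[0, :]']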
def extract_output_file_path(args):
"""Extract output file path from command arguments.
Args:
args: (list of str) command arguments.
Returns:
(list of str) Command arguments with the output file path part stripped.
(str or None) Output file path (if any).
Raises:
SyntaxError: If there is no file path after the last ">" character.
"""
if args and args[-1].endswith(">"):
raise SyntaxError("Redirect file path is empty")
elif args and args[-1].startswith(">"):
try:
_parse_interval(args[-1])
if len(args) > 1 and args[-2].startswith("-"):
output_file_path = None
else:
output_file_path = args[-1][1:]
args = args[:-1]
except ValueError:
output_file_path = args[-1][1:]
args = args[:-1]
elif len(args) > 1 and args[-2] == ">":
output_file_path = args[-1]
args = args[:-2]
elif args and args[-1].count(">") == 1:
gt_index = args[-1].index(">")
if gt_index > 0 and args[-1][gt_index - 1] == "=":
output_file_path = None
else:
output_file_path = args[-1][gt_index + 1:]
args[-1] = args[-1][:gt_index]
elif len(args) > 1 and args[-2].endswith(">"):
output_file_path = args[-1]
args = args[:-1]
args[-1] = args[-1][:-1]
else:
output_file_path = None
return args, output_file_path
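# Illustrative examples (not part of the original module):
#
#   extract_output_file_path(["pt", "tensor_a", ">", "/tmp/out.txt"])
#   # -> (["pt", "tensor_a"], "/tmp/out.txt")
#   extract_output_file_path(["pt", "tensor_a>/tmp/out.txt"])
#   # -> (["pt", "tensor_a"], "/tmp/out.txt")
#   extract_output_file_path(["pt", "tensor_a"])
#   # -> (["pt", "tensor_a"], None)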
def parse_tensor_name_with_slicing(in_str):
"""Parse tensor name, potentially suffixed by slicing string.
Args:
in_str: (str) Input name of the tensor, potentially followed by a slicing
string. E.g.: Without slicing string: "hidden/weights/Variable:0", with
slicing string: "hidden/weights/Variable:0[1, :]"
Returns:
(str) name of the tensor
(str) slicing string, if any. If no slicing string is present, return "".
"""
if in_str.count("[") == 1 and in_str.endswith("]"):
tensor_name = in_str[:in_str.index("[")]
tensor_slicing = in_str[in_str.index("["):]
else:
tensor_name = in_str
tensor_slicing = ""
return tensor_name, tensor_slicing
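# Illustrative examples (not part of the original module):
#
#   parse_tensor_name_with_slicing("hidden/weights/Variable:0[1, :]")
#   # -> ("hidden/weights/Variable:0", "[1, :]")
#   parse_tensor_name_with_slicing("hidden/weights/Variable:0")
#   # -> ("hidden/weights/Variable:0", "")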
def validate_slicing_string(slicing_string):
"""Validate a slicing string.
Check if the input string contains only brackets, digits, commas and
colons that are valid characters in numpy-style array slicing.
Args:
slicing_string: (str) Input slicing string to be validated.
Returns:
(bool) True if and only if the slicing string is valid.
"""
return bool(re.search(r"^\[(\d|,|\s|:)+\]$", slicing_string))
def _parse_slices(slicing_string):
"""Construct a tuple of slices from the slicing string.
The string must be a valid slicing string.
Args:
slicing_string: (str) Input slicing string to be parsed.
Returns:
tuple(slice1, slice2, ...)
Raises:
ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
"""
parsed = []
for slice_string in slicing_string[1:-1].split(","):
indices = slice_string.split(":")
if len(indices) == 1:
parsed.append(int(indices[0].strip()))
elif 2 <= len(indices) <= 3:
parsed.append(
slice(*[
int(index.strip()) if index.strip() else None for index in indices
]))
else:
raise ValueError("Invalid tensor-slicing string.")
return tuple(parsed)
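# Illustrative examples (not part of the original module): a bare integer
# selects a single index, while colon-separated items become slice objects.
#
#   validate_slicing_string("[1, 2:5]")  # -> True
#   _parse_slices("[1, 2:5]")            # -> (1, slice(2, 5, None))
#   _parse_slices("[:, 0]")              # -> (slice(None, None, None), 0)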
def parse_indices(indices_string):
"""Parse a string representing indices.
For example, if the input is "[1, 2, 3]", the return value will be a list of
indices: [1, 2, 3]
Args:
indices_string: (str) a string representing indices. Can optionally be
surrounded by a pair of brackets.
Returns:
(list of int): Parsed indices.
"""
# Strip whitespace.
indices_string = re.sub(r"\s+", "", indices_string)
# Strip any brackets at the two ends.
if indices_string.startswith("[") and indices_string.endswith("]"):
indices_string = indices_string[1:-1]
return [int(element) for element in indices_string.split(",")]
def parse_ranges(range_string):
"""Parse a string representing numerical range(s).
Args:
range_string: (str) A string representing a numerical range or a list of
them. For example:
"[-1.0,1.0]", "[-inf, 0]", "[[-inf, -1.0], [1.0, inf]]"
Returns:
(list of list of float) A list of numerical ranges parsed from the input
string.
Raises:
ValueError: If the input doesn't represent a range or a list of ranges.
"""
range_string = range_string.strip()
if not range_string:
return []
if "inf" in range_string:
range_string = re.sub(r"inf", repr(sys.float_info.max), range_string)
ranges = ast.literal_eval(range_string)
if isinstance(ranges, list) and not isinstance(ranges[0], list):
ranges = [ranges]
# Verify that ranges is a list of list of numbers.
for item in ranges:
if len(item) != 2:
raise ValueError("Incorrect number of elements in range")
elif not isinstance(item[0], (int, float)):
raise ValueError("Incorrect type in the 1st element of range: %s" %
type(item[0]))
    elif not isinstance(item[1], (int, float)):
      raise ValueError("Incorrect type in the 2nd element of range: %s" %
                       type(item[1]))
return ranges
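# Illustrative examples (not part of the original module). Note that "inf" is
# substituted with sys.float_info.max before evaluation:
#
#   parse_ranges("[-1.0, 1.0]")  # -> [[-1.0, 1.0]]
#   parse_ranges("[[-inf, -1.0], [1.0, inf]]")
#   # -> [[-1.7976931348623157e+308, -1.0],
#   #     [1.0, 1.7976931348623157e+308]]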
def parse_memory_interval(interval_str):
"""Convert a human-readable memory interval to a tuple of start and end value.
Args:
interval_str: (`str`) A human-readable str representing an interval
(e.g., "[10kB, 20kB]", "<100M", ">100G"). Only the units "kB", "MB", "GB"
are supported. The "B character at the end of the input `str` may be
omitted.
Returns:
`Interval` object where start and end are in bytes.
Raises:
ValueError: if the input is not valid.
"""
str_interval = _parse_interval(interval_str)
interval_start = 0
interval_end = float("inf")
if str_interval.start:
interval_start = parse_readable_size_str(str_interval.start)
if str_interval.end:
interval_end = parse_readable_size_str(str_interval.end)
if interval_start > interval_end:
raise ValueError(
"Invalid interval %s. Start of interval must be less than or equal "
"to end of interval." % interval_str)
return Interval(interval_start, str_interval.start_included,
interval_end, str_interval.end_included)
def parse_time_interval(interval_str):
"""Convert a human-readable time interval to a tuple of start and end value.
Args:
interval_str: (`str`) A human-readable str representing an interval
(e.g., "[10us, 20us]", "<100s", ">100ms"). Supported time suffixes are
us, ms, s.
Returns:
`Interval` object where start and end are in microseconds.
Raises:
ValueError: if the input is not valid.
"""
str_interval = _parse_interval(interval_str)
interval_start = 0
interval_end = float("inf")
if str_interval.start:
interval_start = parse_readable_time_str(str_interval.start)
if str_interval.end:
interval_end = parse_readable_time_str(str_interval.end)
if interval_start > interval_end:
raise ValueError(
"Invalid interval %s. Start must be before end of interval." %
interval_str)
return Interval(interval_start, str_interval.start_included,
interval_end, str_interval.end_included)
def _parse_interval(interval_str):
"""Convert a human-readable interval to a tuple of start and end value.
Args:
interval_str: (`str`) A human-readable str representing an interval
(e.g., "[1M, 2M]", "<100k", ">100ms"). The items following the ">", "<",
">=" and "<=" signs have to start with a number (e.g., 3.0, -2, .98).
The same requirement applies to the items in the parentheses or brackets.
Returns:
Interval object where start or end can be None
if the range is specified as "<N" or ">N" respectively.
Raises:
ValueError: if the input is not valid.
"""
interval_str = interval_str.strip()
if interval_str.startswith("<="):
if _NUMBER_PATTERN.match(interval_str[2:].strip()):
return Interval(start=None, start_included=False,
end=interval_str[2:].strip(), end_included=True)
else:
raise ValueError("Invalid value string after <= in '%s'" % interval_str)
if interval_str.startswith("<"):
if _NUMBER_PATTERN.match(interval_str[1:].strip()):
return Interval(start=None, start_included=False,
end=interval_str[1:].strip(), end_included=False)
else:
raise ValueError("Invalid value string after < in '%s'" % interval_str)
if interval_str.startswith(">="):
if _NUMBER_PATTERN.match(interval_str[2:].strip()):
return Interval(start=interval_str[2:].strip(), start_included=True,
end=None, end_included=False)
else:
raise ValueError("Invalid value string after >= in '%s'" % interval_str)
if interval_str.startswith(">"):
if _NUMBER_PATTERN.match(interval_str[1:].strip()):
return Interval(start=interval_str[1:].strip(), start_included=False,
end=None, end_included=False)
else:
raise ValueError("Invalid value string after > in '%s'" % interval_str)
if (not interval_str.startswith(("[", "("))
or not interval_str.endswith(("]", ")"))):
raise ValueError(
"Invalid interval format: %s. Valid formats are: [min, max], "
"(min, max), <max, >min" % interval_str)
interval = interval_str[1:-1].split(",")
if len(interval) != 2:
raise ValueError(
"Incorrect interval format: %s. Interval should specify two values: "
"[min, max] or (min, max)." % interval_str)
start_item = interval[0].strip()
if not _NUMBER_PATTERN.match(start_item):
raise ValueError("Invalid first item in interval: '%s'" % start_item)
end_item = interval[1].strip()
if not _NUMBER_PATTERN.match(end_item):
raise ValueError("Invalid second item in interval: '%s'" % end_item)
return Interval(start=start_item,
start_included=(interval_str[0] == "["),
end=end_item,
end_included=(interval_str[-1] == "]"))
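# Illustrative examples (not part of the original module). Note that start and
# end are returned as unparsed strings at this stage; unit-aware parsing
# happens in parse_memory_interval and parse_time_interval above.
#
#   _parse_interval("[10kB, 20kB]")
#   # -> Interval(start="10kB", start_included=True,
#   #             end="20kB", end_included=True)
#   _parse_interval(">=100")
#   # -> Interval(start="100", start_included=True,
#   #             end=None, end_included=False)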
def parse_readable_size_str(size_str):
"""Convert a human-readable str representation to number of bytes.
Only the units "kB", "MB", "GB" are supported. The "B character at the end
of the input `str` may be omitted.
Args:
size_str: (`str`) A human-readable str representing a number of bytes
(e.g., "0", "1023", "1.1kB", "24 MB", "23GB", "100 G".
Returns:
(`int`) The parsed number of bytes.
Raises:
ValueError: on failure to parse the input `size_str`.
"""
size_str = size_str.strip()
if size_str.endswith("B"):
size_str = size_str[:-1]
if size_str.isdigit():
return int(size_str)
elif size_str.endswith("k"):
return int(float(size_str[:-1]) * 1024)
elif size_str.endswith("M"):
return int(float(size_str[:-1]) * 1048576)
elif size_str.endswith("G"):
return int(float(size_str[:-1]) * 1073741824)
else:
raise ValueError("Failed to parsed human-readable byte size str: \"%s\"" %
size_str)
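# Illustrative examples (not part of the original module):
#
#   parse_readable_size_str("1023")   # -> 1023
#   parse_readable_size_str("1.1kB")  # -> 1126  (int(1.1 * 1024))
#   parse_readable_size_str("24 MB")  # -> 25165824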
def parse_readable_time_str(time_str):
"""Parses a time string in the format N, Nus, Nms, Ns.
Args:
time_str: (`str`) string consisting of an integer time value optionally
followed by 'us', 'ms', or 's' suffix. If suffix is not specified,
value is assumed to be in microseconds. (e.g. 100us, 8ms, 5s, 100).
Returns:
Microseconds value.
"""
def parse_positive_float(value_str):
value = float(value_str)
if value < 0:
raise ValueError(
"Invalid time %s. Time value must be positive." % value_str)
return value
time_str = time_str.strip()
if time_str.endswith("us"):
return int(parse_positive_float(time_str[:-2]))
elif time_str.endswith("ms"):
return int(parse_positive_float(time_str[:-2]) * 1e3)
elif time_str.endswith("s"):
return int(parse_positive_float(time_str[:-1]) * 1e6)
return int(parse_positive_float(time_str))
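# Illustrative examples (not part of the original module); return values are
# in microseconds:
#
#   parse_readable_time_str("100us")  # -> 100
#   parse_readable_time_str("8ms")    # -> 8000
#   parse_readable_time_str("5s")     # -> 5000000
#   parse_readable_time_str("100")    # -> 100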
def evaluate_tensor_slice(tensor, tensor_slicing):
"""Call eval on the slicing of a tensor, with validation.
Args:
tensor: (numpy ndarray) The tensor value.
tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
None, no slicing will be performed on the tensor.
Returns:
(numpy ndarray) The sliced tensor.
Raises:
ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
"""
_ = tensor
if not validate_slicing_string(tensor_slicing):
raise ValueError("Invalid tensor-slicing string.")
return tensor[_parse_slices(tensor_slicing)]
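# Illustrative example (not part of the original module; assumes
# `import numpy as np`):
#
#   evaluate_tensor_slice(np.arange(12).reshape(3, 4), "[1, :]")
#   # -> array([4, 5, 6, 7])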
def get_print_tensor_argparser(description):
"""Get an ArgumentParser for a command that prints tensor values.
Examples of such commands include print_tensor and print_feed.
Args:
description: Description of the ArgumentParser.
Returns:
An instance of argparse.ArgumentParser.
"""
ap = argparse.ArgumentParser(
description=description, usage=argparse.SUPPRESS)
ap.add_argument(
"tensor_name",
type=str,
help="Name of the tensor, followed by any slicing indices, "
"e.g., hidden1/Wx_plus_b/MatMul:0, "
"hidden1/Wx_plus_b/MatMul:0[1, :]")
ap.add_argument(
"-n",
"--number",
dest="number",
type=int,
default=-1,
help="0-based dump number for the specified tensor. "
"Required for tensor with multiple dumps.")
ap.add_argument(
"-r",
"--ranges",
dest="ranges",
type=str,
default="",
help="Numerical ranges to highlight tensor elements in. "
"Examples: -r 0,1e-8, -r [-0.1,0.1], "
"-r \"[[-inf, -0.1], [0.1, inf]]\"")
ap.add_argument(
"-a",
"--all",
dest="print_all",
action="store_true",
help="Print the tensor in its entirety, i.e., do not use ellipses.")
ap.add_argument(
"-s",
"--numeric_summary",
action="store_true",
help="Include summary for non-empty tensors of numeric (int*, float*, "
"complex*) and Boolean types.")
ap.add_argument(
"-w",
"--write_path",
type=str,
default="",
help="Path of the numpy file to write the tensor data to, using "
"numpy.save().")
return ap
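# Illustrative usage sketch (not part of the original module):
#
#   ap = get_print_tensor_argparser("Print the value of a tensor.")
#   parsed = ap.parse_args(["hidden1/Wx_plus_b/MatMul:0[1, :]", "-a"])
#   parsed.tensor_name  # -> "hidden1/Wx_plus_b/MatMul:0[1, :]"
#   parsed.print_all    # -> True
#   parsed.number       # -> -1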
|
tensorflow-master
|
tensorflow/python/debug/cli/command_parser.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Readline-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import readline
import six
from tensorflow.python.debug.cli import base_ui
from tensorflow.python.debug.cli import debugger_cli_common
class ReadlineUI(base_ui.BaseUI):
"""Readline-based Command-line UI."""
def __init__(self, on_ui_exit=None, config=None):
base_ui.BaseUI.__init__(self, on_ui_exit=on_ui_exit, config=config)
self._init_input()
def _init_input(self):
readline.parse_and_bind("set editing-mode emacs")
# Disable default readline delimiter in order to receive the full text
# (not just the last word) in the completer.
readline.set_completer_delims("\n")
readline.set_completer(self._readline_complete)
readline.parse_and_bind("tab: complete")
self._input = six.moves.input
def _readline_complete(self, text, state):
context, prefix, except_last_word = self._analyze_tab_complete_input(text)
candidates, _ = self._tab_completion_registry.get_completions(context,
prefix)
candidates = [(except_last_word + candidate) for candidate in candidates]
return candidates[state]
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details."""
print(title)
if init_command is not None:
self._dispatch_command(init_command)
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
return exit_token
def _ui_loop(self):
while True:
command = self._get_user_command()
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
def _get_user_command(self):
print("")
return self._input(self.CLI_PROMPT).strip()
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
print(str(e))
return
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=None)
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
print("Wrote output to %s" % output_file_path)
except Exception: # pylint: disable=broad-except
print("Failed to write output to %s" % output_file_path)
def _display_output(self, screen_output):
for line in screen_output.lines:
print(line)
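# Illustrative usage sketch (not part of the original module; requires an
# interactive terminal, and the "echo" handler below is hypothetical):
#
#   def echo(argv, screen_info=None):
#     return debugger_cli_common.RichTextLines([" ".join(argv)])
#
#   ui = ReadlineUI()
#   ui.register_command_handler("echo", echo, "Echo the arguments.",
#                               prefix_aliases=["e"])
#   ui.run_ui(title="readline UI demo")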
|
tensorflow-master
|
tensorflow/python/debug/cli/readline_ui.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the readline-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import readline_ui
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class MockReadlineUI(readline_ui.ReadlineUI):
"""Test subclass of ReadlineUI that bypasses terminal manipulations."""
def __init__(self, on_ui_exit=None, command_sequence=None):
readline_ui.ReadlineUI.__init__(
self, on_ui_exit=on_ui_exit,
config=cli_config.CLIConfig(config_file_path=tempfile.mktemp()))
self._command_sequence = command_sequence
self._command_counter = 0
self.observers = {"screen_outputs": []}
def _get_user_command(self):
command = self._command_sequence[self._command_counter]
self._command_counter += 1
return command
def _display_output(self, screen_output):
self.observers["screen_outputs"].append(screen_output)
class ReadlineTest(test_util.TensorFlowTestCase):
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
parsed = ap.parse_args(args)
lines = ["bar"] * parsed.num_times
return debugger_cli_common.RichTextLines(lines)
def testUIFactoryCreatesReadlineUI(self):
ui = ui_factory.get_ui("readline")
self.assertIsInstance(ui, readline_ui.ReadlineUI)
def testUIFactoryRaisesExceptionOnInvalidUIType(self):
with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'foobar'"):
ui_factory.get_ui("foobar")
def testUIFactoryRaisesExceptionOnInvalidUITypeGivenAvailable(self):
with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'readline'"):
ui_factory.get_ui("readline", available_ui_types=["curses"])
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockReadlineUI(command_sequence=["exit"])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.observers["screen_outputs"]))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockReadlineUI(command_sequence=["", "exit"])
ui.run_ui()
self.assertEqual(1, len(ui.observers["screen_outputs"]))
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(1, len(screen_outputs))
self.assertEqual(["bar"] * 60, screen_outputs[0].lines)
def testRunUIWithValidUsersCommands(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["babble -n 3", "babble -n 6", "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(2, len(screen_outputs))
self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
self.assertEqual(["bar"] * 6, screen_outputs[1].lines)
def testRunUIWithInvalidUsersCommands(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["babble -n 3", "wobble", "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(2, len(screen_outputs))
self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"wobble\""],
screen_outputs[1].lines)
def testRunUIWithOnUIExitCallback(self):
observer = {"callback_invoked": False}
def callback_for_test():
observer["callback_invoked"] = True
ui = MockReadlineUI(on_ui_exit=callback_for_test, command_sequence=["exit"])
self.assertFalse(observer["callback_invoked"])
ui.run_ui()
self.assertEqual(0, len(ui.observers["screen_outputs"]))
self.assertTrue(observer["callback_invoked"])
def testIncompleteRedirectWorks(self):
output_path = tempfile.mktemp()
ui = MockReadlineUI(
command_sequence=["babble -n 2 > %s" % output_path, "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(1, len(screen_outputs))
self.assertEqual(["bar"] * 2, screen_outputs[0].lines)
with gfile.Open(output_path, "r") as f:
self.assertEqual("bar\nbar\n", f.read())
def testConfigSetAndShow(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=[
"config set graph_recursion_depth 5", "config show", "exit"])
ui.run_ui()
outputs = ui.observers["screen_outputs"]
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: 5"], outputs[1].lines[:3])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/cli/readline_ui_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfdbg example: debugging tf.keras models training on tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
def main(_):
# Create a dummy dataset.
num_examples = 8
steps_per_epoch = 2
input_dims = 3
output_dims = 1
xs = np.zeros([num_examples, input_dims])
ys = np.zeros([num_examples, output_dims])
dataset = tf.data.Dataset.from_tensor_slices(
(xs, ys)).repeat(num_examples).batch(int(num_examples / steps_per_epoch))
sess = tf.Session()
if FLAGS.debug:
# Use the command-line interface (CLI) of tfdbg.
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
elif FLAGS.tensorboard_debug_address:
# Use the TensorBoard Debugger Plugin (GUI of tfdbg).
sess = tf_debug.TensorBoardDebugWrapperSession(
sess, FLAGS.tensorboard_debug_address)
tf.keras.backend.set_session(sess)
# Create a dummy model.
model = tf.keras.Sequential([
tf.keras.layers.Dense(1, input_shape=[input_dims])])
model.compile(loss="mse", optimizer="sgd")
# Train the model using the dummy dataset created above.
model.fit(dataset, epochs=FLAGS.epochs, steps_per_epoch=steps_per_epoch)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training. "
"Mutually exclusive with the --tensorboard_debug_address flag.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline).")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
parser.add_argument(
"--epochs",
type=int,
default=2,
help="Number of epochs to train the model for.")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/python/debug/examples/debug_keras.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debug the tf-learn iris example, based on the tf-learn tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import tensorflow as tf
from tensorflow.python import debug as tf_debug
_IRIS_INPUT_DIM = 4
def main(_):
  # Generate some fake Iris data.
  # Fake data is acceptable here because this example is about how to use the
  # debugger, not about solving the Iris classification problem with machine
  # learning.
  def training_input_fn():
    return ({"features": tf.random_normal([128, _IRIS_INPUT_DIM])},
            tf.random_uniform([128], minval=0, maxval=3, dtype=tf.int32))
  def test_input_fn():
    return ({"features": tf.random_normal([32, _IRIS_INPUT_DIM])},
            tf.random_uniform([32], minval=0, maxval=3, dtype=tf.int32))
  feature_columns = [
      tf.feature_column.numeric_column("features", shape=(_IRIS_INPUT_DIM,))]
# Build 3 layer DNN with 10, 20, 10 units respectively.
model_dir = FLAGS.model_dir or tempfile.mkdtemp(prefix="debug_tflearn_iris_")
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
hooks = []
if FLAGS.debug:
hooks.append(tf_debug.LocalCLIDebugHook(ui_type=FLAGS.ui_type,
dump_root=FLAGS.dump_root))
elif FLAGS.tensorboard_debug_address:
hooks.append(tf_debug.TensorBoardDebugHook(FLAGS.tensorboard_debug_address))
# Train model, using tfdbg hook.
classifier.train(training_input_fn,
steps=FLAGS.train_steps,
hooks=hooks)
# Evaluate accuracy, using tfdbg hook.
accuracy_score = classifier.evaluate(test_input_fn,
steps=FLAGS.eval_steps,
hooks=hooks)["accuracy"]
print("After training %d steps, Accuracy = %f" %
(FLAGS.train_steps, accuracy_score))
# Make predictions, using tfdbg hook.
predict_results = classifier.predict(test_input_fn, hooks=hooks)
print("A prediction result: %s" % next(predict_results))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/iris_data",
help="Directory to save the training and test data in.")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Directory to save the trained model in.")
parser.add_argument(
"--train_steps",
type=int,
default=10,
help="Number of steps to run training for.")
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of steps to run evaluation foir.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training. "
"Mutually exclusive with the --tensorboard_debug_address flag.")
parser.add_argument(
"--dump_root",
type=str,
default="",
help="Optional custom root directory for temporary debug dump data")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/python/debug/examples/debug_tflearn_iris.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of debugging TensorFlow runtime errors using tfdbg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
ph_float = tf.placeholder(tf.float32, name="ph_float")
x = tf.transpose(ph_float, name="x")
v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
m = tf.constant(
np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
dtype=tf.float32,
name="m")
y = tf.matmul(m, x, name="y")
z = tf.matmul(m, v, name="z")
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
if FLAGS.error == "shape_mismatch":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
elif FLAGS.error == "uninitialized_variable":
print(sess.run(z))
elif FLAGS.error == "no_error":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
else:
raise ValueError("Unrecognized error type: " + FLAGS.error)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--error",
type=str,
default="shape_mismatch",
help="""\
Type of the error to generate (shape_mismatch | uninitialized_variable |
no_error).\
""")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/python/debug/examples/debug_errors.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo of the tfdbg curses UI: A TF network computing Fibonacci sequence."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python import debug as tf_debug
FLAGS = None
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
n0 = tf.Variable(
np.ones([FLAGS.tensor_size] * 2), dtype=tf.int32, name="node_00")
n1 = tf.Variable(
np.ones([FLAGS.tensor_size] * 2), dtype=tf.int32, name="node_01")
for i in xrange(2, FLAGS.length):
n0, n1 = n1, tf.add(n0, n1, name="node_%.2d" % i)
sess.run(tf.global_variables_initializer())
# Wrap the TensorFlow Session object for debugging.
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
def has_negative(_, tensor):
return np.any(tensor < 0)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
sess.add_tensor_filter("has_negative", has_negative)
elif FLAGS.tensorboard_debug_address:
sess = tf_debug.TensorBoardDebugWrapperSession(
sess, FLAGS.tensorboard_debug_address)
print("Fibonacci number at position %d:\n%s" %
(FLAGS.length, sess.run(n1)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--tensor_size",
type=int,
default=1,
help="""\
Size of tensor. E.g., if the value is 30, the tensors will have shape
[30, 30].\
""")
parser.add_argument(
"--length",
type=int,
default=20,
help="Length of the fibonacci sequence to compute.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
help="Use TensorFlow Debugger (tfdbg). Mutually exclusive with the "
"--tensorboard_debug_address flag.")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
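# Example usage (a sketch): run with --debug to enter the tfdbg CLI, then use
# the tensor filters registered above to locate the first overflowed tensor:
#   python debug_fibonacci.py --debug --length=100
#   tfdbg> run -f has_negative
# (The int32 additions overflow for lengths beyond roughly the 47th term,
# wrapping around to negative values, which the custom has_negative filter
# flags.)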
|
tensorflow-master
|
tensorflow/python/debug/examples/debug_fibonacci.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo of the tfdbg curses CLI: Locating the source of bad numerical values.
The neural network in this demo is largely based on the tutorial at:
tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
Modifications have been made so that problematic numerical values (infs and
nans) appear in nodes of the graph during training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python import debug as tf_debug
IMAGE_SIZE = 28
HIDDEN_SIZE = 500
NUM_LABELS = 10
RAND_SEED = 42
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir,
one_hot=True,
fake_data=FLAGS.fake_data)
def feed_dict(train):
if train or FLAGS.fake_data:
xs, ys = mnist.train.next_batch(FLAGS.train_batch_size,
fake_data=FLAGS.fake_data)
else:
xs, ys = mnist.test.images, mnist.test.labels
return {x: xs, y_: ys}
sess = tf.InteractiveSession()
# Create the MNIST neural network graph.
# Input placeholders.
with tf.name_scope("input"):
x = tf.placeholder(
tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE], name="x-input")
y_ = tf.placeholder(tf.float32, [None, NUM_LABELS], name="y-input")
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
initial = tf.truncated_normal(shape, stddev=0.1, seed=RAND_SEED)
return tf.Variable(initial)
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
"""Reusable code for making a simple neural net layer."""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope("weights"):
weights = weight_variable([input_dim, output_dim])
with tf.name_scope("biases"):
biases = bias_variable([output_dim])
with tf.name_scope("Wx_plus_b"):
preactivate = tf.matmul(input_tensor, weights) + biases
activations = act(preactivate)
return activations
hidden = nn_layer(x, IMAGE_SIZE**2, HIDDEN_SIZE, "hidden")
logits = nn_layer(hidden, HIDDEN_SIZE, NUM_LABELS, "output", tf.identity)
y = tf.nn.softmax(logits)
with tf.name_scope("cross_entropy"):
# The following line is the culprit of the bad numerical values that appear
# during training of this graph. The log of zero gives -inf, which is first
# seen in the intermediate tensor "cross_entropy/Log:0" during the 4th run()
# call. Multiplying those -inf values by zeros then leads to nans, which
# first appear in "cross_entropy/mul:0".
#
# You can use the built-in, numerically-stable implementation to fix this
# issue:
# diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits)
diff = -(y_ * tf.log(y))
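# For illustration, the same arithmetic in numpy (which mirrors the TF
# kernels used here):
#   np.log(0.0)        # -> -inf
#   0.0 * np.log(0.0)  # -> nan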
with tf.name_scope("total"):
cross_entropy = tf.reduce_mean(diff)
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
cross_entropy)
with tf.name_scope("accuracy"):
with tf.name_scope("correct_prediction"):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
with tf.name_scope("accuracy"):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
if FLAGS.debug and FLAGS.tensorboard_debug_address:
raise ValueError(
"The --debug and --tensorboard_debug_address flags are mutually "
"exclusive.")
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
elif FLAGS.tensorboard_debug_address:
sess = tf_debug.TensorBoardDebugWrapperSession(
sess, FLAGS.tensorboard_debug_address)
# At this point, sess is a debug wrapper around the actual Session if
# FLAGS.debug is true. In that case, calling run() will launch the CLI.
for i in range(FLAGS.max_steps):
acc = sess.run(accuracy, feed_dict=feed_dict(False))
print("Accuracy at step %d: %s" % (i, acc))
sess.run(train_step, feed_dict=feed_dict(True))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--max_steps",
type=int,
default=10,
help="Number of steps to run trainer.")
parser.add_argument(
"--train_batch_size",
type=int,
default=100,
help="Batch size used during training.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.025,
help="Initial learning rate.")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/mnist_data",
help="Directory for storing data")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--fake_data",
type="bool",
nargs="?",
const=True,
default=False,
help="Use fake MNIST data for unit testing")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training. "
"Mutually exclusive with the --tensorboard_debug_address flag.")
parser.add_argument(
"--tensorboard_debug_address",
type=str,
default=None,
help="Connect to the TensorBoard Debugger Plugin backend specified by "
"the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
"--debug flag.")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
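# Example invocation (a sketch): train with the debugger attached, then let
# the built-in filter find the first inf/nan tensor:
#   python debug_mnist.py --debug
#   tfdbg> run -f has_inf_or_nan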
|
tensorflow-master
|
tensorflow/python/debug/examples/debug_mnist.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfdbg module debug_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import shutil
import tempfile
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class DeviceNamePathConversionTest(test_util.TensorFlowTestCase):
def testDeviceNameToDevicePath(self):
self.assertEqual(
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_ps,replica_1,task_2,cpu_0",
debug_data.device_name_to_device_path("/job:ps/replica:1/task:2/cpu:0"))
def testDevicePathToDeviceName(self):
self.assertEqual(
"/job:ps/replica:1/task:2/cpu:0",
debug_data.device_path_to_device_name(
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_ps,replica_1,task_2,cpu_0"))
class HasNanOrInfTest(test_util.TensorFlowTestCase):
def setUp(self):
self._dummy_datum = dummy_datum = debug_data.DebugTensorDatum(
"/foo", "bar_0_DebugIdentity_42")
def testNaN(self):
a = np.array([np.nan, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInf(self):
a = np.array([np.inf, np.inf, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testNanAndInf(self):
a = np.array([np.inf, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testNoNanOrInf(self):
a = np.array([0.0, 0.0, 7.0])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testEmpty(self):
a = np.array([])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInconvertibleTensorProto(self):
self.assertFalse(debug_data.has_inf_or_nan(
self._dummy_datum,
debug_data.InconvertibleTensorProto(tensor_pb2.TensorProto(),
initialized=False)))
self.assertFalse(debug_data.has_inf_or_nan(
self._dummy_datum,
debug_data.InconvertibleTensorProto(tensor_pb2.TensorProto(),
initialized=True)))
def testDTypeComplexWorks(self):
a = np.array([1j, 3j, 3j, 7j], dtype=np.complex128)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
b = np.array([1j, 3j, 3j, 7j, np.nan], dtype=np.complex128)
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, b))
def testDTypeIntegerWorks(self):
a = np.array([1, 3, 3, 7], dtype=np.int16)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testDTypeStringGivesFalse(self):
"""isnan and isinf are not applicable to strings."""
a = np.array(["s", "p", "a", "m"])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testDTypeObjectGivesFalse(self):
dt = np.dtype([("spam", np.str_, 16), ("eggs", np.float64, (2,))])
a = np.array([("spam", (8.0, 7.0)), ("eggs", (6.0, 5.0))], dtype=dt)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
class DebugTensorDatumTest(test_util.TensorFlowTestCase):
def testDebugDatum(self):
dump_root = "/tmp/tfdbg_1"
debug_dump_rel_path = (
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,cpu_0" +
"/ns1/ns2/node_a_1_2_DebugIdentity_1472563253536385")
datum = debug_data.DebugTensorDatum(dump_root, debug_dump_rel_path)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertEqual("ns1/ns2/node_a_1", datum.node_name)
self.assertEqual(2, datum.output_slot)
self.assertEqual("ns1/ns2/node_a_1:2", datum.tensor_name)
self.assertEqual(1472563253536385, datum.timestamp)
self.assertEqual("ns1/ns2/node_a_1:2:DebugIdentity", datum.watch_key)
self.assertEqual(
os.path.join(dump_root, debug_dump_rel_path), datum.file_path)
self.assertEqual(
"{DebugTensorDatum (/job:localhost/replica:0/task:0/cpu:0) "
"%s:%d @ %s @ %d}" % (datum.node_name,
datum.output_slot,
datum.debug_op,
datum.timestamp), str(datum))
self.assertEqual(
"{DebugTensorDatum (/job:localhost/replica:0/task:0/cpu:0) "
"%s:%d @ %s @ %d}" % (datum.node_name,
datum.output_slot,
datum.debug_op,
datum.timestamp), repr(datum))
def testDumpSizeBytesIsNoneForNonexistentFilePath(self):
dump_root = "/tmp/tfdbg_1"
debug_dump_rel_path = "ns1/ns2/node_foo_1_2_DebugIdentity_1472563253536385"
datum = debug_data.DebugTensorDatum(dump_root, debug_dump_rel_path)
self.assertIsNone(datum.dump_size_bytes)
class DebugDumpDirTest(test_util.TensorFlowTestCase):
def setUp(self):
self._dump_root = tempfile.mktemp()
os.mkdir(self._dump_root)
def tearDown(self):
# Tear down temporary dump directory.
shutil.rmtree(self._dump_root)
def _makeDataDirWithMultipleDevicesAndDuplicateNodeNames(self):
cpu_0_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,cpu_0")
gpu_0_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,device_GPU_0")
gpu_1_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,device_GPU_1")
os.makedirs(cpu_0_dir)
os.makedirs(gpu_0_dir)
os.makedirs(gpu_1_dir)
open(os.path.join(
cpu_0_dir, "node_foo_1_2_DebugIdentity_1472563253536386"), "wb")
open(os.path.join(
gpu_0_dir, "node_foo_1_2_DebugIdentity_1472563253536385"), "wb")
open(os.path.join(
gpu_1_dir, "node_foo_1_2_DebugIdentity_1472563253536387"), "wb")
def testDebugDumpDir_nonexistentDumpRoot(self):
with self.assertRaisesRegexp(IOError, "does not exist"):
debug_data.DebugDumpDir(tempfile.mktemp() + "_foo")
def testDebugDumpDir_invalidFileNamingPattern(self):
# File name with too few underscores should lead to an exception.
device_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,cpu_0")
os.makedirs(device_dir)
open(os.path.join(device_dir, "node1_DebugIdentity_1234"), "wb")
with self.assertRaisesRegexp(ValueError,
"does not conform to the naming pattern"):
debug_data.DebugDumpDir(self._dump_root)
def testDebugDumpDir_validDuplicateNodeNamesWithMultipleDevices(self):
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
graph_cpu_0 = graph_pb2.GraphDef()
node = graph_cpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
graph_gpu_0 = graph_pb2.GraphDef()
node = graph_gpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:0"
graph_gpu_1 = graph_pb2.GraphDef()
node = graph_gpu_1.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
dump_dir = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1])
self.assertItemsEqual(
["/job:localhost/replica:0/task:0/cpu:0",
"/job:localhost/replica:0/task:0/device:GPU:0",
"/job:localhost/replica:0/task:0/device:GPU:1"], dump_dir.devices())
self.assertEqual(1472563253536385, dump_dir.t0)
self.assertEqual(3, dump_dir.size)
with self.assertRaisesRegexp(
ValueError, r"Invalid device name: "):
dump_dir.nodes("/job:localhost/replica:0/task:0/device:GPU:2")
self.assertItemsEqual(["node_foo_1", "node_foo_1", "node_foo_1"],
dump_dir.nodes())
self.assertItemsEqual(
["node_foo_1"],
dump_dir.nodes(device_name="/job:localhost/replica:0/task:0/cpu:0"))
def testDuplicateNodeNamesInGraphDefOfSingleDeviceRaisesException(self):
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
graph_cpu_0 = graph_pb2.GraphDef()
node = graph_cpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
graph_gpu_0 = graph_pb2.GraphDef()
node = graph_gpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:0"
graph_gpu_1 = graph_pb2.GraphDef()
node = graph_gpu_1.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
node = graph_gpu_1.node.add() # Here is the duplicate.
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
with self.assertRaisesRegexp(
ValueError, r"Duplicate node name on device "):
debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1])
def testDebugDumpDir_emptyDumpDir(self):
dump_dir = debug_data.DebugDumpDir(self._dump_root)
self.assertIsNone(dump_dir.t0)
self.assertEqual([], dump_dir.dumped_tensor_data)
def testDebugDumpDir_usesGfileGlob(self):
if platform.system() == "Windows":
self.skipTest("gfile.Glob is not used on Windows.")
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
def fake_gfile_glob(glob_pattern):
del glob_pattern
return []
with test.mock.patch.object(
gfile, "Glob", side_effect=fake_gfile_glob, autospec=True) as fake:
debug_data.DebugDumpDir(self._dump_root)
expected_calls = [
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.CORE_METADATA_TAG + "*"))),
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.FETCHES_INFO_FILE_TAG + "*"))),
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.FEED_KEYS_INFO_FILE_TAG + "*"))),
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.DEVICE_TAG + "*")))]
fake.assert_has_calls(expected_calls, any_order=True)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_data_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""gRPC debug server in Python."""
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import threading
import time
from concurrent import futures
import grpc
from six.moves import queue
from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_service_pb2_grpc
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
DebugWatch = collections.namedtuple("DebugWatch",
["node_name", "output_slot", "debug_op"])
def _state_change(new_state, node_name, output_slot, debug_op):
state_change = debug_service_pb2.EventReply.DebugOpStateChange()
state_change.state = new_state
state_change.node_name = node_name
state_change.output_slot = output_slot
state_change.debug_op = debug_op
return state_change
class EventListenerBaseStreamHandler(object):
"""Per-stream handler of EventListener gRPC streams."""
def __init__(self):
"""Constructor of EventListenerBaseStreamHandler."""
def on_core_metadata_event(self, event):
"""Callback for core metadata.
Args:
event: The Event proto that carries a JSON string in its
`log_message.message` field.
Returns:
`None` or an `EventReply` proto to be sent back to the client. If `None`,
an `EventReply` proto constructed with the default no-arg constructor will
be sent back to the client.
"""
raise NotImplementedError(
"on_core_metadata_event() is not implemented in the base servicer "
"class")
def on_graph_def(self, graph_def, device_name, wall_time):
"""Callback for Event proto received through the gRPC stream.
This Event proto carries a GraphDef, encoded as bytes, in its graph_def
field.
Args:
graph_def: A GraphDef object.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
Returns:
`None` or an `EventReply` proto to be sent back to the client. If `None`,
an `EventReply` proto constructed with the default no-arg constructor will
be sent back to the client.
"""
raise NotImplementedError(
"on_graph_def() is not implemented in the base servicer class")
def on_value_event(self, event):
"""Callback for Event proto received through the gRPC stream.
This Event proto carries a Tensor in its summary.value[0] field.
Args:
event: The Event proto from the stream to be processed.
"""
raise NotImplementedError(
"on_value_event() is not implemented in the base servicer class")
class EventListenerBaseServicer(debug_service_pb2_grpc.EventListenerServicer):
"""Base Python class for gRPC debug server."""
def __init__(self, server_port, stream_handler_class):
"""Constructor.
Args:
server_port: (int) Port number to bind to.
stream_handler_class: A subclass of `EventListenerBaseStreamHandler` that
will be used to construct stream handler objects during `SendEvents`
calls.
"""
self._server_port = server_port
self._stream_handler_class = stream_handler_class
self._server_lock = threading.Lock()
self._server_started = False
self._stop_requested = False
self._debug_ops_state_change_queue = queue.Queue()
self._gated_grpc_debug_watches = set()
self._breakpoints = set()
def SendEvents(self, request_iterator, context):
"""Implementation of the SendEvents service method.
This method receives streams of Event protos from the client and processes
them via the stream handler's on_*() callbacks. The stream is
bi-directional, but currently only the client-to-server stream (i.e., the
stream from the debug ops to the server) is used.
Args:
request_iterator: The incoming stream of Event protos.
context: Server context.
Raises:
ValueError: If more than one core metadata event is received.
Yields:
`EventReply` protos, which may carry queued debug-op state changes.
"""
core_metadata_count = 0
# A map from "<hash>,<device>,<wall_time>" keys to lists of received chunks.
graph_def_chunks = {}
tensor_chunks = {}
stream_handler = None
for event in request_iterator:
if not stream_handler:
stream_handler = self._stream_handler_class()
if event.summary and event.summary.value:
# An Event proto carrying a tensor value.
maybe_tensor_event = self._process_tensor_event_in_chunks(
event, tensor_chunks)
if maybe_tensor_event:
event_reply = stream_handler.on_value_event(maybe_tensor_event)
if event_reply is not None:
yield self._process_debug_op_state_changes(event_reply)
else:
# Non-tensor-value Event.
if event.graph_def:
# GraphDef-carrying Event.
maybe_graph_def, maybe_device_name, maybe_wall_time = (
self._process_encoded_graph_def_in_chunks(
event, graph_def_chunks))
if maybe_graph_def:
reply = stream_handler.on_graph_def(
maybe_graph_def, maybe_device_name, maybe_wall_time)
yield self._process_debug_op_state_changes(reply)
elif event.log_message.message:
# Core metadata-carrying Event.
core_metadata_count += 1
if core_metadata_count > 1:
raise ValueError(
"Expected one core metadata event; received multiple")
reply = stream_handler.on_core_metadata_event(event)
yield self._process_debug_op_state_changes(reply)
def _process_debug_op_state_changes(self, event_reply=None):
"""Dequeue and process all the queued debug-op state change protos.
Include all the debug-op state change protos in an `EventReply` proto.
Args:
event_reply: An `EventReply` to add the `DebugOpStateChange` protos to,
or `None`.
Returns:
An `EventReply` proto with the dequeued `DebugOpStateChange` protos (if
any) added.
"""
if event_reply is None:
event_reply = debug_service_pb2.EventReply()
while not self._debug_ops_state_change_queue.empty():
state_change = self._debug_ops_state_change_queue.get()
debug_node_key = (state_change.node_name, state_change.output_slot,
state_change.debug_op)
if (state_change.state ==
debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE):
logging.info("Adding breakpoint %s:%d:%s", state_change.node_name,
state_change.output_slot, state_change.debug_op)
self._breakpoints.add(debug_node_key)
elif (state_change.state ==
debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY):
logging.info("Adding watchpoint %s:%d:%s", state_change.node_name,
state_change.output_slot, state_change.debug_op)
if debug_node_key in self._breakpoints:
self._breakpoints.discard(debug_node_key)
elif (state_change.state ==
debug_service_pb2.EventReply.DebugOpStateChange.DISABLED):
logging.info("Removing watchpoint or breakpoint: %s:%d:%s",
state_change.node_name, state_change.output_slot,
state_change.debug_op)
if debug_node_key in self._breakpoints:
self._breakpoints.discard(debug_node_key)
else:
logging.warn(
"Attempting to remove a non-existent debug node key: %s",
debug_node_key)
new_state_change = event_reply.debug_op_state_changes.add()
new_state_change.CopyFrom(state_change)
return event_reply
def _process_tensor_event_in_chunks(self, event, tensor_chunks):
"""Possibly reassemble event chunks.
Due to gRPC's message size limit, a large tensor can be encapsulated in
multiple Event proto chunks to be sent through the debugger stream. This
method keeps track of the chunks that have arrived, reassembles all chunks
corresponding to a tensor once they have all arrived, and returns the
reassembled Event proto.
Args:
event: The single Event proto that has arrived.
tensor_chunks: A dict used to keep track of the Event protos that have
arrived but haven't been reassembled.
Returns:
If all Event protos corresponding to a tensor have arrived, returns the
reassembled Event proto. Otherwise, returns `None`.
"""
value = event.summary.value[0]
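# The plugin metadata is a JSON string of the form (a sketch):
#   {"device": "/job:localhost/...", "numChunks": 3, "chunkIndex": 0}
# whose fields are read out below.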
debugger_plugin_metadata = json.loads(
compat.as_text(value.metadata.plugin_data.content))
device_name = debugger_plugin_metadata["device"]
num_chunks = debugger_plugin_metadata["numChunks"]
chunk_index = debugger_plugin_metadata["chunkIndex"]
if num_chunks <= 1:
return event
debug_node_name = value.node_name
timestamp = int(event.wall_time)
tensor_key = "%s_%s_%d" % (device_name, debug_node_name, timestamp)
if tensor_key not in tensor_chunks:
tensor_chunks[tensor_key] = [None] * num_chunks
chunks = tensor_chunks[tensor_key]
if value.tensor.tensor_content:
chunks[chunk_index] = value.tensor
elif value.tensor.string_val:
chunks[chunk_index] = event
if None not in chunks:
if value.tensor.tensor_content:
event.summary.value[0].tensor.tensor_content = b"".join(
chunk.tensor_content for chunk in chunks)
del tensor_chunks[tensor_key]
return event
elif value.tensor.string_val:
merged_event = chunks[0]
for chunk in chunks[1:]:
merged_event.summary.value[0].tensor.string_val.extend(
list(chunk.summary.value[0].tensor.string_val))
del tensor_chunks[tensor_key]
return merged_event
def _process_encoded_graph_def_in_chunks(self,
event,
graph_def_chunks):
"""Process an Event proto containing a chunk of encoded GraphDef.
Args:
event: the Event proto containing the chunk of encoded GraphDef.
graph_def_chunks: A dict mapping keys for GraphDefs (i.e.,
"<graph_def_hash>,<device_name>,<wall_time>") to a list of chunks of
encoded GraphDefs.
Returns:
If all chunks of the GraphDef have arrived,
return decoded GraphDef proto, device name, wall_time.
Otherwise,
return None, None, None.
"""
graph_def = graph_pb2.GraphDef()
index_bar_0 = event.graph_def.find(b"|")
index_bar_1 = event.graph_def.find(b"|", index_bar_0 + 1)
index_bar_2 = event.graph_def.find(b"|", index_bar_1 + 1)
graph_def_hash_device_timestamp = event.graph_def[:index_bar_0]
chunk_index = int(event.graph_def[index_bar_0 + 1 : index_bar_1])
num_chunks = int(event.graph_def[index_bar_1 + 1 : index_bar_2])
if graph_def_hash_device_timestamp not in graph_def_chunks:
graph_def_chunks[graph_def_hash_device_timestamp] = [None] * num_chunks
graph_def_chunks[graph_def_hash_device_timestamp][
chunk_index] = event.graph_def[index_bar_2 + 1:]
if all(graph_def_chunks[graph_def_hash_device_timestamp]):
device_name = graph_def_hash_device_timestamp.split(b",")[1]
wall_time = int(graph_def_hash_device_timestamp.split(b",")[2])
graph_def.ParseFromString(
b"".join(graph_def_chunks[graph_def_hash_device_timestamp]))
del graph_def_chunks[graph_def_hash_device_timestamp]
self._process_graph_def(graph_def)
return graph_def, device_name, wall_time
else:
return None, None, None
def _process_graph_def(self, graph_def):
for node_def in graph_def.node:
if (debug_graphs.is_debug_node(node_def.name) and
node_def.attr["gated_grpc"].b):
node_name, output_slot, _, debug_op = (
debug_graphs.parse_debug_node_name(node_def.name))
self._gated_grpc_debug_watches.add(
DebugWatch(node_name, output_slot, debug_op))
def run_server(self, blocking=True):
"""Start running the server.
Args:
blocking: If `True`, block until `stop_server()` is invoked.
Raises:
ValueError: If server stop has already been requested, or if the server
has already started running.
"""
self._server_lock.acquire()
try:
if self._stop_requested:
raise ValueError("Server has already stopped")
if self._server_started:
raise ValueError("Server has already started running")
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
debug_service_pb2_grpc.add_EventListenerServicer_to_server(self,
self.server)
self.server.add_insecure_port("[::]:%d" % self._server_port)
self.server.start()
self._server_started = True
finally:
self._server_lock.release()
if blocking:
while not self._stop_requested:
time.sleep(1.0)
def stop_server(self, grace=1.0):
"""Request server stopping.
Once stopped, server cannot be stopped or started again. This method is
non-blocking. Call `wait()` on the returned event to block until the server
has completely stopped.
Args:
grace: Grace period in seconds to be used when calling `server.stop()`.
Raises:
ValueError: If server stop has already been requested, or if the server
has not started running yet.
Returns:
A threading.Event that will be set when the server has completely stopped.
"""
self._server_lock.acquire()
try:
if not self._server_started:
raise ValueError("Server has not started running")
if self._stop_requested:
raise ValueError("Server has already stopped")
self._stop_requested = True
return self.server.stop(grace=grace)
finally:
self._server_lock.release()
def request_watch(self, node_name, output_slot, debug_op, breakpoint=False):
"""Request enabling a debug tensor watchpoint or breakpoint.
This will let the server send an `EventReply` to the client side
(i.e., the debugged TensorFlow runtime process) to request adding a watch
key (i.e., <node_name>:<output_slot>:<debug_op>) to the list of enabled
watch keys. The list applies only to debug ops with the attribute
gated_grpc=True.
To disable the watch, use `request_unwatch()`.
Args:
node_name: (`str`) name of the node that the to-be-watched tensor belongs
to, e.g., "hidden/Weights".
output_slot: (`int`) output slot index of the tensor to watch.
debug_op: (`str`) name of the debug op to enable. This should not include
any attribute substrings.
breakpoint: (`bool`) Iff `True`, the debug op will block and wait until it
receives an `EventReply` response from the server. The `EventReply`
proto may carry a TensorProto that modifies the value of the debug op's
output tensor.
"""
self._debug_ops_state_change_queue.put(
_state_change(
debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE
if breakpoint
else debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY,
node_name, output_slot, debug_op))
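# For example (a sketch): to set a breakpoint on the first output of a node
# named "hidden/MatMul", watched by a gated DebugIdentity op:
#   servicer.request_watch("hidden/MatMul", 0, "DebugIdentity", breakpoint=True)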
def request_unwatch(self, node_name, output_slot, debug_op):
"""Request disabling a debug tensor watchpoint or breakpoint.
This is the opposite of `request_watch()`.
Args:
node_name: (`str`) name of the node that the to-be-watched tensor belongs
to, e.g., "hidden/Weights".
output_slot: (`int`) output slot index of the tensor to watch.
debug_op: (`str`) name of the debug op to disable. This should not include
any attribute substrings.
"""
self._debug_ops_state_change_queue.put(
_state_change(
debug_service_pb2.EventReply.DebugOpStateChange.DISABLED, node_name,
output_slot, debug_op))
@property
def breakpoints(self):
"""Get a set of the currently-activated breakpoints.
Returns:
A `set` of 3-tuples: (node_name, output_slot, debug_op), e.g.,
{("MatMul", 0, "DebugIdentity")}.
"""
return self._breakpoints
def gated_grpc_debug_watches(self):
"""Get the list of debug watches with attribute gated_grpc=True.
Since the server receives `GraphDef`s from the debugged runtime, it can only
return the debug watches it has received so far.
Returns:
A `list` of `DebugWatch` `namedtuples` representing the debug watches with
gated_grpc=True. Each `namedtuple` element has the attributes:
`node_name` as a `str`,
`output_slot` as an `int`,
`debug_op` as a `str`.
"""
return list(self._gated_grpc_debug_watches)
def SendTracebacks(self, request, context):
"""Base implementation of the handling of SendTracebacks calls.
The base implementation does nothing with the incoming request.
Override in an implementation of the server if necessary.
Args:
request: A `CallTraceback` proto, containing information about the
type (e.g., graph vs. eager execution) and source-code traceback of the
call and (any) associated `tf.Graph`s.
context: Server context.
Returns:
An `EventReply` proto.
"""
return debug_service_pb2.EventReply()
def SendSourceFiles(self, request, context):
"""Base implementation of the handling of SendSourceFiles calls.
The base implementation does nothing with the incoming request.
Override in an implementation of the server if necessary.
Args:
request: A `DebuggedSourceFiles` proto, containing the path, content, size
and last-modified timestamp of source files.
context: Server context.
Returns:
An `EventReply` proto.
"""
return debug_service_pb2.EventReply()
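# A minimal usage sketch of the classes above. The subclass and port are
# illustrative only; nothing beyond the base-class methods defined in this
# file is assumed.
#
# class _LoggingStreamHandler(EventListenerBaseStreamHandler):
#
#   def on_core_metadata_event(self, event):
#     logging.info("Core metadata: %s", event.log_message.message)
#
#   def on_graph_def(self, graph_def, device_name, wall_time):
#     logging.info("GraphDef from %s at t=%d", device_name, wall_time)
#
#   def on_value_event(self, event):
#     logging.info("Tensor value for %s", event.summary.value[0].node_name)
#
# servicer = EventListenerBaseServicer(6064, _LoggingStreamHandler)
# servicer.run_server(blocking=False)  # Serve in a background thread pool.
# ...
# servicer.stop_server().wait()        # Block until fully stopped.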
|
tensorflow-master
|
tensorflow/python/debug/lib/grpc_debug_server.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger: Tools for debugging gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import uuid
import six
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
_GRADIENT_DEBUG_TAG = "gradient_debug_"
_gradient_debuggers = {}
def _tensor_to_grad_debug_op_name(tensor, grad_debugger_uuid):
op_name, slot = debug_graphs.parse_node_or_tensor_name(tensor.name)
return "%s_%d/%s%s" % (op_name, slot, _GRADIENT_DEBUG_TAG, grad_debugger_uuid)
def _parse_grad_debug_op_name(op_name):
"""Parse the name of a debug gradient op.
Args:
op_name: the name of the debug gradient op.
Returns:
1) The UUID of the GradientsDebugger that created the debug gradient op.
2) Name of the original tensor whose gradient is debugged by the debug
gradient op.
"""
name_items = op_name.split("/")
assert len(name_items) > 1
assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)
grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]
if "_" in grad_debugger_uuid:
grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index("_")]
orig_tensor_slot = int(name_items[-2][name_items[-2].rfind("_") + 1:])
orig_base_op_name = name_items[-2][:name_items[-2].rfind("_")]
orig_tensor_name = ("/".join(name_items[:-2] + [orig_base_op_name]) +
":%d" % orig_tensor_slot)
return grad_debugger_uuid, orig_tensor_name
class GradientsDebugger(object):
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
differentiation algorithm, i.e., `tf.gradients` and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
def __init__(self, y_tensor=None):
"""Constructor of GradientsDebugger.
Args:
y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor
on the numerator of the differentiation.
"""
self._uuid = uuid.uuid4().hex
_gradient_debuggers[self._uuid] = self
# A dict mapping x-tensor names to gradient tensor. x-tensor refers to the
# independent tf.Tensor, i.e., the tensor on the denominator of the
# differentiation.
self._gradient_tensors = {}
self._y_tensor = y_tensor
self._graph = None
if y_tensor:
self._graph = y_tensor.graph
self._is_active_context = False
@property
def y_tensor(self):
return self._y_tensor
@property
def graph(self):
return self._graph
def __enter__(self):
self._is_active_context = True
def __exit__(self, unused_type, unused_value, unused_traceback):
self._is_active_context = False
def identify_gradient(self, input_tensor):
"""Create a debug identity tensor that registers and forwards gradients.
The side effect of this method is that when gradient tensor(s) are created
with respect to any path that includes `input_tensor`, the gradient
tensor(s) with respect to `input_tensor` will be registered with this
`GradientsDebugger` instance and can later be retrieved with the methods
`gradient_tensor` and `gradient_tensors`.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x)
grad_debugger = tf_debug.GradientsDebugger()
debug_y = grad_debugger.identify_gradient(y)
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
with grad_debugger:
train_op = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
```
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
are to be registered with this `GradientsDebugger` instance when they
are created, e.g., during `tf.gradients` calls or the construction
of optimization (training) op that uses `tf.gradients`.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.
Raises:
ValueError: If an op with name that duplicates the gradient-debugging op
already exists in the graph (highly unlikely).
"""
# TODO(cais): Allow overriding gradient.
# TODO(cais): Implement value_stack.
grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)
# pylint: disable=protected-access
identity_op = (
gen_array_ops.debug_gradient_ref_identity
if input_tensor.dtype._is_ref_dtype else
gen_array_ops.debug_gradient_identity)
# pylint: enable=protected-access
debug_grad_identity = identity_op(input_tensor, name=grad_debug_op_name)
assert debug_grad_identity.dtype == input_tensor.dtype
if debug_grad_identity.op.name != grad_debug_op_name:
raise ValueError(
"The graph already contains an op named %s" % grad_debug_op_name)
return debug_grad_identity
def watch_gradients_by_tensors(self, graph, tensors):
"""Watch gradient tensors by x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to any path that includes the `x_tensor`s, the gradient
tensor(s) with respect to those tensors will be registered with this
`GradientsDebugger` instance and can later be retrieved with the methods
`gradient_tensor` and `gradient_tensors`.
Unlike the method `identify_gradient`, this method is used to retrieve
gradient tensors after the construction of the forward subgraph has
completed (but before the construction of the backward subgraph).
This method is the same as `watch_gradients_by_tensor_names` except that
the tensors are specified by the Python `tf.Tensor` or `tf.Variable`
objects, instead of by name patterns.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x, name="y")
z = tf.square(y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(tf.compat.v1.get_default_graph(), y):
train_op = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
# or
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensors: a `tf.Tensor` or `tf.Variable` object, or a list of such objects.
Returns:
The GradientsDebugger instance itself.
"""
if not isinstance(tensors, list):
tensors = [tensors]
tensor_name_regex = []
for tensor in tensors:
tensor_name_regex.append(re.escape(tensor.name) + "$")
tensor_name_regex = "(" + "|".join(tensor_name_regex) + ")"
return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)
def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):
"""Watch gradient tensors by name(s) of the x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to the x-tensors, the gradient tensor(s) will be registered
with this `GradientsDebugger` instance and can later be retrieved.
Unlike the `identify_gradient` method, this method is used after the
construction of the forward graph has completed. Unlike the
`watch_gradients_by_tensors` method, this method does not use handles to the
tensors of interest; it uses their names.
This method is the same as `watch_gradients_by_tensors` except that the
x-tensors are specified by name patterns, instead of `tf.Tensor` or
`tf.Variable` objects.
Example:
```python
x = tf.Variable(1.0, name="x")
y = tf.add(x, x, name="y")
z = tf.square(y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(r"(x|y):0$"):
train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to x and y.
x_grad = grad_debugger.gradient_tensor("x:0")
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensor_name_regex: the regular-expression pattern of the name(s) of the
x-tensor(s) to watch. x-tensor refers to the tensors on the denominator
of the differentiation.
Returns:
The GradientsDebugger instance itself.
"""
tensor_name_pattern = re.compile(tensor_name_regex)
with graph.as_default():
for op in graph.get_operations():
for output in op.outputs:
if tensor_name_pattern.match(output.name):
debug_op = self.identify_gradient(output)
# Make a copy of output.consumers() since we'll modify the consumers
# TODO(skyewm): this is unnecessary once the C API is enabled
for consumer in list(output.consumers()):
if consumer == debug_op.op:
continue
# Locate the slot index of the original input.
for i, consumer_input in enumerate(consumer.inputs):
if consumer_input == output:
consumer._update_input(i, debug_op) # pylint: disable=protected-access
return self
def _check_same_graph(self, tensor):
if self._graph is None:
self._graph = tensor.graph
elif self._graph != tensor.graph:
raise ValueError(
"The graph of the value (%s) is not the same as the graph %s" %
(tensor.graph, self._graph))
def register_gradient_tensor(self,
x_tensor_name,
gradient_tensor):
"""Register the gradient tensor for an x-tensor.
Args:
x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,
the tensor on the denominator of the differentiation.
gradient_tensor: the gradient `tf.Tensor`.
"""
if len(_gradient_debuggers) == 1 or self._is_active_context:
self._check_same_graph(gradient_tensor)
self._gradient_tensors[x_tensor_name] = gradient_tensor
def gradient_tensor(self, x_tensor):
"""Get the gradient tensor of an x-tensor.
Args:
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
Returns:
If found, the gradient tensor.
Raises:
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
LookupError: If the `x_tensor` has not been registered with a gradient
tensor.
"""
x_tensor_name = self._get_tensor_name(x_tensor)
if x_tensor_name not in self._gradient_tensors:
raise LookupError(
"This GradientsDebugger has not received any gradient tensor for "
"x-tensor %s" % x_tensor_name)
return self._gradient_tensors[x_tensor_name]
def gradient_tensors(self):
"""Get the gradient tensors that this object is aware of.
Returns:
A dict mapping x-tensor names to gradient tensor objects. x-tensor refers
to the tensors on the denominator of the differentiation.
"""
return self._gradient_tensors
def _get_tensor_name(self, tensor):
if isinstance(tensor, (ops.Tensor, variables.Variable)):
return tensor.name
elif isinstance(tensor, six.string_types):
return tensor
else:
raise TypeError(
"x_tensor must be a str or tf.Tensor or tf.Variable, "
"but instead has type %s" % type(tensor))
def clear_gradient_debuggers():
"""Clear all globally registered gradient debuggers."""
_gradient_debuggers.clear()
@ops.RegisterGradient("DebugGradientIdentity")
def _identify_gradient_grad(op, dy):
"""Gradient function for the DebugIdentity op."""
# TODO(cais): Allow overriding gradient.
grad_debugger_uuid, orig_tensor_name = _parse_grad_debug_op_name(op.name)
grad_debugger = _gradient_debuggers[grad_debugger_uuid]
grad_debugger.register_gradient_tensor(orig_tensor_name, dy)
return dy
@ops.RegisterGradient("DebugGradientRefIdentity")
def _identify_gradient_grad_ref(op, dy):
"""Gradient function for the DebugIdentity op."""
return _identify_gradient_grad(op, dy)
def gradient_values_from_dump(grad_debugger, x_tensor, dump):
"""Find gradient values from a `DebugDumpDir` object.
Args:
grad_debugger: the `tf_debug.GradientsDebugger` instance to be used.
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
dump: A `tfdbg.DebugDumpDir` object.
Returns:
If this `GradientsDebugger` instance has the gradient tensor of `x_tensor`
registered: a list of `numpy.ndarray` representing the value of the
gradient tensor from `dump`. The list could be empty, if the gradient
tensor is not executed in the `tf.Session.run()` call that generated
the `dump`. The list could also contain multiple values of the gradient
tensor, e.g., if gradient tensor is computed repeatedly in a
`tf.while_loop` during the run that generated the `dump`.
Raises:
LookupError: If this `GradientsDebugger` instance does not have the
gradient tensor of `x_tensor` registered.
ValueError: If this `GradientsDebugger` has a `tf.Graph` object that
does not match the `tf.Graph` object of the `dump`.
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
"""
# TODO(cais): Use this method in LocalCLIDebugWrapperSession to present the
# gradient tensors to the TFDBG CLI.
# If possible, verify that the Python graph of the dump and that of this
# GradientsDebugger match.
if (dump.python_graph and grad_debugger.graph and
dump.python_graph != grad_debugger.graph):
raise ValueError(
"This GradientsDebugger instance has a graph (%s) that differs from "
"the graph of the DebugDumpDir object (%s)." %
(grad_debugger.graph, dump.python_graph))
gradient_tensor = grad_debugger.gradient_tensor(x_tensor)
node_name, output_slot = debug_graphs.parse_node_or_tensor_name(
gradient_tensor.name)
try:
return dump.get_tensors(node_name, output_slot, "DebugIdentity")
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
return []
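# Example (a sketch, assuming `grad_debugger` watched tensor "y:0" during graph
# construction and `dump` is a DebugDumpDir from a debugged run):
#   y_grad_values = gradient_values_from_dump(grad_debugger, "y:0", dump)
#   # -> a (possibly empty) list of numpy.ndarrays dumped for the gradient.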
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_gradients.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions that help to inspect Python source w.r.t. TF graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import numpy as np
from tensorflow.python.debug.lib import profiling
_TENSORFLOW_BASEDIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.normpath(os.path.abspath(__file__))))))
UNCOMPILED_SOURCE_SUFFIXES = (".py",)  # Note trailing comma: a tuple, not a str.
COMPILED_SOURCE_SUFFIXES = (".pyc", ".pyo")
def _norm_abs_path(file_path):
return os.path.normpath(os.path.abspath(file_path))
def is_extension_uncompiled_python_source(file_path):
_, extension = os.path.splitext(file_path)
return extension.lower() in UNCOMPILED_SOURCE_SUFFIXES
def is_extension_compiled_python_source(file_path):
_, extension = os.path.splitext(file_path)
return extension.lower() in COMPILED_SOURCE_SUFFIXES
def _convert_watch_key_to_tensor_name(watch_key):
return watch_key[:watch_key.rfind(":")]
def guess_is_tensorflow_py_library(py_file_path):
"""Guess whether a Python source file is a part of the tensorflow library.
Special cases:
1) Returns False for unit-test files in the library (*_test.py),
2) Returns False for files under python/debug/examples.
Args:
py_file_path: full path of the Python source file in question.
Returns:
(`bool`) Whether the file is a part of the tensorflow library.
Raises:
ValueError: if the extension name of py_file_path does not indicate a Python
source file (compiled or uncompiled).
"""
if (not is_extension_uncompiled_python_source(py_file_path) and
not is_extension_compiled_python_source(py_file_path)):
raise ValueError(
"Input file path (%s) is not a Python source file." % py_file_path)
py_file_path = _norm_abs_path(py_file_path)
return (py_file_path.startswith(_TENSORFLOW_BASEDIR) and
not py_file_path.endswith("_test.py") and
not os.path.dirname(py_file_path).endswith(
os.path.normpath("python/debug/examples")))
def load_source(source_file_path):
with open(source_file_path, "rU") as f:
source_text = f.read()
source_lines = source_text.split("\n")
line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3
return source_lines, line_num_width
def annotate_source(dump,
source_file_path,
do_dumped_tensors=False,
file_stack_top=False,
min_line=None,
max_line=None):
"""Annotate a Python source file with a list of ops created at each line.
(The annotation doesn't change the source file itself.)
Args:
dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
has been loaded.
source_file_path: (`str`) Path to the source file being annotated.
do_dumped_tensors: (`bool`) Whether dumped Tensors, instead of ops, are to
be used to annotate the source file.
file_stack_top: (`bool`) Whether only the top stack trace in the
specified source file is to be annotated.
min_line: (`None` or `int`) The 1-based line number at which to start
annotating the source file (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
A `dict` mapping 1-based line number to a list of op name(s) created at
that line, or tensor names if `do_dumped_tensors` is True.
Raises:
ValueError: If the dump object does not have a Python graph set.
"""
py_graph = dump.python_graph
if not py_graph:
raise ValueError("Cannot perform source annotation due to a lack of set "
"Python graph in the dump object")
source_file_path = _norm_abs_path(source_file_path)
line_to_op_names = {}
for op in py_graph.get_operations():
for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):
if (min_line is not None and line_number < min_line or
max_line is not None and line_number >= max_line):
continue
if _norm_abs_path(file_path) != source_file_path:
continue
if do_dumped_tensors:
watch_keys = dump.debug_watch_keys(op.name)
# Convert watch keys to unique Tensor names.
items_to_append = list(
set(map(_convert_watch_key_to_tensor_name, watch_keys)))
else:
items_to_append = [op.name]
if line_number in line_to_op_names:
line_to_op_names[line_number].extend(items_to_append)
else:
line_to_op_names[line_number] = items_to_append
if file_stack_top:
break
return line_to_op_names
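# Example (a sketch, assuming `dump` has its Python graph loaded; the path
# below is hypothetical):
#   line_to_ops = annotate_source(dump, "/path/to/my_model.py")
#   for line_number in sorted(line_to_ops):
#     print("%d: %s" % (line_number, line_to_ops[line_number]))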
def list_source_files_against_dump(dump,
path_regex_whitelist=None,
node_name_regex_whitelist=None):
"""Generate a list of source files with information regarding ops and tensors.
Args:
dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
has been loaded.
path_regex_whitelist: A regular-expression filter for source file path.
node_name_regex_whitelist: A regular-expression filter for node names.
Returns:
A list of tuples regarding the Python source files involved in constructing
the ops and tensors contained in `dump`. Each tuple is:
(source_file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
first_line)
is_tf_library: (`bool`) A guess of whether the file belongs to the
TensorFlow Python library.
num_nodes: How many nodes were created by lines of this source file.
These include nodes with dumps and those without.
num_tensors: How many Tensors were created by lines of this source file.
These include Tensors with dumps and those without.
num_dumps: How many debug Tensor dumps were from nodes (and Tensors)
that were created by this source file.
first_line: The first line number (1-based) that created any nodes or
Tensors in this source file.
The list is sorted by ascending order of source_file_path.
Raises:
ValueError: If the dump object does not have a Python graph set.
"""
py_graph = dump.python_graph
if not py_graph:
raise ValueError("Cannot generate source list due to a lack of set "
"Python graph in the dump object")
path_to_node_names = collections.defaultdict(set)
path_to_tensor_names = collections.defaultdict(set)
path_to_first_line = {}
tensor_name_to_num_dumps = {}
path_regex = (re.compile(path_regex_whitelist)
if path_regex_whitelist else None)
node_name_regex = (re.compile(node_name_regex_whitelist)
if node_name_regex_whitelist else None)
to_skip_file_paths = set()
for op in py_graph.get_operations():
if node_name_regex and not node_name_regex.match(op.name):
continue
for file_path, line_number, _, _ in dump.node_traceback(op.name):
file_path = _norm_abs_path(file_path)
if (file_path in to_skip_file_paths or
path_regex and not path_regex.match(file_path) or
not os.path.isfile(file_path)):
to_skip_file_paths.add(file_path)
continue
path_to_node_names[file_path].add(op.name)
if file_path in path_to_first_line:
if path_to_first_line[file_path] > line_number:
path_to_first_line[file_path] = line_number
else:
path_to_first_line[file_path] = line_number
for output_tensor in op.outputs:
tensor_name = output_tensor.name
path_to_tensor_names[file_path].add(tensor_name)
watch_keys = dump.debug_watch_keys(op.name)
for watch_key in watch_keys:
node_name, output_slot, debug_op = watch_key.split(":")
tensor_name = "%s:%s" % (node_name, output_slot)
if tensor_name not in tensor_name_to_num_dumps:
tensor_name_to_num_dumps[tensor_name] = len(
dump.get_tensors(node_name, int(output_slot), debug_op))
path_to_num_dumps = {}
for path in path_to_tensor_names:
path_to_num_dumps[path] = sum(
tensor_name_to_num_dumps.get(tensor_name, 0)
for tensor_name in path_to_tensor_names[path])
output = []
for file_path in path_to_node_names:
output.append((
file_path,
guess_is_tensorflow_py_library(file_path),
len(path_to_node_names.get(file_path, {})),
len(path_to_tensor_names.get(file_path, {})),
path_to_num_dumps.get(file_path, 0),
path_to_first_line[file_path]))
return sorted(output, key=lambda x: x[0])
def annotate_source_against_profile(profile_data,
source_file_path,
node_name_filter=None,
op_type_filter=None,
min_line=None,
max_line=None):
"""Annotate a Python source file with profiling information at each line.
(The annotation doesn't change the source file itself.)
Args:
profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
source_file_path: (`str`) Path to the source file being annotated.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
min_line: (`None` or `int`) The 1-based line number at which to start
annotating the source file (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
A `dict` mapping 1-based line number to the namedtuple
`profiling.LineOrFuncProfileSummary`.
"""
source_file_path = _norm_abs_path(source_file_path)
node_name_regex = re.compile(node_name_filter) if node_name_filter else None
op_type_regex = re.compile(op_type_filter) if op_type_filter else None
line_to_profile_summary = {}
for profile_datum in profile_data:
if not profile_datum.file_path:
continue
if _norm_abs_path(profile_datum.file_path) != source_file_path:
continue
if (min_line is not None and profile_datum.line_number < min_line or
max_line is not None and profile_datum.line_number >= max_line):
continue
if (node_name_regex and
not node_name_regex.match(profile_datum.node_exec_stats.node_name)):
continue
if op_type_regex and not op_type_regex.match(profile_datum.op_type):
continue
if profile_datum.line_number not in line_to_profile_summary:
line_to_profile_summary[profile_datum.line_number] = (
profiling.AggregateProfile(profile_datum))
else:
line_to_profile_summary[profile_datum.line_number].add(profile_datum)
return line_to_profile_summary
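# Hedged usage sketch (illustrative): print per-line profiling summaries for
# one source file. `profile_data` is assumed to be a list of ProfileDatum
# built elsewhere (e.g., by the profiler CLI from RunMetadata step stats);
# the line bounds are arbitrary and the summary field names assume
# profiling.AggregateProfile.
def _demo_annotate_source_against_profile(profile_data, source_file_path):
  line_to_summary = annotate_source_against_profile(
      profile_data, source_file_path, min_line=1, max_line=200)
  for line_number in sorted(line_to_summary):
    summary = line_to_summary[line_number]
    print("Line %d: %d node(s), total op time: %d us" %
          (line_number, summary.node_count, summary.total_op_time))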
|
tensorflow-master
|
tensorflow/python/debug/lib/source_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions to handle debug-dump data of TensorFlow Debugger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import json
import os
import platform
import re
import numpy as np
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# TODO(cais): Tie these string constants in with C++?
METADATA_FILE_PREFIX = "_tfdbg_"
CORE_METADATA_TAG = "core_metadata_"
GRAPH_FILE_TAG = "graph_"
DEVICE_TAG = "device_"
HASH_TAG = "hash"
FETCHES_INFO_FILE_TAG = "fetches_info_"
FEED_KEYS_INFO_FILE_TAG = "feed_keys_info_"
def _glob(glob_pattern):
if platform.system() == "Windows":
return glob.glob(glob_pattern)
else:
return gfile.Glob(glob_pattern)
class InconvertibleTensorProto(object):
"""Represents a TensorProto that cannot be converted to np.ndarray."""
def __init__(self, tensor_proto, initialized=True):
"""Constructor.
Args:
tensor_proto: the `TensorProto` object that cannot be represented as a
`np.ndarray` object.
initialized: (`bool`) whether the Tensor is initialized.
"""
self._tensor_proto = tensor_proto
self._initialized = initialized
def __str__(self):
output = "" if self._initialized else "Uninitialized tensor:\n"
output += str(self._tensor_proto)
return output
@property
def initialized(self):
return self._initialized
def load_tensor_from_event_file(event_file_path):
"""Load a tensor from an event file.
  Assumes that the event file contains an `Event` protobuf and the `Event`
  protobuf contains a `Tensor` value.
Args:
event_file_path: (`str`) path to the event file.
Returns:
    The tensor value loaded from the event file, as a `numpy.ndarray`. For
    uninitialized Tensors and Tensors of data types that cannot be converted
    to `numpy.ndarray` (e.g., `tf.resource`), returns an
    `InconvertibleTensorProto` object instead.
"""
event = event_pb2.Event()
with gfile.Open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
return load_tensor_from_event(event)
def load_tensor_from_event(event):
"""Load a tensor from an Event proto.
Args:
event: The Event proto, assumed to hold a tensor value in its
summary.value[0] field.
Returns:
    The tensor value loaded from the event file, as a `numpy.ndarray`, if
    representation of the tensor value by a `numpy.ndarray` is possible.
    For uninitialized Tensors and Tensors of data types that cannot be
    represented as `numpy.ndarray` (e.g., `tf.resource`), returns an
    `InconvertibleTensorProto` object, which wraps the `TensorProto` without
    converting it to a `numpy.ndarray`.
"""
tensor_proto = event.summary.value[0].tensor
shape = tensor_util.TensorShapeProtoToList(tensor_proto.tensor_shape)
num_elements = 1
for shape_dim in shape:
num_elements *= shape_dim
if tensor_proto.tensor_content or tensor_proto.string_val or not num_elements:
# Initialized tensor or empty tensor.
if tensor_proto.dtype == types_pb2.DT_RESOURCE:
tensor_value = InconvertibleTensorProto(tensor_proto)
else:
try:
tensor_value = tensor_util.MakeNdarray(tensor_proto)
except KeyError:
tensor_value = InconvertibleTensorProto(tensor_proto)
else:
# Uninitialized tensor or tensor of unconvertible data type.
tensor_value = InconvertibleTensorProto(tensor_proto, False)
return tensor_value
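# Hedged round-trip sketch (illustrative; not part of the original module):
# build an Event proto that carries a tensor value and load it back with
# load_tensor_from_event().
def _demo_load_tensor_from_event():
  event = event_pb2.Event()
  value = event.summary.value.add()
  value.tensor.CopyFrom(tensor_util.make_tensor_proto(np.array([1.0, 2.0])))
  # Returns a numpy.ndarray equal to [1., 2.].
  return load_tensor_from_event(event)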
def _load_graph_def_from_event_file(event_file_path):
event = event_pb2.Event()
with gfile.Open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
return graph_pb2.GraphDef.FromString(event.graph_def)
def _load_log_message_from_event_file(event_file_path):
event = event_pb2.Event()
with gfile.Open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
return event.log_message.message
def _is_graph_file(file_name):
return file_name.startswith(METADATA_FILE_PREFIX + GRAPH_FILE_TAG)
def _is_run_fetches_info_file(file_name):
return file_name == METADATA_FILE_PREFIX + FETCHES_INFO_FILE_TAG
def _is_run_feed_keys_info_file(file_name):
return file_name == METADATA_FILE_PREFIX + FEED_KEYS_INFO_FILE_TAG
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
def _get_tensor_watch_key(node_name, output_slot, debug_op):
"""Get the string representation of a debug watch on a tensor.
Args:
node_name: Name of the node by which the watched tensor is produced, as a
string.
output_slot: Output slot index of the tensor, as an integer.
debug_op: Name of the debug op that is used to watch the tensor, as a
string.
Returns:
A string representing the debug watch on the tensor (i.e., the "watch
key").
"""
return "%s:%s" % (_get_tensor_name(node_name, output_slot), debug_op)
def has_inf_or_nan(datum, tensor):
"""A predicate for whether a tensor consists of any bad numerical values.
This predicate is common enough to merit definition in this module.
Bad numerical values include `nan`s and `inf`s.
The signature of this function follows the requirement of the method
`DebugDumpDir.find()`.
Args:
datum: (`DebugTensorDatum`) Datum metadata.
tensor: (`numpy.ndarray` or None) Value of the tensor. None represents
an uninitialized tensor.
Returns:
    (`bool`) True if and only if the tensor contains any nan or inf values.
"""
_ = datum # Datum metadata is unused in this predicate.
if isinstance(tensor, InconvertibleTensorProto):
# Uninitialized tensor doesn't have bad numerical values.
# Also return False for data types that cannot be represented as numpy
# arrays.
return False
  elif (np.issubdtype(tensor.dtype, np.floating) or
        np.issubdtype(tensor.dtype, np.complexfloating) or
        np.issubdtype(tensor.dtype, np.integer)):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
else:
return False
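# Hedged usage sketch (illustrative): has_inf_or_nan can be called directly or
# passed as the predicate to DebugDumpDir.find(). The `dump` argument is a
# hypothetical, already-loaded DebugDumpDir.
def _demo_has_inf_or_nan(dump):
  # Direct calls; the datum argument is unused by this predicate.
  assert has_inf_or_nan(None, np.array([1.0, np.nan]))
  assert not has_inf_or_nan(None, np.array([1.0, 2.0]))
  # As a tensor filter over an entire dump:
  return [datum.watch_key for datum in dump.find(has_inf_or_nan)]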
_CoreMetadata = collections.namedtuple("CoreMetadata", [
"global_step", "session_run_index", "executor_step_index", "input_names",
"output_names", "target_nodes"
])
def extract_core_metadata_from_event_proto(event):
json_metadata = json.loads(event.log_message.message)
return _CoreMetadata(json_metadata["global_step"],
json_metadata["session_run_index"],
json_metadata["executor_step_index"],
json_metadata["input_names"],
json_metadata["output_names"],
json_metadata["target_nodes"])
def device_name_to_device_path(device_name):
"""Convert device name to device path."""
device_name_items = compat.as_text(device_name).split("/")
device_name_items = [item.replace(":", "_") for item in device_name_items]
return METADATA_FILE_PREFIX + DEVICE_TAG + ",".join(device_name_items)
def device_path_to_device_name(device_dir):
"""Parse device name from device path.
Args:
device_dir: (str) a directory name for the device.
Returns:
(str) parsed device name.
"""
path_items = os.path.basename(device_dir)[
len(METADATA_FILE_PREFIX) + len(DEVICE_TAG):].split(",")
return "/".join([
path_item.replace("device_", "device:").replace("_", ":", 1)
for path_item in path_items])
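# Hedged sketch (illustrative): the two helpers above form a lossless round
# trip between a TensorFlow device name and its dump-directory name.
def _demo_device_path_round_trip():
  device_name = "/job:localhost/replica:0/task:0/device:GPU:0"
  device_path = device_name_to_device_path(device_name)
  # device_path == "_tfdbg_device_,job_localhost,replica_0,task_0,device_GPU_0"
  assert device_path_to_device_name(device_path) == device_name
  return device_path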
class DebugTensorDatum(object):
"""A single tensor dumped by TensorFlow Debugger (tfdbg).
Contains metadata about the dumped tensor, including `timestamp`,
`node_name`, `output_slot`, `debug_op`, and path to the dump file
(`file_path`).
This type does not hold the generally space-expensive tensor value (numpy
array). Instead, it points to the file from which the tensor value can be
loaded (with the `get_tensor` method) if needed.
"""
def __init__(self, dump_root, debug_dump_rel_path):
"""`DebugTensorDatum` constructor.
Args:
dump_root: (`str`) Debug dump root directory. This path should not include
the path component that represents the device name (see also below).
debug_dump_rel_path: (`str`) Path to a debug dump file, relative to the
`dump_root`. The first item of this relative path is assumed to be
a path representing the name of the device that the Tensor belongs to.
See `device_path_to_device_name` for more details on the device path.
        For example, suppose the debug dump root
        directory is `/tmp/tfdbg_1` and the dump file is at
        `/tmp/tfdbg_1/<device_path>/ns_1/node_a_0_DebugIdentity_123456789`,
        then the value of the debug_dump_rel_path should be
        `<device_path>/ns_1/node_a_0_DebugIdentity_123456789`.
Raises:
ValueError: If the base file name of the dump file does not conform to
the dump file naming pattern:
`node_name`_`output_slot`_`debug_op`_`timestamp`
"""
path_components = os.path.normpath(debug_dump_rel_path).split(os.sep)
self._device_name = device_path_to_device_name(path_components[0])
base = path_components[-1]
if base.count("_") < 3:
raise ValueError(
"Dump file path does not conform to the naming pattern: %s" % base)
self._extended_timestamp = base.split("_")[-1]
# It may include an index suffix at the end if file path collision happened
# due to identical timestamps.
if "-" in self._extended_timestamp:
self._timestamp = int(
self._extended_timestamp[:self._extended_timestamp.find("-")])
else:
self._timestamp = int(self._extended_timestamp)
self._debug_op = base.split("_")[-2]
self._output_slot = int(base.split("_")[-3])
node_base_name = "_".join(base.split("_")[:-3])
self._node_name = "/".join(path_components[1:-1] + [node_base_name])
self._file_path = os.path.join(dump_root, debug_dump_rel_path)
self._dump_size_bytes = (gfile.Stat(self._file_path).length if
gfile.Exists(self._file_path) else None)
def __str__(self):
return "{DebugTensorDatum (%s) %s:%d @ %s @ %d}" % (self.device_name,
self.node_name,
self.output_slot,
self.debug_op,
self.timestamp)
def __repr__(self):
return self.__str__()
def get_tensor(self):
"""Get tensor from the dump (`Event`) file.
Returns:
The tensor loaded from the dump (`Event`) file.
"""
return load_tensor_from_event_file(self.file_path)
# TODO(cais): Add time unit suffix to timestamp and t0 (us).
@property
def timestamp(self):
"""Timestamp of when this tensor value was dumped.
Returns:
(`int`) The timestamp in microseconds.
"""
return self._timestamp
@property
def extended_timestamp(self):
"""Extended timestamp, possibly with an index suffix.
The index suffix, e.g., "-1", is for disambiguating multiple dumps of the
same tensor with the same timestamp, which can occur if the dumping events
are spaced by shorter than the temporal resolution of the timestamps.
Returns:
(`str`) The extended timestamp.
"""
return self._extended_timestamp
@property
def debug_op(self):
"""Name of the debug op.
Returns:
(`str`) debug op name (e.g., `DebugIdentity`).
"""
return self._debug_op
@property
def device_name(self):
"""Name of the device that the tensor belongs to.
Returns:
(`str`) device name.
"""
return self._device_name
@property
def node_name(self):
"""Name of the node from which the tensor value was dumped.
Returns:
(`str`) name of the node watched by the debug op.
"""
return self._node_name
@property
def output_slot(self):
"""Output slot index from which the tensor value was dumped.
Returns:
(`int`) output slot index watched by the debug op.
"""
return self._output_slot
@property
def tensor_name(self):
"""Name of the tensor watched by the debug op.
Returns:
(`str`) `Tensor` name, in the form of `node_name`:`output_slot`
"""
return _get_tensor_name(self.node_name, self.output_slot)
@property
def watch_key(self):
"""Watch key identities a debug watch on a tensor.
Returns:
(`str`) A watch key, in the form of `tensor_name`:`debug_op`.
"""
return _get_tensor_watch_key(self.node_name, self.output_slot,
self.debug_op)
@property
def file_path(self):
"""Path to the file which stores the value of the dumped tensor."""
return self._file_path
@property
def dump_size_bytes(self):
"""Size of the dump file.
Unit: byte.
Returns:
If the dump file exists, size of the dump file, in bytes.
If the dump file does not exist, None.
"""
return self._dump_size_bytes
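# Hedged sketch (illustrative): parsing dump-file metadata with
# DebugTensorDatum. The dump root and relative path below are hypothetical;
# no file needs to exist for the metadata fields to be parsed.
def _demo_debug_tensor_datum():
  device_dir = device_name_to_device_path(
      "/job:localhost/replica:0/task:0/device:CPU:0")
  rel_path = os.path.join(device_dir, "ns_1",
                          "node_a_0_DebugIdentity_1234567890")
  datum = DebugTensorDatum("/tmp/tfdbg_dump", rel_path)
  assert datum.node_name == "ns_1/node_a"
  assert datum.output_slot == 0
  assert datum.debug_op == "DebugIdentity"
  assert datum.timestamp == 1234567890
  assert datum.watch_key == "ns_1/node_a:0:DebugIdentity"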
class WatchKeyDoesNotExistInDebugDumpDirError(ValueError):
pass
class DebugDumpDir(object):
"""Data set from a debug-dump directory on filesystem.
An instance of `DebugDumpDir` contains all `DebugTensorDatum` instances
in a tfdbg dump root directory.
"""
def __init__(self, dump_root, partition_graphs=None, validate=True):
"""`DebugDumpDir` constructor.
Args:
dump_root: (`str`) path to the dump root directory.
partition_graphs: A repeated field of GraphDefs representing the
partition graphs executed by the TensorFlow runtime.
validate: (`bool`) whether the dump files are to be validated against the
partition graphs.
Raises:
IOError: If dump_root does not exist as a directory.
ValueError: If more than one core metadata file is found under the dump
root directory.
"""
if not gfile.IsDirectory(dump_root):
raise IOError("Dump root directory %s does not exist" % dump_root)
self._core_metadata = []
# Find the list of devices.
self._dump_root = dump_root
self._load_core_metadata()
self._load_fetches_info()
self._load_feeds_info()
self._load_all_device_dumps(partition_graphs, validate)
self._python_graph = None
def _load_all_device_dumps(self, partition_graphs, validate):
"""Load the dump data for all devices."""
device_dirs = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + DEVICE_TAG + "*"))
self._device_names = []
self._t0s = {}
self._dump_tensor_data = {}
self._dump_graph_file_paths = {}
self._debug_watches = {}
self._watch_key_to_devices = {}
self._watch_key_to_datum = {}
self._watch_key_to_rel_time = {}
self._watch_key_to_dump_size_bytes = {}
for device_dir in device_dirs:
device_name = device_path_to_device_name(device_dir)
self._device_names.append(device_name)
self._load_device_dumps(device_name, device_dir)
self._load_partition_graphs(partition_graphs, validate)
self._calculate_t0()
for device_name in self._device_names:
self._create_tensor_watch_maps(device_name)
def _load_device_dumps(self, device_name, device_root):
"""Load `DebugTensorDatum` instances from the dump root of a given device.
Populates a map {device_name: a list of `DebugTensorDatum`}, where the list
is sorted by ascending timestamp.
    This sorting order reflects the order in which the TensorFlow executor
    processed the nodes of the graph. It is one of many possible topological
    sorts of the nodes. This is useful for displaying tensors in the debugger
    frontend as well as for the use case in which the user wants to find a
    "culprit tensor", i.e., the first tensor in the graph that exhibits certain
    problematic properties, e.g., all-zero values or bad numerical values such
    as `nan` and `inf`.
    In addition, creates a map from node name to debug watches. In this map,
    the key is the watched node name and the value is a dictionary mapping
    each watched output slot to the set of debug ops watching it.
This method attempts to load the debug watches from the tensor dump files
first, before loading the full set of debug watches from the partition
graphs as done later. This is necessary because sometimes the partition
graphs may not be available, e.g., when the run errors out.
Args:
device_name: (`str`) name of the device.
device_root: (`str`) dump root directory of the given device.
Raises:
ValueError: If GraphDef for the device is not available.
"""
self._dump_tensor_data[device_name] = []
self._debug_watches[device_name] = collections.defaultdict(
lambda: collections.defaultdict(set))
for root, _, files in gfile.Walk(device_root):
for f in files:
if _is_graph_file(f):
self._dump_graph_file_paths[device_name] = os.path.join(root, f)
else:
datum = self._dump_file_name_to_datum(root, f)
self._dump_tensor_data[device_name].append(datum)
self._debug_watches[device_name][datum.node_name][
datum.output_slot].add(datum.debug_op)
self._dump_tensor_data[device_name] = sorted(
self._dump_tensor_data[device_name],
key=lambda x: x.extended_timestamp)
if self._dump_tensor_data[device_name]:
self._t0s[device_name] = self._dump_tensor_data[device_name][0].timestamp
else:
self._t0s[device_name] = None
def _calculate_t0(self):
"""Calculate the first timestamp across all devices."""
t0s = [t0 for t0 in six.itervalues(self._t0s) if t0 is not None]
self._t0 = min(t0s) if t0s else None
def _load_core_metadata(self):
core_metadata_files = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + CORE_METADATA_TAG + "*"))
for core_metadata_file in core_metadata_files:
with gfile.Open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
self._core_metadata.append(
extract_core_metadata_from_event_proto(event))
def _load_fetches_info(self):
fetches_info_files = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + FETCHES_INFO_FILE_TAG + "*"))
self._run_fetches_info = []
for fetches_info_file in fetches_info_files:
self._run_fetches_info.append(
_load_log_message_from_event_file(fetches_info_file))
def _load_feeds_info(self):
feeds_info_files = _glob(os.path.join(
self._dump_root, METADATA_FILE_PREFIX + FEED_KEYS_INFO_FILE_TAG + "*"))
self._run_feed_keys_info = []
for feeds_info_file in feeds_info_files:
self._run_feed_keys_info.append(
_load_log_message_from_event_file(feeds_info_file))
def _dump_file_name_to_datum(self, dir_name, file_name):
"""Obtain a DebugTensorDatum from the directory and file name.
Args:
dir_name: (`str`) Name of the directory in which the dump file resides.
file_name: (`str`) Base name of the dump file.
Returns:
(`DebugTensorDatum`) The `DebugTensorDatum` loaded from the dump file.
"""
# Calculate the relative path of the dump file with respect to the root.
debug_dump_rel_path = os.path.join(
os.path.relpath(dir_name, self._dump_root), file_name)
return DebugTensorDatum(self._dump_root, debug_dump_rel_path)
def _create_tensor_watch_maps(self, device_name):
"""Create maps from tensor watch keys to datum and to timestamps.
Create a map from watch key (tensor name + debug op) to `DebugTensorDatum`
item. Also make a map from watch key to relative timestamp.
"relative" means (absolute timestamp - t0).
Args:
device_name: (str) name of the device.
"""
self._watch_key_to_datum[device_name] = {}
self._watch_key_to_rel_time[device_name] = {}
self._watch_key_to_dump_size_bytes[device_name] = {}
for datum in self._dump_tensor_data[device_name]:
if datum.watch_key not in self._watch_key_to_devices:
self._watch_key_to_devices[datum.watch_key] = {device_name}
else:
self._watch_key_to_devices[datum.watch_key].add(device_name)
if datum.watch_key not in self._watch_key_to_datum[device_name]:
self._watch_key_to_datum[device_name][datum.watch_key] = [datum]
self._watch_key_to_rel_time[device_name][datum.watch_key] = [
datum.timestamp - self._t0]
self._watch_key_to_dump_size_bytes[device_name][datum.watch_key] = [
datum.dump_size_bytes]
else:
self._watch_key_to_datum[device_name][datum.watch_key].append(datum)
self._watch_key_to_rel_time[device_name][datum.watch_key].append(
datum.timestamp - self._t0)
self._watch_key_to_dump_size_bytes[device_name][datum.watch_key].append(
datum.dump_size_bytes)
def set_python_graph(self, python_graph):
"""Provide Python `Graph` object to the wrapper.
Unlike the partition graphs, which are protobuf `GraphDef` objects, `Graph`
is a Python object and carries additional information such as the traceback
of the construction of the nodes in the graph.
Args:
python_graph: (ops.Graph) The Python Graph object.
"""
self._python_graph = python_graph
self._node_traceback = {}
if self._python_graph:
for op in self._python_graph.get_operations():
self._node_traceback[op.name] = op.traceback
@property
def python_graph(self):
"""Get the Python graph.
Returns:
If the Python graph has been set, returns a `tf.Graph` object. Otherwise,
returns None.
"""
return self._python_graph
@property
def core_metadata(self):
"""Metadata about the `Session.run()` call from the core runtime.
Of the three counters available in the return value, `global_step` is
supplied by the caller of the debugged `Session.run()`, while
`session_run_index` and `executor_step_index` are determined by the state
of the core runtime, automatically. For the same fetch list, feed keys and
debug tensor watch options, the same executor will be used and
`executor_step_index` should increase by one at a time. However, runs with
    different fetch lists, feed keys and debug tensor watch options that all
share the same `Session` object can lead to gaps in `session_run_index`.
Returns:
If core metadata are loaded, a `namedtuple` with the fields:
`global_step`: A global step count supplied by the caller of
`Session.run()`. It is optional to the caller. If the caller did not
supply this parameter, its value will be -1.
`session_run_index`: A sorted index for Run() calls to the underlying
TensorFlow `Session` object.
`executor_step_index`: A counter for invocations of a given runtime
executor. The same executor is re-used for the same fetched tensors,
target nodes, input feed keys and debug tensor watch options.
`input_names`: Names of the input (feed) Tensors.
`output_names`: Names of the output (fetched) Tensors.
`target_nodes`: Names of the target nodes.
      If the core metadata have not been loaded, `None`.
      If more than one core metadata file exists, returns a list of the
      `namedtuple`s described above.
"""
output = self._core_metadata
return output[0] if len(output) == 1 else output
@property
def dumped_tensor_data(self):
"""Retrieve dumped tensor data."""
if len(self.devices()) == 1:
return self._dump_tensor_data[self.devices()[0]]
else:
all_devices_data = six.itervalues(self._dump_tensor_data)
data = []
for device_data in all_devices_data:
data.extend(device_data)
return sorted(data, key=lambda x: x.extended_timestamp)
@property
def t0(self):
"""Absolute timestamp of the first dumped tensor across all devices.
Returns:
(`int`) absolute timestamp of the first dumped tensor, in microseconds.
"""
return self._t0
@property
def size(self):
"""Total number of dumped tensors in the dump root directory.
Returns:
(`int`) The total number of dumped tensors in the dump root directory.
"""
return sum(len(self._dump_tensor_data[device_name])
for device_name in self._dump_tensor_data)
def _load_partition_graphs(self, client_partition_graphs, validate):
"""Load and process partition graphs.
Load the graphs; parse the input and control input structure; obtain the
device and op type of each node; remove the Copy and debug ops inserted
by the debugger. The gathered information can be used to validate the
tensor dumps.
Args:
client_partition_graphs: A repeated field of GraphDefs representing the
partition graphs executed by the TensorFlow runtime, from the Python
client. These partition graphs are used only if partition graphs
cannot be loaded from the dump directory on the file system.
validate: (`bool`) Whether the dump files are to be validated against the
partition graphs.
Raises:
ValueError: If the partition GraphDef of one or more devices fail to be
loaded.
"""
self._debug_graphs = {}
self._node_devices = {}
partition_graphs_and_device_names = []
for device_name in self._device_names:
partition_graph = None
if device_name in self._dump_graph_file_paths:
partition_graph = _load_graph_def_from_event_file(
self._dump_graph_file_paths[device_name])
else:
logging.warn(
"Failed to load partition graphs for device %s from disk. "
"As a fallback, the client graphs will be used. This "
"may cause mismatches in device names." % device_name)
partition_graph = self._find_partition_graph(client_partition_graphs,
device_name)
if partition_graph:
partition_graphs_and_device_names.append((partition_graph,
device_name))
for partition_graph, maybe_device_name in partition_graphs_and_device_names:
debug_graph = debug_graphs.DebugGraph(partition_graph,
device_name=maybe_device_name)
self._debug_graphs[debug_graph.device_name] = debug_graph
self._collect_node_devices(debug_graph)
if validate and debug_graph.device_name in self._dump_tensor_data:
self._validate_dump_with_graphs(debug_graph.device_name)
def _find_partition_graph(self, partition_graphs, device_name):
if partition_graphs is None:
return None
else:
for graph_def in partition_graphs:
for node_def in graph_def.node:
if node_def.device == device_name:
return graph_def
return None
def _collect_node_devices(self, debug_graph):
for node_name in debug_graph.node_devices:
if node_name in self._node_devices:
self._node_devices[node_name] = self._node_devices[node_name].union(
debug_graph.node_devices[node_name])
else:
self._node_devices[node_name] = debug_graph.node_devices[node_name]
def _validate_dump_with_graphs(self, device_name):
"""Validate the dumped tensor data against the partition graphs.
Only the watched nodes are validated by this method, because tfdbg allows
clients to watch only a subset of the nodes.
Args:
device_name: (`str`) device name.
Raises:
LookupError: If the partition graphs have not been loaded yet.
ValueError: If dumps contain node names not found in partition graph.
        Or if the temporal order of the dump's timestamps violates the
input relations on the partition graphs.
"""
if not self._debug_graphs:
raise LookupError(
"No partition graphs loaded for device %s" % device_name)
debug_graph = self._debug_graphs[device_name]
# Verify that the node names in the dump data are all present in the
# partition graphs.
for datum in self._dump_tensor_data[device_name]:
if datum.node_name not in debug_graph.node_inputs:
raise ValueError("Node name '%s' is not found in partition graphs of "
"device %s." % (datum.node_name, device_name))
pending_inputs = {}
for node in debug_graph.node_inputs:
pending_inputs[node] = []
inputs = debug_graph.node_inputs[node]
for inp in inputs:
inp_node = debug_graphs.get_node_name(inp)
inp_output_slot = debug_graphs.get_output_slot(inp)
# Inputs from Enter and NextIteration nodes are not validated because
# DebugNodeInserter::InsertNodes() in the debugger core skips creating
# control edges from debug ops watching these types of nodes.
if (inp_node in self._debug_watches[device_name] and
inp_output_slot in self._debug_watches[device_name][inp_node] and
debug_graph.node_op_types.get(inp) not in (
"Enter", "NextIteration") and
(inp_node, inp_output_slot) not in pending_inputs[node]):
pending_inputs[node].append((inp_node, inp_output_slot))
for i, datum in enumerate(self._dump_tensor_data[device_name]):
node = datum.node_name
slot = datum.output_slot
      # In some cases (e.g., system clocks with insufficient precision), the
      # upstream and downstream tensors may have identical timestamps. The
      # following check examines this possibility and avoids raising an error
      # if that is the case.
if not self._satisfied_at_timestamp(
device_name, pending_inputs[node], datum.timestamp, start_i=i + 1):
raise ValueError("Causality violated in timing relations of debug "
"dumps: %s (%d): "
"these input(s) are not satisfied: %s" %
(node, datum.timestamp, repr(pending_inputs[node])))
recipients = debug_graph.node_recipients[node]
for recipient in recipients:
recipient_pending_inputs = pending_inputs[recipient]
if (node, slot) in recipient_pending_inputs:
if self.node_op_type(recipient) == "Merge":
# If this is a Merge op, we automatically clear the list because
# a Merge node only requires one of its two inputs.
del recipient_pending_inputs[:]
else:
del recipient_pending_inputs[
recipient_pending_inputs.index((node, slot))]
def _satisfied_at_timestamp(self, device_name, pending, timestamp, start_i=0):
"""Determine whether pending inputs are satisfied at given timestamp.
Note: This method mutates the input argument "pending".
Args:
device_name: (str) device name.
pending: A list of 2-tuple (node_name, output_slot): the dependencies to
check.
timestamp: (int) the timestamp in question.
start_i: (int) the index in self._dump_tensor_data to start searching for
the timestamp.
Returns:
(bool) Whether all the dependencies in pending are satisfied at the
timestamp. If pending is empty to begin with, return True.
"""
if not pending:
return True
for datum in self._dump_tensor_data[device_name][start_i:]:
if datum.timestamp > timestamp:
break
if (datum.timestamp == timestamp and
(datum.node_name, datum.output_slot) in pending):
pending.remove((datum.node_name, datum.output_slot))
if not pending:
return True
return not pending
def loaded_partition_graphs(self):
"""Test whether partition graphs have been loaded."""
return bool(self._debug_graphs)
def partition_graphs(self):
"""Get the partition graphs.
Returns:
Partition graphs as a list of GraphDef.
Raises:
LookupError: If no partition graphs have been loaded.
"""
if not self._debug_graphs:
raise LookupError("No partition graphs have been loaded.")
return [self._debug_graphs[key].debug_graph_def
for key in self._debug_graphs]
def reconstructed_non_debug_partition_graphs(self):
"""Reconstruct partition graphs with the debugger-inserted ops stripped.
The reconstructed partition graphs are identical to the original (i.e.,
non-debugger-decorated) partition graphs except in the following respects:
1) The exact names of the runtime-inserted internal nodes may differ.
These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.
2) As a consequence of 1, the nodes that receive input directly from such
send- and recv-type ops will have different input names.
    3) The parallel_iteration attribute of while-loop Enter ops is set to 1.
Returns:
A dict mapping device names (`str`s) to reconstructed
`tf.compat.v1.GraphDef`s.
"""
non_debug_graphs = {}
for key in self._debug_graphs:
non_debug_graphs[key] = self._debug_graphs[key].non_debug_graph_def
return non_debug_graphs
@property
def run_fetches_info(self):
"""Get a str representation of the fetches used in the Session.run() call.
Returns:
If the information is available from one `Session.run` call, a `str`
obtained from `repr(fetches)`.
If the information is available from multiple `Session.run` calls, a
`list` of `str` from `repr(fetches)`.
If the information is not available, `None`.
"""
output = self._run_fetches_info
return output[0] if len(output) == 1 else output
@property
def run_feed_keys_info(self):
"""Get a str representation of the feed_dict used in the Session.run() call.
Returns:
If the information is available from one `Session.run` call, a `str`
obtained from `repr(feed_dict)`.
If the information is available from multiple `Session.run` calls, a
`list` of `str` obtained from `repr(feed_dict)`.
If the information is not available, `None`.
"""
output = self._run_feed_keys_info
return output[0] if len(output) == 1 else output
def _infer_device_name(self, device_name, node_name):
"""Infer the device name given node name.
    If device_name is provided (i.e., not None), it is simply returned.
Args:
device_name: (str or None) name of the device. If None, will try to infer
the device name by looking at the available nodes.
node_name: (str) name of the node.
Returns:
(str) Inferred name of the device, if available.
Raises:
ValueError: If the node name does not exist on any of the available
devices or if there are multiple devices that contain the node with
the given name.
"""
if device_name is None:
if node_name in self._node_devices:
if len(self._node_devices[node_name]) == 1:
return list(self._node_devices[node_name])[0]
else:
raise ValueError(
"There are multiple (%d) devices with nodes named '%s' but "
"device_name is not specified." %
(len(self._node_devices[node_name]), node_name))
else:
raise ValueError("None of the %d device(s) has a node named '%s'." %
(len(self._device_names), node_name))
else:
return device_name
def nodes(self, device_name=None):
"""Get a list of all nodes from the partition graphs.
Args:
device_name: (`str`) name of device. If None, all nodes from all available
devices will be included.
Returns:
All nodes' names, as a list of str.
Raises:
LookupError: If no partition graphs have been loaded.
      ValueError: If the specified device name does not exist.
"""
if not self._debug_graphs:
raise LookupError("No partition graphs have been loaded.")
if device_name is None:
nodes = []
for device_name in self._debug_graphs:
nodes.extend(self._debug_graphs[device_name].node_inputs.keys())
return nodes
else:
if device_name not in self._debug_graphs:
raise ValueError("Invalid device name: %s" % device_name)
return self._debug_graphs[device_name].node_inputs.keys()
def node_attributes(self, node_name, device_name=None):
"""Get the attributes of a node.
Args:
node_name: Name of the node in question.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
Attributes of the node.
Raises:
LookupError: If no partition graphs have been loaded.
"""
if not self._debug_graphs:
raise LookupError("No partition graphs have been loaded.")
device_name = self._infer_device_name(device_name, node_name)
return self._debug_graphs[device_name].node_attributes[node_name]
def node_inputs(self, node_name, is_control=False, device_name=None):
"""Get the inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
is_control: (`bool`) Whether control inputs, rather than non-control
inputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) inputs to the node, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node inputs are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
if is_control:
return self._debug_graphs[device_name].node_ctrl_inputs[node_name]
else:
return self._debug_graphs[device_name].node_inputs[node_name]
def transitive_inputs(self,
node_name,
include_control=True,
include_reversed_ref=False,
                        device_name=None):
"""Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
include_control: Include control inputs (True by default).
include_reversed_ref: Whether a ref input, say from A to B, is to be also
considered as an input from B to A. The rationale is that ref inputs
generally let the recipient (e.g., B in this case) mutate the value of
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all transitive inputs to the node, as a list of node
names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node inputs are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
input_lists = [self._debug_graphs[device_name].node_inputs]
if include_control:
input_lists.append(self._debug_graphs[device_name].node_ctrl_inputs)
if include_reversed_ref:
input_lists.append(
self._debug_graphs[device_name].node_reversed_ref_inputs)
tracer = debug_graphs.DFSGraphTracer(
input_lists,
skip_node_names=self._get_merge_node_names(device_name))
tracer.trace(node_name)
return tracer.inputs()
def _get_merge_node_names(self, device_name):
"""Lazily get a list of Merge nodes on a given device."""
if device_name not in self._device_names:
raise ValueError("Invalid device name: %s" % device_name)
if not hasattr(self, "_merge_node_names"):
self._merge_node_names = {}
if device_name not in self._merge_node_names:
debug_graph = self._debug_graphs[device_name]
self._merge_node_names[device_name] = [
node for node in debug_graph.node_op_types
if debug_graph.node_op_types[node] == "Merge"]
return self._merge_node_names[device_name]
def find_some_path(self,
src_node_name,
dst_node_name,
include_control=True,
include_reversed_ref=False,
device_name=None):
"""Find a path between a source node and a destination node.
Limitation: the source and destination are required to be on the same
device, i.e., this method does not yet take into account Send/Recv nodes
across devices.
TODO(cais): Make this method work across device edges by tracing Send/Recv
nodes.
Args:
src_node_name: (`str`) name of the source node or name of an output tensor
of the node.
dst_node_name: (`str`) name of the destination node or name of an output
tensor of the node.
      include_control: (`bool`) whether control edges are considered in the
        graph tracing.
include_reversed_ref: Whether a ref input, say from A to B, is to be also
considered as an input from B to A. The rationale is that ref inputs
generally let the recipient (e.g., B in this case) mutate the value of
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
A path from the src_node_name to dst_node_name, as a `list` of `str`, if
it exists. The list includes src_node_name as the first item and
dst_node_name as the last.
If such a path does not exist, `None`.
Raises:
ValueError: If the source and destination nodes are not on the same
device.
"""
src_device_name = self._infer_device_name(device_name, src_node_name)
dst_device_name = self._infer_device_name(device_name, dst_node_name)
if src_device_name != dst_device_name:
raise ValueError(
"Source (%s) and destination (%s) are not on the same device: "
"%s vs. %s" % (src_node_name, dst_node_name, src_device_name,
dst_device_name))
input_lists = [self._debug_graphs[dst_device_name].node_inputs]
debug_graph = self._debug_graphs[dst_device_name]
if include_control:
input_lists.append(debug_graph.node_ctrl_inputs)
if include_reversed_ref:
input_lists.append(debug_graph.node_reversed_ref_inputs)
tracer = debug_graphs.DFSGraphTracer(
input_lists,
skip_node_names=self._get_merge_node_names(dst_device_name),
destination_node_name=src_node_name)
# Here the value of destination_node_name is src_node_name, because we
# are tracing the graph from output to its inputs (i.e., going backwards
# on the graph).
try:
tracer.trace(dst_node_name)
except debug_graphs.GraphTracingReachedDestination:
# Prune nodes not on the path.
inputs = [dst_node_name] + tracer.inputs()
depth_list = [0] + tracer.depth_list()
path = []
curr_depth = depth_list[-1]
for inp, depth in zip(reversed(inputs), reversed(depth_list)):
if depth == curr_depth:
path.append(inp)
curr_depth -= 1
return path
def node_recipients(self, node_name, is_control=False, device_name=None):
"""Get recipient of the given node's output according to partition graphs.
Args:
node_name: (`str`) name of the node.
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
      (`list` of `str`) all recipients of the node's output, as a list of node
        names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node recipients are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
debug_graph = self._debug_graphs[device_name]
if is_control:
return debug_graph.node_ctrl_recipients[node_name]
else:
return debug_graph.node_recipients[node_name]
def devices(self):
"""Get the list of device names.
Returns:
(`list` of `str`) names of the devices.
"""
return self._device_names
def node_exists(self, node_name, device_name=None):
"""Test if a node exists in the partition graphs.
Args:
node_name: (`str`) name of the node to be checked.
device_name: optional device name. If None, will search for the node
on all available devices. Otherwise, search for the node only on
the given device.
Returns:
A boolean indicating whether the node exists.
Raises:
LookupError: If no partition graphs have been loaded yet.
ValueError: If device_name is specified but cannot be found.
"""
if not self._debug_graphs:
raise LookupError(
"Nodes have not been loaded from partition graphs yet.")
if (device_name is not None) and device_name not in self._debug_graphs:
raise ValueError(
"The specified device_name '%s' cannot be found." % device_name)
for _, debug_graph in self._debug_graphs.items():
if node_name in debug_graph.node_inputs:
return True
return False
def node_device(self, node_name):
"""Get the names of the devices that has nodes of the specified name.
Args:
node_name: (`str`) name of the node.
Returns:
(`str` or `list` of `str`) name of the device(s) on which the node of the
given name is found. Returns a `str` if there is only one such device,
otherwise return a `list` of `str`.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if not self._debug_graphs:
raise LookupError(
"Node devices are not loaded from partition graphs yet.")
if node_name not in self._node_devices:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
output = list(self._node_devices[node_name])
return output[0] if len(output) == 1 else output
def node_op_type(self, node_name, device_name=None):
"""Get the op type of given node.
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`str`) op type of the node.
Raises:
LookupError: If node op types have not been loaded
from partition graphs yet.
"""
if not self._debug_graphs:
raise LookupError(
"Node op types are not loaded from partition graphs yet.")
device_name = self._infer_device_name(device_name, node_name)
return self._debug_graphs[device_name].node_op_types[node_name]
def debug_watch_keys(self, node_name, device_name=None):
"""Get all tensor watch keys of given node according to partition graphs.
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all debug tensor watch keys. Returns an empty list if
the node name does not correspond to any debug watch keys.
Raises:
`LookupError`: If debug watch information has not been loaded from
partition graphs yet.
"""
try:
device_name = self._infer_device_name(device_name, node_name)
except ValueError:
return []
if node_name not in self._debug_watches[device_name]:
return []
watch_keys = []
for watched_slot in self._debug_watches[device_name][node_name]:
debug_ops = self._debug_watches[device_name][node_name][watched_slot]
for debug_op in debug_ops:
watch_keys.append(
_get_tensor_watch_key(node_name, watched_slot, debug_op))
return watch_keys
def watch_key_to_data(self, debug_watch_key, device_name=None):
"""Get all `DebugTensorDatum` instances corresponding to a debug watch key.
Args:
debug_watch_key: (`str`) debug watch key.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
A list of `DebugTensorDatum` instances that correspond to the debug watch
key. If the watch key does not exist, returns an empty list.
Raises:
ValueError: If there are multiple devices that have the debug_watch_key,
but device_name is not specified.
"""
if device_name is None:
matching_device_names = [
name for name in self._watch_key_to_datum
if debug_watch_key in self._watch_key_to_datum[name]]
if not matching_device_names:
return []
elif len(matching_device_names) == 1:
device_name = matching_device_names[0]
else:
raise ValueError(
"The debug watch key '%s' exists on multiple (%d) devices, but "
"device name is not specified." %
(debug_watch_key, len(matching_device_names)))
    elif device_name not in self._watch_key_to_datum:
      raise ValueError(
          "There is no device named '%s' with debug watch keys." %
          device_name)
return self._watch_key_to_datum[device_name].get(debug_watch_key, [])
def find(self,
predicate,
first_n=0,
device_name=None,
exclude_node_names=None):
"""Find dumped tensor data by a certain predicate.
Args:
predicate: A callable that takes two input arguments:
```python
def predicate(debug_tensor_datum, tensor):
# returns a bool
```
        where `debug_tensor_datum` is an instance of `DebugTensorDatum`, which
        carries the metadata, such as the `Tensor`'s node name, output slot,
        timestamp, debug op name, etc.; and `tensor` is the dumped tensor value
        as a `numpy.ndarray`.
      first_n: (`int`) return only the first n `DebugTensorDatum` instances (in
        time order) for which the predicate returns True. To return all the
        `DebugTensorDatum` instances, let first_n be <= 0.
device_name: optional device name.
exclude_node_names: Optional regular expression to exclude nodes with
names matching the regular expression.
Returns:
A list of all `DebugTensorDatum` objects in this `DebugDumpDir` object
for which predicate returns True, sorted in ascending order of the
timestamp.
"""
if exclude_node_names:
exclude_node_names = re.compile(exclude_node_names)
matched_data = []
    for device in (self._dump_tensor_data if device_name is None
                   else (device_name,)):
for datum in self._dump_tensor_data[device]:
if exclude_node_names and exclude_node_names.match(datum.node_name):
continue
if predicate(datum, datum.get_tensor()):
matched_data.append(datum)
if first_n > 0 and len(matched_data) >= first_n:
return matched_data
return matched_data
def get_tensor_file_paths(self,
node_name,
output_slot,
debug_op,
device_name=None):
"""Get the file paths from a debug-dumped tensor.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
List of file path(s) loaded. This is a list because each debugged tensor
may be dumped multiple times.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in
the debug-dump data.
"""
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump of device %s" %
(watch_key, device_name))
return [datum.file_path for datum in
self._watch_key_to_datum[device_name][watch_key]]
def get_tensors(self, node_name, output_slot, debug_op, device_name=None):
"""Get the tensor value from for a debug-dumped tensor.
The tensor may be dumped multiple times in the dump root directory, so a
list of tensors (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
List of tensors (`numpy.ndarray`) loaded from the debug-dump file(s).
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in
the debug-dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
try:
device_name = self._infer_device_name(device_name, node_name)
return [datum.get_tensor() for datum in
self._watch_key_to_datum[device_name][watch_key]]
except (ValueError, KeyError):
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump of device %s" %
(watch_key, device_name))
def get_rel_timestamps(self,
node_name,
output_slot,
debug_op,
device_name=None):
"""Get the relative timestamp from for a debug-dumped tensor.
Relative timestamp means (absolute timestamp - `t0`), where `t0` is the
absolute timestamp of the first dumped tensor in the dump root. The tensor
may be dumped multiple times in the dump root directory, so a list of
relative timestamps (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
(`list` of `int`) list of relative timestamps.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not
exist in the debug dump data.
"""
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump" % watch_key)
# TODO(cais): Figure out whether this should be relative to the global t0.
return self._watch_key_to_rel_time[device_name][watch_key]
def get_dump_sizes_bytes(self,
node_name,
output_slot,
debug_op,
device_name=None):
"""Get the sizes of the dump files for a debug-dumped tensor.
Unit of the file size: byte.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
(`list` of `int`): list of dump file sizes in bytes.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not
exist in the debug dump data.
"""
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError(
"Watch key \"%s\" does not exist in the debug dump of device %s" %
(watch_key, device_name))
return self._watch_key_to_dump_size_bytes[device_name][watch_key]
def node_traceback(self, element_name):
"""Try to retrieve the Python traceback of node's construction.
Args:
element_name: (`str`) Name of a graph element (node or tensor).
Returns:
      (list) The traceback list object, in the same format as that returned by
        the `extract_stack` function of Python's `traceback` module.
Raises:
LookupError: If Python graph is not available for traceback lookup.
KeyError: If the node cannot be found in the Python graph loaded.
"""
if self._python_graph is None:
raise LookupError("Python graph is not available for traceback lookup")
node_name = debug_graphs.get_node_name(element_name)
if node_name not in self._node_traceback:
raise KeyError("Cannot find node \"%s\" in Python graph" % node_name)
return self._node_traceback[node_name]
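# Hedged end-to-end sketch (illustrative; not part of the original module):
# load a dump directory written by a tfdbg-wrapped Session and inspect it.
# `dump_root` is hypothetical, and the watch ("ns_1/node_a", 0,
# "DebugIdentity") is a placeholder for a real watched tensor.
def _demo_debug_dump_dir(dump_root):
  dump = DebugDumpDir(dump_root)
  print("Devices: %s; total dumped tensors: %d" % (dump.devices(), dump.size))
  if dump.loaded_partition_graphs():
    node_name = next(iter(dump.nodes()))
    print("Watch keys of %s: %s" % (node_name,
                                    dump.debug_watch_keys(node_name)))
  # Each watched tensor may have been dumped more than once.
  return dump.get_tensors("ns_1/node_a", 0, "DebugIdentity")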
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_data.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for common values and methods of TensorFlow Debugger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.python.debug.lib import common
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class CommonTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testOnFeedOneFetch(self):
a = constant_op.constant(10.0, name="a")
b = constant_op.constant(20.0, name="b")
run_key = common.get_run_key({"a": a}, [b])
loaded = json.loads(run_key)
self.assertItemsEqual(["a:0"], loaded[0])
self.assertItemsEqual(["b:0"], loaded[1])
@test_util.run_deprecated_v1
def testGetRunKeyFlat(self):
a = constant_op.constant(10.0, name="a")
b = constant_op.constant(20.0, name="b")
run_key = common.get_run_key({"a": a}, [a, b])
loaded = json.loads(run_key)
self.assertItemsEqual(["a:0"], loaded[0])
self.assertItemsEqual(["a:0", "b:0"], loaded[1])
@test_util.run_deprecated_v1
def testGetRunKeyNestedFetches(self):
a = constant_op.constant(10.0, name="a")
b = constant_op.constant(20.0, name="b")
c = constant_op.constant(30.0, name="c")
d = constant_op.constant(30.0, name="d")
run_key = common.get_run_key(
{}, {"set1": [a, b], "set2": {"c": c, "d": d}})
loaded = json.loads(run_key)
self.assertItemsEqual([], loaded[0])
self.assertItemsEqual(["a:0", "b:0", "c:0", "d:0"], loaded[1])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/common_test.py
|
tensorflow-master
|
tensorflow/python/debug/lib/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods for processing debugger-decorated graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.platform import tf_logging as logging
def parse_node_or_tensor_name(name):
"""Get the node name from a string that can be node or tensor name.
Args:
name: An input node name (e.g., "node_a") or tensor name (e.g.,
"node_a:0"), as a str.
Returns:
    1) The node name, as a str. If the input name is a tensor name, i.e.,
      contains a colon, the final colon and the following output slot
      will be stripped.
2) If the input name is a tensor name, the output slot, as an int. If
the input name is not a tensor name, None.
"""
if ":" in name and not name.endswith(":"):
node_name = name[:name.rfind(":")]
output_slot = int(name[name.rfind(":") + 1:])
return node_name, output_slot
else:
return name, None
def get_node_name(element_name):
node_name, _ = parse_node_or_tensor_name(element_name)
return node_name
def get_output_slot(element_name):
"""Get the output slot number from the name of a graph element.
  If element_name is a node name without an output slot suffix, 0 is assumed.
Args:
element_name: (`str`) name of the graph element in question.
Returns:
(`int`) output slot number.
"""
_, output_slot = parse_node_or_tensor_name(element_name)
return output_slot if output_slot is not None else 0
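# Illustrative sketch (not part of the original file): expected behavior of the
# two name-parsing helpers above, assuming the tensor-name format their
# docstrings describe ("<node_name>:<output_slot>").
def _example_name_parsing():
  """Sanity sketch for parse_node_or_tensor_name() and get_output_slot()."""
  # A tensor name carries an explicit output slot after the final colon.
  assert parse_node_or_tensor_name("hidden/w:1") == ("hidden/w", 1)
  # A bare node name has no slot: the parser reports None for the slot, ...
  assert parse_node_or_tensor_name("hidden/w") == ("hidden/w", None)
  # ... while get_output_slot() falls back to the default slot 0.
  assert get_output_slot("hidden/w:1") == 1
  assert get_output_slot("hidden/w") == 0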
def is_copy_node(node_name):
"""Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug Copy
node.
"""
return node_name.startswith("__copy_")
def is_debug_node(node_name):
"""Determine whether a node name is that of a debug node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug node.
"""
return node_name.startswith("__dbg_")
def parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
    4. Name of the debug op, as a str, e.g., "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
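# Illustrative sketch (not part of the original file): the naming scheme
# handled by the predicates and the parser above. The debug node name used
# here is a hypothetical example following the documented
# "__dbg_<tensor_name>_<debug_op_index>_<debug_op>" pattern.
def _example_debug_node_name_parsing():
  """Sanity sketch for is_copy_node(), is_debug_node(), parse_debug_node_name()."""
  assert is_copy_node("__copy_node_a_0")
  assert is_debug_node("__dbg_node_a:0_0_DebugIdentity")
  # Returns (watched node, output slot, debug op index, debug op name).
  assert parse_debug_node_name("__dbg_node_a:0_0_DebugIdentity") == (
      "node_a", 0, 0, "DebugIdentity")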
class GraphTracingReachedDestination(Exception):
pass
class DFSGraphTracer(object):
"""Graph input tracer using depth-first search."""
def __init__(self,
input_lists,
skip_node_names=None,
destination_node_name=None):
"""Constructor of _DFSGraphTracer.
Args:
input_lists: A list of dicts. Each dict is an adjacency (input) map from
the recipient node name as the key and the list of input node names
as the value.
skip_node_names: Optional: a list of node names to skip tracing.
      destination_node_name: Optional: destination node name. If not `None`, it
        should be the name of the destination node, as a str, and graph tracing
        will raise GraphTracingReachedDestination as soon as that node has been
        reached.
Raises:
      GraphTracingReachedDestination: if destination_node_name is not None and
        the specified node is reached.
"""
self._input_lists = input_lists
    # Default to an empty list because trace() performs membership tests on
    # this collection.
    self._skip_node_names = skip_node_names or []
self._inputs = []
self._visited_nodes = []
self._depth_count = 0
self._depth_list = []
self._destination_node_name = destination_node_name
def trace(self, graph_element_name):
"""Trace inputs.
Args:
graph_element_name: Name of the node or an output tensor of the node, as a
str.
Raises:
GraphTracingReachedDestination: if destination_node_name of this tracer
object is not None and the specified node is reached.
"""
self._depth_count += 1
node_name = get_node_name(graph_element_name)
if node_name == self._destination_node_name:
raise GraphTracingReachedDestination()
if node_name in self._skip_node_names:
return
if node_name in self._visited_nodes:
return
self._visited_nodes.append(node_name)
for input_list in self._input_lists:
if node_name not in input_list:
continue
for inp in input_list[node_name]:
if get_node_name(inp) in self._visited_nodes:
continue
self._inputs.append(inp)
self._depth_list.append(self._depth_count)
self.trace(inp)
self._depth_count -= 1
def inputs(self):
return self._inputs
def depth_list(self):
return self._depth_list
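# Illustrative sketch (not part of the original file): tracing a toy
# three-node graph c <- {a, b}, b <- a with the tracer above. skip_node_names
# is passed explicitly as an empty list for clarity.
def _example_dfs_trace():
  """Sanity sketch for DFSGraphTracer on a tiny adjacency map."""
  tracer = DFSGraphTracer(
      [{"c": ["a", "b"], "b": ["a"]}], skip_node_names=[])
  tracer.trace("c")
  # Both inputs are found at depth 1; the second path to "a" (via "b") is
  # pruned because "a" has already been visited.
  assert tracer.inputs() == ["a", "b"]
  assert tracer.depth_list() == [1, 1]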
def _infer_device_name(graph_def):
"""Infer device name from a partition GraphDef."""
device_name = None
for node in graph_def.node:
if node.device:
device_name = node.device
break
if device_name is None:
logging.warn(
"Failed to infer device name from partition GraphDef: none of the "
"nodes of the GraphDef has a non-empty device name.")
return device_name
class DebugGraph(object):
"""Represents a debugger-decorated graph."""
def __init__(self, debug_graph_def, device_name=None):
self._debug_graph_def = debug_graph_def
self._non_debug_graph_def = None
self._node_attributes = {}
self._node_inputs = {}
self._node_reversed_ref_inputs = {}
self._node_ctrl_inputs = {}
self._node_recipients = {}
self._node_ctrl_recipients = {}
self._node_devices = {}
self._node_op_types = {}
self._copy_send_nodes = []
self._ref_args = {}
self._device_name = device_name
if not self._device_name:
self._device_name = _infer_device_name(debug_graph_def)
for node in debug_graph_def.node:
self._process_debug_graph_node(node)
self._prune_non_control_edges_of_debug_ops()
self._prune_control_edges_of_debug_ops()
self._prune_nodes_from_input_and_recipient_maps(self._get_copy_nodes())
self._populate_recipient_maps()
def _process_debug_graph_node(self, node):
"""Process a node from the debug GraphDef.
Args:
node: (NodeDef) A partition-graph node to be processed.
Raises:
ValueError: If duplicate node names are encountered.
"""
if is_debug_node(node.name):
      # This is a debug node inserted by the debugger. Do not include it in
      # the graph structure maps built below.
return
if node.name in self._node_inputs:
raise ValueError("Duplicate node name on device %s: '%s'" %
(self._device_name, node.name))
self._node_attributes[node.name] = node.attr
self._node_inputs[node.name] = []
self._node_ctrl_inputs[node.name] = []
self._node_recipients[node.name] = []
self._node_ctrl_recipients[node.name] = []
if node.name not in self._node_devices:
self._node_devices[node.name] = set()
self._node_devices[node.name].add(
node.device if node.device else self._device_name)
self._node_op_types[node.name] = node.op
self._ref_args[node.name] = self._get_ref_args(node)
for inp in node.input:
if is_copy_node(inp) and (node.op == "_Send" or node.op == "_Retval"):
self._copy_send_nodes.append(node.name)
if inp.startswith("^"):
cinp = inp[1:]
self._node_ctrl_inputs[node.name].append(cinp)
else:
self._node_inputs[node.name].append(inp)
def _get_ref_args(self, node):
"""Determine whether an input of an op is ref-type.
Args:
node: A `NodeDef`.
Returns:
A list of the arg names (as strs) that are ref-type.
"""
op_def = op_def_registry.get_registered_ops().get(node.op)
ref_args = []
if op_def:
for i, output_arg in enumerate(op_def.output_arg):
if output_arg.is_ref:
arg_name = node.name if i == 0 else ("%s:%d" % (node.name, i))
ref_args.append(arg_name)
return ref_args
def _get_copy_nodes(self):
"""Find all Copy nodes in the loaded graph."""
copy_nodes = []
for node in self._node_inputs:
if is_copy_node(node):
copy_nodes.append(node)
return copy_nodes
def _prune_non_control_edges_of_debug_ops(self):
"""Prune (non-control) edges related to debug ops.
    Prune the Copy ops and associated _Send ops inserted by the debugger from
    the non-control input and output-recipient maps, replacing them with the
    original inputs and recipients.
"""
for node in self._node_inputs:
inputs = self._node_inputs[node]
for i in xrange(len(inputs)):
inp = inputs[i]
if is_copy_node(inp):
# Find the input to the Copy node, which should be the original
# input to the node.
orig_inp = self._node_inputs[inp][0]
inputs[i] = orig_inp
def _prune_control_edges_of_debug_ops(self):
"""Prune control edges related to the debug ops."""
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
debug_op_inputs = []
for ctrl_inp in ctrl_inputs:
if is_debug_node(ctrl_inp):
debug_op_inputs.append(ctrl_inp)
for debug_op_inp in debug_op_inputs:
ctrl_inputs.remove(debug_op_inp)
def _populate_recipient_maps(self):
"""Populate the map from node name to recipient(s) of its output(s).
This method also populates the input map based on reversed ref edges.
"""
for node in self._node_inputs:
inputs = self._node_inputs[node]
for inp in inputs:
inp = get_node_name(inp)
if inp not in self._node_recipients:
self._node_recipients[inp] = []
self._node_recipients[inp].append(node)
if inp in self._ref_args:
if inp not in self._node_reversed_ref_inputs:
self._node_reversed_ref_inputs[inp] = []
self._node_reversed_ref_inputs[inp].append(node)
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
for ctrl_inp in ctrl_inputs:
if ctrl_inp in self._copy_send_nodes:
continue
if ctrl_inp not in self._node_ctrl_recipients:
self._node_ctrl_recipients[ctrl_inp] = []
self._node_ctrl_recipients[ctrl_inp].append(node)
def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):
"""Prune nodes out of input and recipient maps.
Args:
nodes_to_prune: (`list` of `str`) Names of the nodes to be pruned.
"""
for node in nodes_to_prune:
del self._node_inputs[node]
del self._node_ctrl_inputs[node]
del self._node_recipients[node]
del self._node_ctrl_recipients[node]
def _reconstruct_non_debug_graph_def(self):
"""Reconstruct non-debug GraphDef.
    Non-debug GraphDef means the original GraphDef without the Copy* and
    Debug* nodes inserted by the debugger.
"""
if self._non_debug_graph_def:
return
self._non_debug_graph_def = graph_pb2.GraphDef()
for node in self._debug_graph_def.node:
if is_copy_node(node.name) or is_debug_node(node.name):
continue
new_node = self._non_debug_graph_def.node.add()
new_node.CopyFrom(node)
# Redo the list of inputs, because in _debug_graph_def, the list can
# consist of Copy* and Debug* nodes inserted by the debugger. Those will
# be replaced with the original inputs here.
del new_node.input[:]
for inp in self._node_inputs[node.name]:
new_node.input.append(inp)
for ctrl_inp in self._node_ctrl_inputs[node.name]:
new_node.input.append("^" + ctrl_inp)
@property
def device_name(self):
return self._device_name
@property
def debug_graph_def(self):
"""The debugger-decorated GraphDef."""
return self._debug_graph_def
@property
def non_debug_graph_def(self):
"""The GraphDef without the Copy* and Debug* nodes added by the debugger."""
self._reconstruct_non_debug_graph_def()
return self._non_debug_graph_def
@property
def node_devices(self):
return self._node_devices
@property
def node_op_types(self):
return self._node_op_types
@property
def node_attributes(self):
return self._node_attributes
@property
def node_inputs(self):
return self._node_inputs
@property
def node_ctrl_inputs(self):
return self._node_ctrl_inputs
@property
def node_reversed_ref_inputs(self):
return self._node_reversed_ref_inputs
@property
def node_recipients(self):
return self._node_recipients
@property
def node_ctrl_recipients(self):
return self._node_ctrl_recipients
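# Illustrative sketch (not part of the original file): minimal use of
# DebugGraph on a hand-built two-node GraphDef. The node names, ops and device
# string are assumptions chosen only to exercise the input/recipient maps.
def _example_debug_graph():
  """Sanity sketch for DebugGraph input/recipient bookkeeping."""
  graph_def = graph_pb2.GraphDef()
  node_a = graph_def.node.add()
  node_a.name = "a"
  node_a.op = "Const"
  node_a.device = "/job:localhost/replica:0/task:0/device:CPU:0"
  node_b = graph_def.node.add()
  node_b.name = "b"
  node_b.op = "Identity"
  node_b.input.append("a")
  debug_graph = DebugGraph(graph_def)
  # "b" consumes "a"; conversely, "b" is the sole recipient of "a".
  assert debug_graph.node_inputs["b"] == ["a"]
  assert debug_graph.node_recipients["a"] == ["b"]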
def reconstruct_non_debug_graph_def(debug_graph_def):
"""Reconstruct original (non-debugger-decorated) partition GraphDef.
This method strips the input `tf.compat.v1.GraphDef` of the Copy* and
Debug*-type nodes inserted by the debugger.
The reconstructed partition graph is identical to the original (i.e.,
non-debugger-decorated) partition graph except in the following respects:
1) The exact names of the runtime-inserted internal nodes may differ.
These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.
2) As a consequence of 1, the nodes that receive input directly from such
send- and recv-type ops will have different input names.
  3) The parallel_iteration attribute of while-loop Enter ops is set to 1.
Args:
debug_graph_def: The debugger-decorated `tf.compat.v1.GraphDef`, with the
debugger-inserted Copy* and Debug* nodes.
Returns:
The reconstructed `tf.compat.v1.GraphDef` stripped of the debugger-inserted
nodes.
"""
return DebugGraph(debug_graph_def).non_debug_graph_def
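# Illustrative usage (not part of the original file): stripping the
# debugger-inserted nodes from partition graphs captured in a RunMetadata
# proto. `run_metadata` is assumed to come from a Session.run() call that was
# instrumented with debug tensor watches.
#
#   for partition_graph in run_metadata.partition_graphs:
#     original = reconstruct_non_debug_graph_def(partition_graph)
#     # `original` now contains no __copy_* or __dbg_* nodes.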
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_graphs.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sending large-size data through tfdbg grpc channels.
"Large-size data" includes large GraphDef protos and large Tensor protos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
(cls.debug_server_port, cls.debug_server_url, _, cls.debug_server_thread,
cls.debug_server
) = grpc_debug_test_server.start_server_on_separate_thread(
dump_to_filesystem=False)
tf_logging.info("debug server url: %s", cls.debug_server_url)
@classmethod
def tearDownClass(cls):
cls.debug_server.stop_server().wait()
cls.debug_server_thread.join()
def tearDown(self):
ops.reset_default_graph()
self.debug_server.clear_data()
@test_util.run_v1_only("currently failing on v2")
def testSendingLargeGraphDefsWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u = variables.VariableV1(42.0, name="original_u")
for _ in xrange(50 * 1000):
u = array_ops.identity(u)
sess.run(variables.global_variables_initializer())
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"original_u")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
self.assertAllClose(42.0, sess.run(u))
self.assertAllClose(
[42.0],
self.debug_server.debug_tensor_values["original_u:0:DebugIdentity"])
self.assertEqual(2 if test.is_gpu_available() else 1,
len(self.debug_server.partition_graph_defs))
max_graph_def_size = max([
len(graph_def.SerializeToString())
for graph_def in self.debug_server.partition_graph_defs])
self.assertGreater(max_graph_def_size, 4 * 1024 * 1024)
@test_util.run_v1_only("currently failing on v2")
def testSendingLargeFloatTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init_val_array = list(xrange(1200 * 1024))
# Size: 4 * 1200 * 1024 = 4800k > 4M
u_init = constant_op.constant(
u_init_val_array, dtype=dtypes.float32, name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds # Unused by this watch_fn.
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
self.assertAllEqual(
u_init_val_array,
self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])
@test_util.run_v1_only("currently failing on v2")
def testSendingStringTensorWithAlmostTooLargeStringsWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init_val = [
b"", b"spam", b"A" * 2500 * 1024, b"B" * 2500 * 1024, b"egg", b""]
u_init = constant_op.constant(
u_init_val, dtype=dtypes.string, name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
self.assertAllEqual(
u_init_val,
self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])
@test_util.run_v1_only("currently failing on v2")
def testSendingLargeStringTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
strs_total_size_threshold = 5000 * 1024
cum_size = 0
u_init_val_array = []
while cum_size < strs_total_size_threshold:
strlen = np.random.randint(200)
u_init_val_array.append(b"A" * strlen)
cum_size += strlen
u_init = constant_op.constant(
u_init_val_array, dtype=dtypes.string, name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
self.assertAllEqual(
u_init_val_array,
self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])
@test_util.run_v1_only("currently failing on v2")
def testSendingEmptyFloatTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init = constant_op.constant(
[], dtype=dtypes.float32, shape=[0], name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
u_init_value = self.debug_server.debug_tensor_values[
"u_init:0:DebugIdentity"][0]
self.assertEqual(np.float32, u_init_value.dtype)
self.assertEqual(0, len(u_init_value))
@test_util.run_v1_only("currently failing on v2")
def testSendingEmptyStringTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init = constant_op.constant(
[], dtype=dtypes.string, shape=[0], name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
u_init_value = self.debug_server.debug_tensor_values[
"u_init:0:DebugIdentity"][0]
self.assertEqual(np.object, u_init_value.dtype)
self.assertEqual(0, len(u_init_value))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/grpc_large_data_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
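# Illustrative usage (not part of the original file): the tests below pass
# this config to Session so that Grappler rewrites cannot prune or fold away
# the nodes the debugger is asked to watch, e.g.:
#
#   with session.Session(config=no_rewrite_session_config()) as sess:
#     ...  # build the graph and run it with debug watches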
class _RNNCellForTest(rnn_cell_impl.RNNCell):
"""RNN cell for testing."""
def __init__(self, input_output_size, state_size):
self._input_output_size = input_output_size
self._state_size = state_size
self._w = variables.VariableV1(1.0, dtype=dtypes.float32, name="w")
@property
def output_size(self):
return self._input_output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
return (math_ops.multiply(self._w, input_), state)
@test_util.run_v1_only("b/120545219")
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.compat.v1.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
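  # Illustrative usage (not part of the original file): the tests below call
  # this helper as, e.g.,
  #   result, dump = self._debug_run_and_get_dump(sess, fetch)
  # and then query `dump` (a debug_data.DebugDumpDir) for the watched tensors.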
def _generate_dump_from_simple_addition_graph(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.VariableV1(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_graphs.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.VariableV1(str1_init, name=str1_name)
str2 = variables.VariableV1(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
      # Add debug tensor watch for str1.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for str2.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.VariableV1(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
      # Add debug tensor watches for u and s.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
      # Verify the dump files for the uninitialized values of u and s.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
      # Verify that the dumped values reflect the uninitialized state of the
      # variables at the time they were watched.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(u_vals[0].initialized)
self.assertEqual(1, len(s_vals))
self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(s_vals[0].initialized)
      # Call run() again, to check that u and s are now properly initialized.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.VariableV1(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.VariableV1(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(
cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugTrainingDynamicRNNWorks(self):
with session.Session() as sess:
input_size = 3
state_size = 2
time_steps = 4
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
outputs_dynamic, _ = rnn.dynamic_rnn(
_RNNCellForTest(input_size, state_size),
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_blacklists(
run_options,
sess.graph,
node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors
# under both file:// and grpc:// debug URL schemes.
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, feed_dict={concat_inputs: input_values},
options=run_options, run_metadata=run_metadata)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.VariableV1(10.0, name="x")
y = variables.VariableV1(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(3, len(bad_data))
self.assertEqual(x_name, bad_data[0].node_name)
self.assertEqual(y_name, bad_data[1].node_name)
self.assertEqual(z_name, bad_data[2].node_name)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(x_name, first_bad_datum[0].node_name)
def testFindInfOrNanWithOpNameExclusion(self):
with session.Session() as sess:
u_name = "testFindInfOrNanWithOpNameExclusion/u"
v_name = "testFindInfOrNanWithOpNameExclusion/v"
w_name = "testFindInfOrNanWithOpNameExclusion/w"
x_name = "testFindInfOrNanWithOpNameExclusion/x"
y_name = "testFindInfOrNanWithOpNameExclusion/y"
z_name = "testFindInfOrNanWithOpNameExclusion/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
# Find all "offending tensors".
bad_data = dump.find(debug_data.has_inf_or_nan,
exclude_node_names=".*/x$")
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(2, len(bad_data))
# Assert that the node `x` should have been excluded.
self.assertEqual(y_name, bad_data[0].node_name)
self.assertEqual(z_name, bad_data[1].node_name)
first_bad_datum = dump.find(
debug_data.has_inf_or_nan, first_n=1, exclude_node_names=".*/x$")
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(y_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
if test_util.gpu_device_name():
node_names = dump.nodes(
device_name="/job:localhost/replica:0/task:0/device:GPU:0")
else:
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
    # Now load the dump again without supplying the partition graphs, to check
    # that no errors are raised: the partition graphs can be loaded from the
    # dump directory itself.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v1 = variables.VariableV1(1.0, name="v1")
v2 = variables.VariableV1(2.0, name="v2")
v3 = variables.VariableV1(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v = variables.VariableV1(10.0, name="v")
delta = variables.VariableV1(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
      # First, loading the original dump without supplying the
      # partition_graphs should not cause a LookupError: validation occurs
      # only when partition_graphs are loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
# Swap and slightly shift the time stamps of the last two dumped tensors,
# to simulate "causality violation", which can happen if the dump
# directory contains incomplete data and/or mixes data from different
# Session.run() calls.
v_file_path_1 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
v_timestamp - 1)
os.rename(v_file_path, v_file_path_1)
os.rename(w_file_path, w_file_path_1)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
      # Next, set the two timestamps to be the same, which should be fine.
v_file_path_2 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_2 = w_file_path[:w_file_path.rindex(
"_")] + "_%d" % w_timestamp
os.rename(v_file_path_1, v_file_path_2)
os.rename(w_file_path_1, w_file_path_2)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.VariableV1([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though it has two output
# slots.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
_, dump = self._debug_run_and_get_dump(sess, z)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(sess, train_op)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
      # Gradient descent on v: w = u * v, so dw / dv = u.
      # Updated value of v should be:
      # 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.VariableV1(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
y_result, dump = self._debug_run_and_get_dump(sess, y)
self.assertAllClose([2, 4, 7], y_result)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
_, dump1 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_index and
# executor_step_index.
_, dump2 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_index + 1,
dump2.core_metadata.session_run_index)
self.assertEqual(dump1.core_metadata.executor_step_index + 1,
dump2.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_index, but not executor_step_index.
_, dump3 = self._debug_run_and_get_dump(
sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_index + 1,
dump3.core_metadata.session_run_index)
self.assertEqual(0, dump3.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.VariableV1(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(
sess, c, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
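# DebugNumericSummary emits a 1-D tensor laid out as (per the assertions
# below): [0] is_initialized, [1] element count, [2] NaN count,
# [3] -inf count, [4] finite-negative count, [5] zero count,
# [6] finite-positive count, [7] +inf count, [8] min, [9] max, [10] mean,
# [11] variance (min/max/mean/variance taken over finite elements only),
# [12] dtype enum, [13] ndims, [14:] dimension sizes.
# For `a` above: 18 elements, 4 NaNs, 2 -infs, 2 negatives, 3 zeros,
# 2 positives, 5 +infs; finite min/max/mean/variance = -3, 7, 6/7, ~8.98.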
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184, 1.0, 1.0, 18.0
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.VariableV1(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
_, dump = self._debug_run_and_get_dump(
sess, a.initializer, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
# Check dtype (index 12), ndims (index 13) and dimension sizes (index
# 14+).
self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.VariableV1("1", name="a")
b = variables.VariableV1("3", name="b")
c = variables.VariableV1("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
m_result, dump = self._debug_run_and_get_dump(
sess, m, debug_ops=["DebugNumericSummary"],
tolerate_debug_op_creation_failures=True)
self.assertEqual(264, m_result)
# The integer-dtype Tensors in the graph should have been dumped
# properly.
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"2 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary:"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
validate=False)
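# a, b and c are healthy and therefore muted. x = 10.0 / 0.0 = inf and
# y = inf * 0.0 = nan are unhealthy, so exactly two tensors get dumped.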
self.assertEqual(2, dump.size)
self.assertAllClose([[
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("x", 0, "DebugNumericSummary"))
self.assertAllClose([[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("y", 0, "DebugNumericSummary"))
# Another run with the default mute_if_healthy (false) value should
# dump all the tensors.
shutil.rmtree(self._dump_root)
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary()"])
self.assertEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
with session.Session() as sess:
a = variables.VariableV1([10.0, 10.0], name="a")
b = variables.VariableV1([10.0, 2.0], name="b")
x = math_ops.add(a, b, name="x") # [20.0, 12.0]
y = math_ops.divide(x, b, name="y") # [2.0, 6.0]
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=[
"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
validate=False)
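# With upper_bound=11.0, x = [20.0, 12.0] violates the bound and is dumped,
# while a, b and y = [2.0, 6.0] stay within bounds and are muted; hence the
# single dump asserted below.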
self.assertEqual(1, dump.size)
self.assertAllClose([[
1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
_, dump = self._debug_run_and_get_dump(sess, q_init)
self.assertTrue(dump.loaded_partition_graphs())
fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
self.assertIsInstance(fifo_queue_tensor,
debug_data.InconvertibleTensorProto)
self.assertTrue(fifo_queue_tensor.initialized)
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, w)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.VariableV1(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in xrange(len(executor_step_indices) - 1):
self.assertEqual(executor_step_indices[i][1] + 1,
executor_step_indices[i + 1][1])
# Assert that session_run_index increase monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in xrange(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/session_debug_testlib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GRPC debug server for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import errno
import functools
import hashlib
import json
import os
import re
import shutil
import tempfile
import threading
import time
import portpicker
from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_server
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
from tensorflow.python.util import compat
def _get_dump_file_path(dump_root, device_name, debug_node_name):
"""Get the file path of the dump file for a debug node.
Args:
dump_root: (str) Root dump directory.
device_name: (str) Name of the device that the debug node resides on.
debug_node_name: (str) Name of the debug node, e.g.,
cross_entropy/Log:0:DebugIdentity.
Returns:
(str) Full path of the dump file.
"""
dump_root = os.path.join(
dump_root, debug_data.device_name_to_device_path(device_name))
if "/" in debug_node_name:
dump_dir = os.path.join(dump_root, os.path.dirname(debug_node_name))
dump_file_name = re.sub(":", "_", os.path.basename(debug_node_name))
else:
dump_dir = dump_root
dump_file_name = re.sub(":", "_", debug_node_name)
now_microsec = int(round(time.time() * 1000 * 1000))
dump_file_name += "_%d" % now_microsec
return os.path.join(dump_dir, dump_file_name)
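# A usage sketch (values are hypothetical): a call such as
#   _get_dump_file_path("/tmp/dumps", "/job:localhost/replica:0/task:0/cpu:0",
#                       "cross_entropy/Log:0:DebugIdentity")
# returns a path of the form
#   /tmp/dumps/<device_path>/cross_entropy/Log_0_DebugIdentity_<microsec>
# where <device_path> is derived via debug_data.device_name_to_device_path().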
class EventListenerTestStreamHandler(
grpc_debug_server.EventListenerBaseStreamHandler):
"""Implementation of EventListenerBaseStreamHandler that dumps to file."""
def __init__(self, dump_dir, event_listener_servicer):
super(EventListenerTestStreamHandler, self).__init__()
self._dump_dir = dump_dir
self._event_listener_servicer = event_listener_servicer
if self._dump_dir:
self._try_makedirs(self._dump_dir)
self._grpc_path = None
self._cached_graph_defs = []
self._cached_graph_def_device_names = []
self._cached_graph_def_wall_times = []
def on_core_metadata_event(self, event):
self._event_listener_servicer.toggle_watch()
core_metadata = json.loads(event.log_message.message)
if not self._grpc_path:
grpc_path = core_metadata["grpc_path"]
if grpc_path:
if grpc_path.startswith("/"):
grpc_path = grpc_path[1:]
if self._dump_dir:
self._dump_dir = os.path.join(self._dump_dir, grpc_path)
# Write cached graph defs to filesystem.
for graph_def, device_name, wall_time in zip(
self._cached_graph_defs,
self._cached_graph_def_device_names,
self._cached_graph_def_wall_times):
self._write_graph_def(graph_def, device_name, wall_time)
if self._dump_dir:
self._write_core_metadata_event(event)
else:
self._event_listener_servicer.core_metadata_json_strings.append(
event.log_message.message)
def on_graph_def(self, graph_def, device_name, wall_time):
"""Implementation of the tensor value-carrying Event proto callback.
Args:
graph_def: A GraphDef object.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
"""
if self._dump_dir:
if self._grpc_path:
self._write_graph_def(graph_def, device_name, wall_time)
else:
self._cached_graph_defs.append(graph_def)
self._cached_graph_def_device_names.append(device_name)
self._cached_graph_def_wall_times.append(wall_time)
else:
self._event_listener_servicer.partition_graph_defs.append(graph_def)
def on_value_event(self, event):
"""Implementation of the tensor value-carrying Event proto callback.
Writes the Event proto to the file system for testing. The path written to
follows the same pattern as the file:// debug URLs of tfdbg, i.e., the
name scope of the op becomes the directory structure under the dump root
directory.
Args:
event: The Event proto carrying a tensor value.
Returns:
If the debug node belongs to the set of currently activated breakpoints,
an `EventReply` proto will be returned.
"""
if self._dump_dir:
self._write_value_event(event)
else:
value = event.summary.value[0]
tensor_value = debug_data.load_tensor_from_event(event)
self._event_listener_servicer.debug_tensor_values[value.node_name].append(
tensor_value)
items = event.summary.value[0].node_name.split(":")
node_name = items[0]
output_slot = int(items[1])
debug_op = items[2]
if ((node_name, output_slot, debug_op) in
self._event_listener_servicer.breakpoints):
return debug_service_pb2.EventReply()
def _try_makedirs(self, dir_path):
if not os.path.isdir(dir_path):
try:
os.makedirs(dir_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _write_core_metadata_event(self, event):
core_metadata_path = os.path.join(
self._dump_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.CORE_METADATA_TAG +
"_%d" % event.wall_time)
self._try_makedirs(self._dump_dir)
with open(core_metadata_path, "wb") as f:
f.write(event.SerializeToString())
def _write_graph_def(self, graph_def, device_name, wall_time):
encoded_graph_def = graph_def.SerializeToString()
graph_hash = int(hashlib.md5(encoded_graph_def).hexdigest(), 16)
event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time)
graph_file_path = os.path.join(
self._dump_dir,
debug_data.device_name_to_device_path(device_name),
debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG +
debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time))
self._try_makedirs(os.path.dirname(graph_file_path))
with open(graph_file_path, "wb") as f:
f.write(event.SerializeToString())
def _write_value_event(self, event):
value = event.summary.value[0]
# Obtain the device name from the metadata.
summary_metadata = event.summary.value[0].metadata
if not summary_metadata.plugin_data:
raise ValueError("The value lacks plugin data.")
raw_content = compat.as_text(summary_metadata.plugin_data.content)
try:
content = json.loads(raw_content)
except ValueError as err:
# Refer to raw_content here: `content` is never bound if json.loads fails.
raise ValueError("Could not parse content into JSON: %r, %r" % (raw_content,
                                                                err))
device_name = content["device"]
dump_full_path = _get_dump_file_path(
self._dump_dir, device_name, value.node_name)
self._try_makedirs(os.path.dirname(dump_full_path))
with open(dump_full_path, "wb") as f:
f.write(event.SerializeToString())
class EventListenerTestServicer(grpc_debug_server.EventListenerBaseServicer):
"""An implementation of EventListenerBaseServicer for testing."""
def __init__(self, server_port, dump_dir, toggle_watch_on_core_metadata=None):
"""Constructor of EventListenerTestServicer.
Args:
server_port: (int) The server port number.
dump_dir: (str) The root directory to which the data files will be
dumped. If empty or None, the received debug data will not be dumped
to the file system: they will be stored in memory instead.
toggle_watch_on_core_metadata: A list of
(node_name, output_slot, debug_op) tuples to toggle the
watchpoint status during the on_core_metadata calls (optional).
"""
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
self._initialize_toggle_watch_state(toggle_watch_on_core_metadata)
grpc_debug_server.EventListenerBaseServicer.__init__(
self, server_port,
functools.partial(EventListenerTestStreamHandler, dump_dir, self))
# Members for storing the graph ops traceback and source files.
self._call_types = []
self._call_keys = []
self._origin_stacks = []
self._origin_id_to_strings = []
self._graph_tracebacks = []
self._graph_versions = []
self._source_files = []
def _initialize_toggle_watch_state(self, toggle_watches):
self._toggle_watches = toggle_watches
self._toggle_watch_state = {}
if self._toggle_watches:
for watch_key in self._toggle_watches:
self._toggle_watch_state[watch_key] = False
def toggle_watch(self):
for watch_key in self._toggle_watch_state:
node_name, output_slot, debug_op = watch_key
if self._toggle_watch_state[watch_key]:
self.request_unwatch(node_name, output_slot, debug_op)
else:
self.request_watch(node_name, output_slot, debug_op)
self._toggle_watch_state[watch_key] = (
not self._toggle_watch_state[watch_key])
def clear_data(self):
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
self._call_types = []
self._call_keys = []
self._origin_stacks = []
self._origin_id_to_strings = []
self._graph_tracebacks = []
self._graph_versions = []
self._source_files = []
def SendTracebacks(self, request, context):
self._call_types.append(request.call_type)
self._call_keys.append(request.call_key)
self._origin_stacks.append(request.origin_stack)
self._origin_id_to_strings.append(request.origin_id_to_string)
self._graph_tracebacks.append(request.graph_traceback)
self._graph_versions.append(request.graph_version)
return debug_service_pb2.EventReply()
def SendSourceFiles(self, request, context):
self._source_files.append(request)
return debug_service_pb2.EventReply()
def query_op_traceback(self, op_name):
"""Query the traceback of an op.
Args:
op_name: Name of the op to query.
Returns:
The traceback of the op, as a list of 3-tuples:
(filename, lineno, function_name)
Raises:
ValueError: If the op cannot be found in the tracebacks received by the
server so far.
"""
for op_log_proto in self._graph_tracebacks:
for log_entry in op_log_proto.log_entries:
if log_entry.name == op_name:
return self._code_def_to_traceback(log_entry.code_def,
op_log_proto.id_to_string)
raise ValueError(
"Op '%s' does not exist in the tracebacks received by the debug "
"server." % op_name)
def query_origin_stack(self):
"""Query the stack of the origin of the execution call.
Returns:
A `list` of all tracebacks. Each item corresponds to an execution call,
i.e., a `SendTracebacks` request. Each item is a `list` of 3-tuples:
(filename, lineno, function_name).
"""
ret = []
for stack, id_to_string in zip(
self._origin_stacks, self._origin_id_to_strings):
ret.append(self._code_def_to_traceback(stack, id_to_string))
return ret
def query_call_types(self):
return self._call_types
def query_call_keys(self):
return self._call_keys
def query_graph_versions(self):
return self._graph_versions
def query_source_file_line(self, file_path, lineno):
"""Query the content of a given line in a source file.
Args:
file_path: Path to the source file.
lineno: Line number as an `int`.
Returns:
Content of the line as a string.
Raises:
ValueError: If no source file is found at the given file_path.
"""
if not self._source_files:
raise ValueError(
"This debug server has not received any source file contents yet.")
for source_files in self._source_files:
for source_file_proto in source_files.source_files:
if source_file_proto.file_path == file_path:
return source_file_proto.lines[lineno - 1]
raise ValueError(
"Source file at path %s has not been received by the debug server." %
file_path)
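# Note: CodeDef protos store interned string ids rather than raw strings;
# id_to_string maps each trace's file_id and function_id back to the actual
# file path and function name when reconstructing traceback tuples.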
def _code_def_to_traceback(self, code_def, id_to_string):
return [(id_to_string[trace.file_id],
trace.lineno,
id_to_string[trace.function_id]) for trace in code_def.traces]
def start_server_on_separate_thread(dump_to_filesystem=True,
server_start_delay_sec=0.0,
poll_server=False,
blocking=True,
toggle_watch_on_core_metadata=None):
"""Create a test gRPC debug server and run on a separate thread.
Args:
dump_to_filesystem: (bool) whether the debug server will dump debug data
to the filesystem.
server_start_delay_sec: (float) amount of time (in sec) to delay the server
start up for.
poll_server: (bool) whether the server will be polled till success on
startup.
blocking: (bool) whether the server should be started in a blocking mode.
toggle_watch_on_core_metadata: A list of
(node_name, output_slot, debug_op) tuples to toggle the
watchpoint status during the on_core_metadata calls (optional).
Returns:
server_port: (int) Port on which the server runs.
debug_server_url: (str) grpc:// URL to the server.
server_dump_dir: (str) The debug server's dump directory.
server_thread: The server Thread object.
server: The `EventListenerTestServicer` object.
Raises:
ValueError: If polling the server process for ready state is not successful
within maximum polling count.
"""
server_port = portpicker.pick_unused_port()
debug_server_url = "grpc://localhost:%d" % server_port
server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None
server = EventListenerTestServicer(
server_port=server_port,
dump_dir=server_dump_dir,
toggle_watch_on_core_metadata=toggle_watch_on_core_metadata)
def delay_then_run_server():
time.sleep(server_start_delay_sec)
server.run_server(blocking=blocking)
server_thread = threading.Thread(target=delay_then_run_server)
server_thread.start()
if poll_server:
if not _poll_server_till_success(
50,
0.2,
debug_server_url,
server_dump_dir,
server,
gpu_memory_fraction=0.1):
raise ValueError(
"Failed to start test gRPC debug server at port %d" % server_port)
server.clear_data()
return server_port, debug_server_url, server_dump_dir, server_thread, server
def _poll_server_till_success(max_attempts,
sleep_per_poll_sec,
debug_server_url,
dump_dir,
server,
gpu_memory_fraction=1.0):
"""Poll server until success or exceeding max polling count.
Args:
max_attempts: (int) How many times to poll at maximum
sleep_per_poll_sec: (float) How many seconds to sleep for after each
unsuccessful poll.
debug_server_url: (str) gRPC URL to the debug server.
dump_dir: (str) Dump directory to look for files in. If None, will directly
check data from the server object.
server: The server object.
gpu_memory_fraction: (float) Fraction of GPU memory to be
allocated for the Session used in server polling.
Returns:
(bool) Whether the polling succeeded within max_attempts polls.
"""
poll_count = 0
config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction))
with session.Session(config=config) as sess:
for poll_count in range(max_attempts):
server.clear_data()
print("Polling: poll_count = %d" % poll_count)
x_init_name = "x_init_%d" % poll_count
x_init = constant_op.constant([42.0], shape=[1], name=x_init_name)
x = variables.Variable(x_init, name=x_init_name)
run_options = config_pb2.RunOptions()
debug_utils.add_debug_tensor_watch(
run_options, x_init_name, 0, debug_urls=[debug_server_url])
try:
sess.run(x.initializer, options=run_options)
except errors.FailedPreconditionError:
pass
if dump_dir:
if os.path.isdir(
dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0:
shutil.rmtree(dump_dir)
print("Poll succeeded.")
return True
else:
print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
time.sleep(sleep_per_poll_sec)
else:
if server.debug_tensor_values:
print("Poll succeeded.")
return True
else:
print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
time.sleep(sleep_per_poll_sec)
return False
|
tensorflow-master
|
tensorflow/python/debug/lib/grpc_debug_test_server.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common values and methods for TensorFlow Debugger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
GRPC_URL_PREFIX = "grpc://"
# A key for a Session.run() call.
RunKey = collections.namedtuple("RunKey", ["feed_names", "fetch_names"])
def get_graph_element_name(elem):
"""Obtain the name or string representation of a graph element.
If the graph element has the attribute "name", return name. Otherwise, return
a __str__ representation of the graph element. Certain graph elements, such as
`SparseTensor`s, do not have the attribute "name".
Args:
elem: The graph element in question.
Returns:
If the attribute 'name' is available, return the name. Otherwise, return
str(elem).
"""
return elem.name if hasattr(elem, "name") else str(elem)
def get_flattened_names(feeds_or_fetches):
"""Get a flattened list of the names in run() call feeds or fetches.
Args:
feeds_or_fetches: Feeds or fetches of the `Session.run()` call. It may be
a Tensor, an Operation or a Variable. It may also be nested lists, tuples
or dicts. See doc of `Session.run()` for more details.
Returns:
(list of str) A flattened list of fetch names from `feeds_or_fetches`.
"""
lines = []
if isinstance(feeds_or_fetches, (list, tuple)):
for item in feeds_or_fetches:
lines.extend(get_flattened_names(item))
elif isinstance(feeds_or_fetches, dict):
for key in feeds_or_fetches:
lines.extend(get_flattened_names(feeds_or_fetches[key]))
else:
# This ought to be a Tensor, an Operation or a Variable, for which the name
# attribute should be available. (Bottom-out condition of the recursion.)
lines.append(get_graph_element_name(feeds_or_fetches))
return lines
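# A minimal sketch of the flattening behavior (tensor names hypothetical):
#   get_flattened_names([x, (y, {"key": z})]) == [x.name, y.name, z.name]
# i.e., arbitrarily nested lists/tuples/dicts bottom out at graph elements,
# whose names are collected into one flat list.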
def get_run_key(feed_dict, fetches):
"""Summarize the names of feeds and fetches as a RunKey JSON string.
Args:
feed_dict: The feed_dict given to the `Session.run()` call.
fetches: The fetches from the `Session.run()` call.
Returns:
A JSON array consisting of two items. The first item is a flattened
array of the names of the feeds. The second item is a flattened array of
the names of the fetches.
"""
return json.dumps(RunKey(get_flattened_names(feed_dict),
get_flattened_names(fetches)))
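# For example, with a single feed named "a:0" and a single fetch named "b:0",
# get_run_key returns the JSON string '[["a:0"], ["b:0"]]': the flattened
# feed names followed by the flattened fetch names.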
|
tensorflow-master
|
tensorflow/python/debug/lib/common.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow Debugger (tfdbg) Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class DebugUtilsTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._sess = session.Session()
with cls._sess:
cls._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
cls._b_init_val = np.array([[2.0], [-1.0]])
cls._c_val = np.array([[-4.0], [np.nan]])
cls._a_init = constant_op.constant(
cls._a_init_val, shape=[2, 2], name="a1_init")
cls._b_init = constant_op.constant(
cls._b_init_val, shape=[2, 1], name="b_init")
cls._a = variables.VariableV1(cls._a_init, name="a1")
cls._b = variables.VariableV1(cls._b_init, name="b")
cls._c = constant_op.constant(cls._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
cls._p = math_ops.matmul(cls._a, cls._b, name="p1")
# Sum of two vectors.
cls._s = math_ops.add(cls._p, cls._c, name="s")
cls._graph = cls._sess.graph
# These are all the expected nodes in the graph:
# Two variables (a, b), each with four nodes (Variable, init, Assign,
# read).
# One constant (c).
# One add operation and one matmul operation.
cls._expected_num_nodes = 4 * 2 + 1 + 1 + 1
def setUp(self):
self._run_options = config_pb2.RunOptions()
def _verify_watches(self, watch_opts, expected_output_slot,
expected_debug_ops, expected_debug_urls):
"""Verify a list of debug tensor watches.
This requires all watches in the watch list have exactly the same
output_slot, debug_ops and debug_urls.
Args:
watch_opts: Repeated protobuf field of DebugTensorWatch.
expected_output_slot: Expected output slot index, as an integer.
expected_debug_ops: Expected debug ops, as a list of strings.
expected_debug_urls: Expected debug URLs, as a list of strings.
Returns:
List of node names from the list of debug tensor watches.
"""
node_names = []
for watch in watch_opts:
node_names.append(watch.node_name)
self.assertEqual(expected_output_slot, watch.output_slot)
self.assertEqual(expected_debug_ops, watch.debug_ops)
self.assertEqual(expected_debug_urls, watch.debug_urls)
return node_names
def testAddDebugTensorWatches_defaultDebugOp(self):
debug_utils.add_debug_tensor_watch(
self._run_options, "foo/node_a", 1, debug_urls="file:///tmp/tfdbg_1")
debug_utils.add_debug_tensor_watch(
self._run_options, "foo/node_b", 0, debug_urls="file:///tmp/tfdbg_2")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(2, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
watch_1 = debug_watch_opts[1]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(1, watch_0.output_slot)
self.assertEqual("foo/node_b", watch_1.node_name)
self.assertEqual(0, watch_1.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugIdentity"], watch_0.debug_ops)
self.assertEqual(["DebugIdentity"], watch_1.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
self.assertEqual(["file:///tmp/tfdbg_2"], watch_1.debug_urls)
def testAddDebugTensorWatches_explicitDebugOp(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops="DebugNanCount",
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
def testAddDebugTensorWatches_multipleDebugOps(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops=["DebugNanCount", "DebugIdentity"],
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount", "DebugIdentity"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
def testAddDebugTensorWatches_multipleURLs(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops="DebugNanCount",
debug_urls=["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"])
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"],
watch_0.debug_urls)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_allNodes(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_ops=["DebugIdentity", "DebugNanCount"],
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(self._expected_num_nodes, len(debug_watch_opts))
# Verify that each of the nodes in the graph with output tensors in the
# graph have debug tensor watch.
node_names = self._verify_watches(debug_watch_opts, 0,
["DebugIdentity", "DebugNanCount"],
["file:///tmp/tfdbg_1"])
# Verify the node names.
self.assertTrue("a1_init" in node_names)
self.assertTrue("a1" in node_names)
self.assertTrue("a1/Assign" in node_names)
self.assertTrue("a1/read" in node_names)
self.assertTrue("b_init" in node_names)
self.assertTrue("b" in node_names)
self.assertTrue("b/Assign" in node_names)
self.assertTrue("b/read" in node_names)
self.assertTrue("c" in node_names)
self.assertTrue("p1" in node_names)
self.assertTrue("s" in node_names)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameWhitelist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_whitelist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(
sorted(["a1_init", "a1", "a1/Assign", "a1/read", "p1"]),
sorted(node_names))
@test_util.run_v1_only("b/120545219")
def testWatchGraph_opTypeWhitelist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_whitelist="(Variable|MatMul)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["a1", "b", "p1"]), sorted(node_names))
def testWatchGraph_nodeNameAndOpTypeWhitelists(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_whitelist="([a-z]+1$)",
op_type_regex_whitelist="(MatMul)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["p1"], node_names)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_tensorDTypeWhitelist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_whitelist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign", "b", "b/Assign"], node_names)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameAndTensorDTypeWhitelists(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_whitelist="^a.*",
tensor_dtype_regex_whitelist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign"], node_names)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameBlacklist(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_blacklist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(
sorted(["b_init", "b", "b/Assign", "b/read", "c", "s"]),
sorted(node_names))
@test_util.run_v1_only("b/120545219")
def testWatchGraph_opTypeBlacklist(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_blacklist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["p1", "s"]), sorted(node_names))
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameAndOpTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_blacklist="p1$",
op_type_regex_blacklist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["s"], node_names)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_tensorDTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_blacklist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertNotIn("a1", node_names)
self.assertNotIn("a1/Assign", node_names)
self.assertNotIn("b", node_names)
self.assertNotIn("b/Assign", node_names)
self.assertIn("s", node_names)
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameAndTensorDTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_blacklist="^s$",
tensor_dtype_regex_blacklist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertNotIn("a1", node_names)
self.assertNotIn("a1/Assign", node_names)
self.assertNotIn("b", node_names)
self.assertNotIn("b/Assign", node_names)
self.assertNotIn("s", node_names)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_utils_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
def line_number_above():
return tf_inspect.stack()[1][2] - 1
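# tf_inspect.stack()[1] is the caller's frame record; index 2 in that record
# is the caller's line number, so subtracting 1 yields the line immediately
# above the call site. Tests below use this to record where an op is created.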
class GuessIsTensorFlowLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self.curr_file_path = os.path.normpath(os.path.abspath(__file__))
def tearDown(self):
ops.reset_default_graph()
def testGuessedBaseDirIsProbablyCorrect(self):
# In the non-pip world, code resides in "tensorflow/"
# In the pip world, after virtual pip, code resides in "tensorflow_core/"
# So, we have to check both of them
self.assertIn(
os.path.basename(source_utils._TENSORFLOW_BASEDIR),
["tensorflow", "tensorflow_core"])
def testUnitTestFileReturnsFalse(self):
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(self.curr_file_path))
def testSourceUtilModuleReturnsTrue(self):
self.assertTrue(
source_utils.guess_is_tensorflow_py_library(source_utils.__file__))
@test_util.run_deprecated_v1
def testFileInPythonKernelsPathReturnsTrue(self):
x = constant_op.constant(42.0, name="x")
self.assertTrue(
source_utils.guess_is_tensorflow_py_library(x.op.traceback[-1][0]))
def testNonPythonFileRaisesException(self):
with self.assertRaisesRegexp(ValueError, r"is not a Python source file"):
source_utils.guess_is_tensorflow_py_library(
os.path.join(os.path.dirname(self.curr_file_path), "foo.cc"))
class SourceHelperTest(test_util.TensorFlowTestCase):
def createAndRunGraphHelper(self):
"""Create and run a TensorFlow Graph to generate debug dumps.
This is intentionally done in a separate method, to make it easier to test
the stack-top mode of source annotation.
"""
self.dump_root = self.get_temp_dir()
self.curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
# Run a simple TF graph to generate some debug dumps that can be used in
# source annotation.
with session.Session() as sess:
self.u_init = constant_op.constant(
np.array([[5.0, 3.0], [-1.0, 0.0]]), shape=[2, 2], name="u_init")
self.u_init_line_number = line_number_above()
self.u = variables.Variable(self.u_init, name="u")
self.u_line_number = line_number_above()
self.v_init = constant_op.constant(
np.array([[2.0], [-1.0]]), shape=[2, 1], name="v_init")
self.v_init_line_number = line_number_above()
self.v = variables.Variable(self.v_init, name="v")
self.v_line_number = line_number_above()
self.w = math_ops.matmul(self.u, self.v, name="w")
self.w_line_number = line_number_above()
self.evaluate(self.u.initializer)
self.evaluate(self.v.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
run_metadata = config_pb2.RunMetadata()
sess.run(self.w, options=run_options, run_metadata=run_metadata)
self.dump = debug_data.DebugDumpDir(
self.dump_root, partition_graphs=run_metadata.partition_graphs)
self.dump.set_python_graph(sess.graph)
def setUp(self):
self.createAndRunGraphHelper()
self.helper_line_number = line_number_above()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root)
ops.reset_default_graph()
def testAnnotateWholeValidSourceFileGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(self.dump,
self.curr_file_path)
self.assertIn(self.u_init.op.name,
source_annotation[self.u_init_line_number])
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.v_init_line_number])
self.assertIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertIn(self.w.op.name, source_annotation[self.w_line_number])
# In the non-stack-top (default) mode, the helper line should be annotated
# with all the ops as well.
self.assertIn(self.u_init.op.name,
source_annotation[self.helper_line_number])
self.assertIn(self.u.op.name, source_annotation[self.helper_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.helper_line_number])
self.assertIn(self.v.op.name, source_annotation[self.helper_line_number])
self.assertIn(self.w.op.name, source_annotation[self.helper_line_number])
def testAnnotateWithStackTopGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump, self.curr_file_path, file_stack_top=True)
self.assertIn(self.u_init.op.name,
source_annotation[self.u_init_line_number])
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.v_init_line_number])
self.assertIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertIn(self.w.op.name, source_annotation[self.w_line_number])
# In the stack-top mode, the helper line should not have been annotated.
self.assertNotIn(self.helper_line_number, source_annotation)
def testAnnotateSubsetOfLinesGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump,
self.curr_file_path,
min_line=self.u_line_number,
max_line=self.u_line_number + 1)
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertNotIn(self.v_line_number, source_annotation)
def testAnnotateDumpedTensorsGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump, self.curr_file_path, do_dumped_tensors=True)
# Note: Constant Tensors u_init and v_init may not get dumped due to
# constant-folding.
self.assertIn(self.u.name, source_annotation[self.u_line_number])
self.assertIn(self.v.name, source_annotation[self.v_line_number])
self.assertIn(self.w.name, source_annotation[self.w_line_number])
self.assertNotIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertNotIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertNotIn(self.w.op.name, source_annotation[self.w_line_number])
self.assertIn(self.u.name, source_annotation[self.helper_line_number])
self.assertIn(self.v.name, source_annotation[self.helper_line_number])
self.assertIn(self.w.name, source_annotation[self.helper_line_number])
def testCallingAnnotateSourceWithoutPythonGraphRaisesException(self):
self.dump.set_python_graph(None)
with self.assertRaises(ValueError):
source_utils.annotate_source(self.dump, self.curr_file_path)
def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self):
# Create an unrelated source file.
unrelated_source_path = tempfile.mktemp()
with open(unrelated_source_path, "wt") as source_file:
source_file.write("print('hello, world')\n")
self.assertEqual({},
source_utils.annotate_source(self.dump,
unrelated_source_path))
# Clean up unrelated source file.
os.remove(unrelated_source_path)
@test_util.run_v1_only("b/120545219")
class ListSourceAgainstDumpTest(test_util.TensorFlowTestCase):
def createAndRunGraphWithWhileLoop(self):
"""Create and run a TensorFlow Graph with a while loop to generate dumps."""
self.dump_root = self.get_temp_dir()
self.curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
# Run a simple TF graph to generate some debug dumps that can be used in
# source annotation.
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
self.traceback_first_line = line_number_above()
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
run_metadata = config_pb2.RunMetadata()
sess.run(loop, options=run_options, run_metadata=run_metadata)
self.dump = debug_data.DebugDumpDir(
self.dump_root, partition_graphs=run_metadata.partition_graphs)
self.dump.set_python_graph(sess.graph)
def setUp(self):
self.createAndRunGraphWithWhileLoop()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root)
ops.reset_default_graph()
def testGenerateSourceList(self):
source_list = source_utils.list_source_files_against_dump(self.dump)
# Assert that the file paths are sorted and unique.
file_paths = [item[0] for item in source_list]
self.assertEqual(sorted(file_paths), file_paths)
self.assertEqual(len(set(file_paths)), len(file_paths))
# Assert that each item of source_list has length 6.
for item in source_list:
self.assertTrue(isinstance(item, tuple))
self.assertEqual(6, len(item))
    # The while loop body should have executed 3 times. The following table
    # lists the tensors and how many times each of them is dumped.
    # Tensor name               # of times dumped
    #   i:0                     1
    #   while/Enter:0           1
    #   while/Merge:0           4
    #   while/Merge:1           4
    #   while/Less/y:0          4
    #   while/Less:0            4
    #   while/LoopCond:0        4
    #   while/Switch:0          1
    #   while/Switch:1          3
    #   while/Identity:0        3
    #   while/Add/y:0           3
    #   while/Add:0             3
    #   while/NextIteration:0   3
    #   while/Exit:0            1
    # -------------------------------
    #   (Total)                 39
#
# The total number of nodes is 12.
# The total number of tensors is 14 (2 of the nodes have 2 outputs:
# while/Merge, while/Switch).
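    # Illustrative sanity check: the per-tensor dump counts in the table above
    # should sum to the expected total of 39.
    expected_dump_counts = [1, 1, 4, 4, 4, 4, 4, 1, 3, 3, 3, 3, 3, 1]
    self.assertEqual(39, sum(expected_dump_counts))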
_, is_tf_py_library, num_nodes, num_tensors, num_dumps, first_line = (
source_list[file_paths.index(self.curr_file_path)])
self.assertFalse(is_tf_py_library)
self.assertEqual(12, num_nodes)
self.assertEqual(14, num_tensors)
self.assertEqual(39, num_dumps)
self.assertEqual(self.traceback_first_line, first_line)
def testGenerateSourceListWithNodeNameFilter(self):
source_list = source_utils.list_source_files_against_dump(
self.dump, node_name_regex_whitelist=r"while/Add.*")
# Assert that the file paths are sorted.
file_paths = [item[0] for item in source_list]
self.assertEqual(sorted(file_paths), file_paths)
self.assertEqual(len(set(file_paths)), len(file_paths))
    # Assert that each item of source_list has length 6.
for item in source_list:
self.assertTrue(isinstance(item, tuple))
self.assertEqual(6, len(item))
    # Due to the node-name filtering, the result should contain only 2 nodes
    # and 2 tensors. The total number of dumped tensors should be 6:
# while/Add/y:0 3
# while/Add:0 3
_, is_tf_py_library, num_nodes, num_tensors, num_dumps, _ = (
source_list[file_paths.index(self.curr_file_path)])
self.assertFalse(is_tf_py_library)
self.assertEqual(2, num_nodes)
self.assertEqual(2, num_tensors)
self.assertEqual(6, num_dumps)
def testGenerateSourceListWithPathRegexFilter(self):
curr_file_basename = os.path.basename(self.curr_file_path)
source_list = source_utils.list_source_files_against_dump(
self.dump,
path_regex_whitelist=(
".*" + curr_file_basename.replace(".", "\\.") + "$"))
self.assertEqual(1, len(source_list))
(file_path, is_tf_py_library, num_nodes, num_tensors, num_dumps,
first_line) = source_list[0]
self.assertEqual(self.curr_file_path, file_path)
self.assertFalse(is_tf_py_library)
self.assertEqual(12, num_nodes)
self.assertEqual(14, num_tensors)
self.assertEqual(39, num_dumps)
self.assertEqual(self.traceback_first_line, first_line)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/source_utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the reconstruction of non-debugger-decorated GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase):
_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF,
min_graph_nodes=-1)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
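  # With these rewrites disabled, the runtime partition graphs stay
  # structurally identical to the Python-built graph, so the tests below can
  # compare reconstructed GraphDefs against the originals node-for-node.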
def setUp(self):
super(ReconstructNonDebugGraphTest, self).setUp()
self._dump_dir = tempfile.mkdtemp()
self._debug_url = "file://" + self._dump_dir
ops.reset_default_graph()
def tearDown(self):
shutil.rmtree(self._dump_dir)
super(ReconstructNonDebugGraphTest, self).tearDown()
def _graphDefWithoutBlacklistedNodes(self, graph_def):
output_graph_def = graph_pb2.GraphDef()
for node in graph_def.node:
if node.op not in self._OP_TYPE_BLACKLIST:
new_node = output_graph_def.node.add()
new_node.CopyFrom(node)
if new_node.op == "Enter":
          # The debugger sets the parallel_iterations attribute of while-loop
          # Enter nodes to 1 for debugging.
for attr_key in new_node.attr:
if attr_key == "parallel_iterations":
new_node.attr[attr_key].i = 1
elif new_node.op == "Switch":
# We don't check the inputs to Switch ops as their inputs may be
# Send/Recv nodes.
del new_node.input[:]
return output_graph_def
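  # Illustrative example of the filtering above: given a partition graph with
  # nodes ["w", "_Send", "while/Enter", "while/Switch"], the returned GraphDef
  # keeps "w", keeps "while/Enter" (with parallel_iterations reset to 1 if the
  # attribute is present), keeps "while/Switch" with its inputs cleared, and
  # drops "_Send".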
def _compareOriginalAndReconstructedGraphDefs(self,
sess,
fetches,
feed_dict=None,
expected_output=None):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
output = sess.run(fetches, feed_dict=feed_dict, options=run_options,
run_metadata=run_metadata)
if expected_output is not None:
self.assertAllClose(expected_output, output)
non_debug_graph_defs = run_metadata.partition_graphs
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_url)
run_metadata = config_pb2.RunMetadata()
output = sess.run(fetches, feed_dict=feed_dict, options=run_options,
run_metadata=run_metadata)
if expected_output is not None:
self.assertAllClose(expected_output, output)
dump = debug_data.DebugDumpDir(
self._dump_dir, partition_graphs=run_metadata.partition_graphs,
validate=True)
reconstructed = dump.reconstructed_non_debug_partition_graphs()
self.assertEqual(len(non_debug_graph_defs), len(reconstructed))
for i, non_debug_graph_def in enumerate(non_debug_graph_defs):
device_name = debug_graphs._infer_device_name(non_debug_graph_def)
test_util.assert_equal_graph_def(
self._graphDefWithoutBlacklistedNodes(reconstructed[device_name]),
self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))
# Test debug_graphs.reconstruct_non_debug_graph_def.
reconstructed_again = (
debug_graphs.reconstruct_non_debug_graph_def(
run_metadata.partition_graphs[i]))
test_util.assert_equal_graph_def(
self._graphDefWithoutBlacklistedNodes(reconstructed_again),
self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))
def testReconstructSimpleGraph(self):
with session.Session() as sess:
u = variables.Variable([12.0], name="u")
v = variables.Variable([30.0], name="v")
w = math_ops.add(u, v, name="w")
self.evaluate(u.initializer)
self.evaluate(v.initializer)
self._compareOriginalAndReconstructedGraphDefs(
sess, w, expected_output=[42.0])
def testReconstructGraphWithControlEdge(self):
with session.Session() as sess:
a = variables.Variable(10.0, name="a")
with ops.control_dependencies([a]):
b = math_ops.add(a, a, name="b")
with ops.control_dependencies([a, b]):
c = math_ops.multiply(b, b, name="c")
self.evaluate(a.initializer)
self._compareOriginalAndReconstructedGraphDefs(
sess, c, expected_output=400.0)
  def testReconstructGraphWithCond(self):
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = variables.Variable(10.0, name="x")
y = variables.Variable(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
self.evaluate(x.initializer)
self.evaluate(y.initializer)
self._compareOriginalAndReconstructedGraphDefs(
sess, cond, expected_output=21.0)
def testReconstructGraphWithWhileLoop(self):
with session.Session(config=self._no_rewrite_session_config()) as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
self._compareOriginalAndReconstructedGraphDefs(sess, loop)
def testReconstructGraphWithGradients(self):
with session.Session(config=self._no_rewrite_session_config()) as sess:
u = variables.Variable(12.0, name="u")
v = variables.Variable(30.0, name="v")
x = constant_op.constant(1.1, name="x")
toy_loss = x * (u - v)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
self.evaluate(u.initializer)
self.evaluate(v.initializer)
self._compareOriginalAndReconstructedGraphDefs(sess, train_op)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_graph_reconstruction_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.compat.v1.Session with grpc:// URLs.
This test file focuses on the grpc:// debugging of local (non-distributed)
tf.Sessions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class GrpcDebugServerTest(test_util.TensorFlowTestCase):
def testRepeatedRunServerRaisesException(self):
(_, _, _, server_thread,
server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True)
    # The server is started asynchronously, so it needs to be polled until it
    # has started.
with self.assertRaisesRegexp(
ValueError, "Server has already started running"):
server.run_server()
server.stop_server().wait()
server_thread.join()
def testRepeatedStopServerRaisesException(self):
(_, _, _, server_thread,
server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True)
server.stop_server().wait()
server_thread.join()
with self.assertRaisesRegexp(ValueError, "Server has already stopped"):
server.stop_server().wait()
def testRunServerAfterStopRaisesException(self):
(_, _, _, server_thread,
server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True)
server.stop_server().wait()
server_thread.join()
with self.assertRaisesRegexp(ValueError, "Server has already stopped"):
server.run_server()
def testStartServerWithoutBlocking(self):
(_, _, _, server_thread,
server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True, blocking=False)
# The thread that starts the server shouldn't block, so we should be able to
# join it before stopping the server.
server_thread.join()
server.stop_server().wait()
@test_util.run_v1_only("b/120545219")
class SessionDebugGrpcTest(session_debug_testlib.SessionDebugTestBase):
@classmethod
def setUpClass(cls):
session_debug_testlib.SessionDebugTestBase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread()
@classmethod
def tearDownClass(cls):
# Stop the test server and join the thread.
cls._server.stop_server().wait()
cls._server_thread.join()
session_debug_testlib.SessionDebugTestBase.tearDownClass()
def setUp(self):
# Override the dump root as the test server's dump directory.
self._dump_root = self._server_dump_dir
def tearDown(self):
if os.path.isdir(self._server_dump_dir):
shutil.rmtree(self._server_dump_dir)
session_debug_testlib.SessionDebugTestBase.tearDown(self)
def _debug_urls(self, run_number=None):
return ["grpc://localhost:%d" % self._server_port]
def _debug_dump_dir(self, run_number=None):
if run_number is None:
return self._dump_root
else:
return os.path.join(self._dump_root, "run_%d" % run_number)
def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException(self):
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
with self.assertRaisesRegexp(
TypeError, "Expected type str or list in grpc_debug_server_addresses"):
grpc_wrapper.GrpcDebugWrapperSession(sess, 1337)
def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException2(self):
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
with self.assertRaisesRegexp(
TypeError, "Expected type str in list grpc_debug_server_addresses"):
grpc_wrapper.GrpcDebugWrapperSession(sess, ["localhost:1337", 1338])
def testUseInvalidWatchFnTypeWithGrpcDebugWrapperSessionRaisesException(self):
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
with self.assertRaises(TypeError):
grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self._server_port, watch_fn="foo")
def testGrpcDebugWrapperSessionWithoutWatchFnWorks(self):
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
sess.run(u.initializer)
sess.run(v.initializer)
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self._server_port)
w_result = sess.run(w)
self.assertAllClose(42.0, w_result)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(5, dump.size)
self.assertAllClose([2.1], dump.get_tensors("u", 0, "DebugIdentity"))
self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
self.assertAllClose([20.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertAllClose([20.0], dump.get_tensors("v/read", 0, "DebugIdentity"))
self.assertAllClose([42.0], dump.get_tensors("w", 0, "DebugIdentity"))
def testGrpcDebugWrapperSessionWithWatchFnWorks(self):
def watch_fn(feeds, fetch_keys):
del feeds, fetch_keys
return ["DebugIdentity", "DebugNumericSummary"], r".*/read", None
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
sess.run(u.initializer)
sess.run(v.initializer)
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self._server_port, watch_fn=watch_fn)
w_result = sess.run(w)
self.assertAllClose(42.0, w_result)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(4, dump.size)
self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
self.assertEqual(
14, len(dump.get_tensors("u/read", 0, "DebugNumericSummary")[0]))
self.assertAllClose([20.0], dump.get_tensors("v/read", 0, "DebugIdentity"))
self.assertEqual(
14, len(dump.get_tensors("v/read", 0, "DebugNumericSummary")[0]))
def testGrpcDebugHookWithStatelessWatchFnWorks(self):
    # Perform some setup. Specifically, construct a simple TensorFlow graph
    # and create a watch function for certain ops.
def watch_fn(feeds, fetch_keys):
del feeds, fetch_keys
return framework.WatchOptions(
debug_ops=["DebugIdentity", "DebugNumericSummary"],
node_name_regex_whitelist=r".*/read",
op_type_regex_whitelist=None,
tolerate_debug_op_creation_failures=True)
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
sess.run(u.initializer)
sess.run(v.initializer)
    # Create a hook. One could use this hook with, say, a
    # tf.estimator.Estimator. However, we use a _HookedSession in this test to
    # avoid depending on the internal implementation of Estimators.
grpc_debug_hook = hooks.GrpcDebugHook(
["localhost:%d" % self._server_port], watch_fn=watch_fn)
sess = monitored_session._HookedSession(sess, [grpc_debug_hook])
    # Run the hooked session. This should stream tensor data to the gRPC
    # endpoints.
w_result = sess.run(w)
# Verify that the hook monitored the correct tensors.
self.assertAllClose(42.0, w_result)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(4, dump.size)
self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
self.assertEqual(
14, len(dump.get_tensors("u/read", 0, "DebugNumericSummary")[0]))
self.assertAllClose([20.0], dump.get_tensors("v/read", 0, "DebugIdentity"))
self.assertEqual(
14, len(dump.get_tensors("v/read", 0, "DebugNumericSummary")[0]))
def testTensorBoardDebugHookWorks(self):
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
sess.run(u.initializer)
sess.run(v.initializer)
grpc_debug_hook = hooks.TensorBoardDebugHook(
["localhost:%d" % self._server_port])
sess = monitored_session._HookedSession(sess, [grpc_debug_hook])
# Activate watch point on a tensor before calling sess.run().
self._server.request_watch("u/read", 0, "DebugIdentity")
self.assertAllClose(42.0, sess.run(w))
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
# Check that the server has received the stack trace.
self.assertTrue(self._server.query_op_traceback("u"))
self.assertTrue(self._server.query_op_traceback("u/read"))
self.assertTrue(self._server.query_op_traceback("v"))
self.assertTrue(self._server.query_op_traceback("v/read"))
self.assertTrue(self._server.query_op_traceback("w"))
# Check that the server has received the python file content.
# Query an arbitrary line to make sure that is the case.
with open(__file__, "rt") as this_source_file:
first_line = this_source_file.readline().strip()
self.assertEqual(
first_line, self._server.query_source_file_line(__file__, 1))
self._server.clear_data()
    # Call sess.run() again, and verify that this time the traceback and
    # source code are not sent, because the graph version is not newer.
self.assertAllClose(42.0, sess.run(w))
with self.assertRaises(ValueError):
self._server.query_op_traceback("delta_1")
with self.assertRaises(ValueError):
self._server.query_source_file_line(__file__, 1)
def testTensorBoardDebugHookDisablingTracebackSourceCodeSendingWorks(self):
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess = session.Session(
config=session_debug_testlib.no_rewrite_session_config())
sess.run(variables.global_variables_initializer())
grpc_debug_hook = hooks.TensorBoardDebugHook(
["localhost:%d" % self._server_port],
send_traceback_and_source_code=False)
sess = monitored_session._HookedSession(sess, [grpc_debug_hook])
# Activate watch point on a tensor before calling sess.run().
self._server.request_watch("u/read", 0, "DebugIdentity")
self.assertAllClose(42.0, sess.run(w))
# Check that the server has _not_ received any tracebacks, as a result of
# the disabling above.
with self.assertRaisesRegexp(
ValueError, r"Op .*u/read.* does not exist"):
self.assertTrue(self._server.query_op_traceback("u/read"))
with self.assertRaisesRegexp(
ValueError, r".* has not received any source file"):
self._server.query_source_file_line(__file__, 1)
  def testConstructGrpcDebugHookWithOrWithoutGrpcInUrlWorks(self):
hooks.GrpcDebugHook(["grpc://foo:42424"])
hooks.GrpcDebugHook(["foo:42424"])
class SessionDebugConcurrentTest(
session_debug_testlib.DebugConcurrentRunCallsTest):
@classmethod
def setUpClass(cls):
session_debug_testlib.SessionDebugTestBase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread()
@classmethod
def tearDownClass(cls):
# Stop the test server and join the thread.
cls._server.stop_server().wait()
cls._server_thread.join()
session_debug_testlib.SessionDebugTestBase.tearDownClass()
def setUp(self):
self._num_concurrent_runs = 3
self._dump_roots = []
for i in range(self._num_concurrent_runs):
self._dump_roots.append(
os.path.join(self._server_dump_dir, "thread%d" % i))
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._server_dump_dir):
shutil.rmtree(self._server_dump_dir)
def _get_concurrent_debug_urls(self):
urls = []
for i in range(self._num_concurrent_runs):
urls.append(self._debug_server_url + "/thread%d" % i)
return urls
@test_util.run_v1_only("b/120545219")
class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase):
"""Test server gating of debug ops."""
@classmethod
def setUpClass(cls):
(cls._server_port_1, cls._debug_server_url_1, _, cls._server_thread_1,
cls._server_1) = grpc_debug_test_server.start_server_on_separate_thread(
dump_to_filesystem=False)
(cls._server_port_2, cls._debug_server_url_2, _, cls._server_thread_2,
cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread(
dump_to_filesystem=False)
cls._servers_and_threads = [(cls._server_1, cls._server_thread_1),
(cls._server_2, cls._server_thread_2)]
@classmethod
def tearDownClass(cls):
for server, thread in cls._servers_and_threads:
server.stop_server().wait()
thread.join()
def tearDown(self):
ops.reset_default_graph()
self._server_1.clear_data()
self._server_2.clear_data()
def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenDebugNodes(self):
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
v_1 = variables.VariableV1(50.0, name="v_1")
      v_2 = variables.VariableV1(-50.0, name="v_2")
delta_1 = constant_op.constant(5.0, name="delta_1")
delta_2 = constant_op.constant(-5.0, name="delta_2")
inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
sess.run([v_1.initializer, v_2.initializer])
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity(gated_grpc=true)",
"DebugNumericSummary(gated_grpc=true)"],
debug_urls=[self._debug_server_url_1])
for i in xrange(4):
self._server_1.clear_data()
if i % 2 == 0:
self._server_1.request_watch("delta_1", 0, "DebugIdentity")
self._server_1.request_watch("delta_2", 0, "DebugIdentity")
self._server_1.request_unwatch("delta_1", 0, "DebugNumericSummary")
self._server_1.request_unwatch("delta_2", 0, "DebugNumericSummary")
else:
self._server_1.request_unwatch("delta_1", 0, "DebugIdentity")
self._server_1.request_unwatch("delta_2", 0, "DebugIdentity")
self._server_1.request_watch("delta_1", 0, "DebugNumericSummary")
self._server_1.request_watch("delta_2", 0, "DebugNumericSummary")
sess.run([inc_v_1, inc_v_2],
options=run_options, run_metadata=run_metadata)
# Watched debug tensors are:
# Run 0: delta_[1,2]:0:DebugIdentity
# Run 1: delta_[1,2]:0:DebugNumericSummary
# Run 2: delta_[1,2]:0:DebugIdentity
# Run 3: delta_[1,2]:0:DebugNumericSummary
self.assertEqual(2, len(self._server_1.debug_tensor_values))
if i % 2 == 0:
self.assertAllClose(
[5.0],
self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"])
self.assertAllClose(
[-5.0],
self._server_1.debug_tensor_values["delta_2:0:DebugIdentity"])
else:
self.assertAllClose(
[[1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 5.0, 5.0, 5.0,
0.0, 1.0, 0.0]],
self._server_1.debug_tensor_values[
"delta_1:0:DebugNumericSummary"])
self.assertAllClose(
[[1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -5.0, -5.0, -5.0,
0.0, 1.0, 0.0]],
self._server_1.debug_tensor_values[
"delta_2:0:DebugNumericSummary"])
def testToggleWatchesOnCoreMetadata(self):
(_, debug_server_url, _, server_thread,
server) = grpc_debug_test_server.start_server_on_separate_thread(
dump_to_filesystem=False,
toggle_watch_on_core_metadata=[("toggled_1", 0, "DebugIdentity"),
("toggled_2", 0, "DebugIdentity")])
self._servers_and_threads.append((server, server_thread))
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
v_1 = variables.VariableV1(50.0, name="v_1")
      v_2 = variables.VariableV1(-50.0, name="v_2")
# These two nodes have names that match those in the
# toggle_watch_on_core_metadata argument used when calling
# start_server_on_separate_thread().
toggled_1 = constant_op.constant(5.0, name="toggled_1")
toggled_2 = constant_op.constant(-5.0, name="toggled_2")
inc_v_1 = state_ops.assign_add(v_1, toggled_1, name="inc_v_1")
inc_v_2 = state_ops.assign_add(v_2, toggled_2, name="inc_v_2")
sess.run([v_1.initializer, v_2.initializer])
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity(gated_grpc=true)"],
debug_urls=[debug_server_url])
for i in xrange(4):
server.clear_data()
sess.run([inc_v_1, inc_v_2],
options=run_options, run_metadata=run_metadata)
if i % 2 == 0:
self.assertEqual(2, len(server.debug_tensor_values))
self.assertAllClose(
[5.0],
server.debug_tensor_values["toggled_1:0:DebugIdentity"])
self.assertAllClose(
[-5.0],
server.debug_tensor_values["toggled_2:0:DebugIdentity"])
else:
self.assertEqual(0, len(server.debug_tensor_values))
def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenServers(self):
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
v = variables.VariableV1(50.0, name="v")
delta = constant_op.constant(5.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(v.initializer)
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity(gated_grpc=true)"],
debug_urls=[self._debug_server_url_1, self._debug_server_url_2])
for i in xrange(4):
self._server_1.clear_data()
self._server_2.clear_data()
if i % 2 == 0:
self._server_1.request_watch("delta", 0, "DebugIdentity")
self._server_2.request_watch("v", 0, "DebugIdentity")
else:
self._server_1.request_unwatch("delta", 0, "DebugIdentity")
self._server_2.request_unwatch("v", 0, "DebugIdentity")
sess.run(inc_v, options=run_options, run_metadata=run_metadata)
if i % 2 == 0:
self.assertEqual(1, len(self._server_1.debug_tensor_values))
self.assertEqual(1, len(self._server_2.debug_tensor_values))
self.assertAllClose(
[5.0],
self._server_1.debug_tensor_values["delta:0:DebugIdentity"])
self.assertAllClose(
[50 + 5.0 * i],
self._server_2.debug_tensor_values["v:0:DebugIdentity"])
else:
self.assertEqual(0, len(self._server_1.debug_tensor_values))
self.assertEqual(0, len(self._server_2.debug_tensor_values))
def testToggleBreakpointsWorks(self):
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
v_1 = variables.VariableV1(50.0, name="v_1")
v_2 = variables.VariableV1(-50.0, name="v_2")
delta_1 = constant_op.constant(5.0, name="delta_1")
delta_2 = constant_op.constant(-5.0, name="delta_2")
inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
sess.run([v_1.initializer, v_2.initializer])
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity(gated_grpc=true)"],
debug_urls=[self._debug_server_url_1])
for i in xrange(4):
self._server_1.clear_data()
if i in (0, 2):
# Enable breakpoint at delta_[1,2]:0:DebugIdentity in runs 0 and 2.
self._server_1.request_watch(
"delta_1", 0, "DebugIdentity", breakpoint=True)
self._server_1.request_watch(
"delta_2", 0, "DebugIdentity", breakpoint=True)
else:
# Disable the breakpoint in runs 1 and 3.
self._server_1.request_unwatch("delta_1", 0, "DebugIdentity")
self._server_1.request_unwatch("delta_2", 0, "DebugIdentity")
output = sess.run([inc_v_1, inc_v_2],
options=run_options, run_metadata=run_metadata)
self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)
if i in (0, 2):
          # During runs 0 and 2, the server should have received the published
          # debug tensors delta_[1,2]:0:DebugIdentity. The breakpoints should
          # have been unblocked by EventReply responses from the server.
self.assertAllClose(
[5.0],
self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"])
self.assertAllClose(
[-5.0],
self._server_1.debug_tensor_values["delta_2:0:DebugIdentity"])
          # After the runs, the server should have properly registered the
          # breakpoints due to the request_watch calls.
self.assertSetEqual({("delta_1", 0, "DebugIdentity"),
("delta_2", 0, "DebugIdentity")},
self._server_1.breakpoints)
else:
          # After the end of runs 1 and 3, the server has received the requests
          # to disable the breakpoints at delta_[1,2]:0:DebugIdentity.
self.assertSetEqual(set(), self._server_1.breakpoints)
def testTensorBoardDebuggerWrapperToggleBreakpointsWorks(self):
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
v_1 = variables.VariableV1(50.0, name="v_1")
v_2 = variables.VariableV1(-50.0, name="v_2")
delta_1 = constant_op.constant(5.0, name="delta_1")
delta_2 = constant_op.constant(-5.0, name="delta_2")
inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
sess.run([v_1.initializer, v_2.initializer])
# The TensorBoardDebugWrapperSession should add a DebugIdentity debug op
# with attribute gated_grpc=True for every tensor in the graph.
sess = grpc_wrapper.TensorBoardDebugWrapperSession(
sess, self._debug_server_url_1)
for i in xrange(4):
self._server_1.clear_data()
if i in (0, 2):
# Enable breakpoint at delta_[1,2]:0:DebugIdentity in runs 0 and 2.
self._server_1.request_watch(
"delta_1", 0, "DebugIdentity", breakpoint=True)
self._server_1.request_watch(
"delta_2", 0, "DebugIdentity", breakpoint=True)
else:
# Disable the breakpoint in runs 1 and 3.
self._server_1.request_unwatch("delta_1", 0, "DebugIdentity")
self._server_1.request_unwatch("delta_2", 0, "DebugIdentity")
output = sess.run([inc_v_1, inc_v_2])
self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)
if i in (0, 2):
          # During runs 0 and 2, the server should have received the published
          # debug tensors delta_[1,2]:0:DebugIdentity. The breakpoints should
          # have been unblocked by EventReply responses from the server.
self.assertAllClose(
[5.0],
self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"])
self.assertAllClose(
[-5.0],
self._server_1.debug_tensor_values["delta_2:0:DebugIdentity"])
# After the runs, the server should have properly registered the
# breakpoints.
else:
          # After the end of runs 1 and 3, the server has received the requests
          # to disable the breakpoints at delta_[1,2]:0:DebugIdentity.
self.assertSetEqual(set(), self._server_1.breakpoints)
if i == 0:
# Check that the server has received the stack trace.
self.assertTrue(self._server_1.query_op_traceback("delta_1"))
self.assertTrue(self._server_1.query_op_traceback("delta_2"))
self.assertTrue(self._server_1.query_op_traceback("inc_v_1"))
self.assertTrue(self._server_1.query_op_traceback("inc_v_2"))
# Check that the server has received the python file content.
# Query an arbitrary line to make sure that is the case.
with open(__file__, "rt") as this_source_file:
first_line = this_source_file.readline().strip()
self.assertEqual(
first_line, self._server_1.query_source_file_line(__file__, 1))
else:
          # In later Session.run() calls, the traceback shouldn't have been
          # sent, because it was already sent in the first call. So calling
          # query_op_traceback() should lead to an exception, because the test
          # debug server clears the data at the beginning of every iteration.
with self.assertRaises(ValueError):
self._server_1.query_op_traceback("delta_1")
with self.assertRaises(ValueError):
self._server_1.query_source_file_line(__file__, 1)
def testTensorBoardDebuggerWrapperDisablingTracebackSourceSendingWorks(self):
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
v_1 = variables.VariableV1(50.0, name="v_1")
v_2 = variables.VariableV1(-50.0, name="v_2")
delta_1 = constant_op.constant(5.0, name="delta_1")
delta_2 = constant_op.constant(-5.0, name="delta_2")
inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
sess.run(variables.global_variables_initializer())
# Disable the sending of traceback and source code.
sess = grpc_wrapper.TensorBoardDebugWrapperSession(
sess, self._debug_server_url_1, send_traceback_and_source_code=False)
for i in xrange(4):
self._server_1.clear_data()
if i == 0:
self._server_1.request_watch(
"delta_1", 0, "DebugIdentity", breakpoint=True)
output = sess.run([inc_v_1, inc_v_2])
self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)
# No op traceback or source code should have been received by the debug
# server due to the disabling above.
with self.assertRaisesRegexp(
ValueError, r"Op .*delta_1.* does not exist"):
self.assertTrue(self._server_1.query_op_traceback("delta_1"))
with self.assertRaisesRegexp(
ValueError, r".* has not received any source file"):
self._server_1.query_source_file_line(__file__, 1)
def testGetGrpcDebugWatchesReturnsCorrectAnswer(self):
with session.Session() as sess:
v = variables.VariableV1(50.0, name="v")
delta = constant_op.constant(5.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(v.initializer)
# Before any debugged runs, the server should be aware of no debug
# watches.
self.assertEqual([], self._server_1.gated_grpc_debug_watches())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.add_debug_tensor_watch(
run_options, "delta", output_slot=0,
debug_ops=["DebugNumericSummary(gated_grpc=true)"],
debug_urls=[self._debug_server_url_1])
debug_utils.add_debug_tensor_watch(
run_options, "v", output_slot=0,
debug_ops=["DebugIdentity"],
debug_urls=[self._debug_server_url_1])
sess.run(inc_v, options=run_options, run_metadata=run_metadata)
# After the first run, the server should have noted the debug watches
# for which gated_grpc == True, but not the ones with gated_grpc == False.
self.assertEqual(1, len(self._server_1.gated_grpc_debug_watches()))
debug_watch = self._server_1.gated_grpc_debug_watches()[0]
self.assertEqual("delta", debug_watch.node_name)
self.assertEqual(0, debug_watch.output_slot)
self.assertEqual("DebugNumericSummary", debug_watch.debug_op)
@test_util.run_v1_only("b/120545219")
class DelayedDebugServerTest(test_util.TensorFlowTestCase):
def testDebuggedSessionRunWorksWithDelayedDebugServerStartup(self):
"""Test debugged Session.run() tolerates delayed debug server startup."""
ops.reset_default_graph()
# Start a debug server asynchronously, with a certain amount of delay.
(debug_server_port, _, _, server_thread,
debug_server) = grpc_debug_test_server.start_server_on_separate_thread(
server_start_delay_sec=2.0, dump_to_filesystem=False)
with self.cached_session() as sess:
a_init = constant_op.constant(42.0, name="a_init")
a = variables.VariableV1(a_init, name="a")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(debug_ops=["DebugIdentity"])
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % debug_server_port, watch_fn=watch_fn)
sess.run(a.initializer)
self.assertAllClose(
[42.0], debug_server.debug_tensor_values["a_init:0:DebugIdentity"])
debug_server.stop_server().wait()
server_thread.join()
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/session_debug_grpc_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Communicating tracebacks and source code with debug server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import socket
import grpc
from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.debug.lib import common
from tensorflow.python.debug.lib import debug_service_pb2_grpc
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.profiler import tfprof_logger
def _load_debugged_source_file(file_path, source_file_proto):
file_stat = gfile.Stat(file_path)
source_file_proto.host = socket.gethostname()
source_file_proto.file_path = file_path
source_file_proto.last_modified = file_stat.mtime_nsec
source_file_proto.bytes = file_stat.length
try:
with gfile.Open(file_path, "r") as f:
source_file_proto.lines.extend(f.read().splitlines())
except IOError:
pass
def _string_to_id(string, string_to_id):
if string not in string_to_id:
string_to_id[string] = len(string_to_id)
return string_to_id[string]
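# Illustrative behavior of _string_to_id (a usage sketch):
#   table = {None: 0}
#   _string_to_id("a.py", table)  # -> 1 (a new string gets the next free id)
#   _string_to_id("b.py", table)  # -> 2
#   _string_to_id("a.py", table)  # -> 1 (a repeated string keeps its id)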
def _format_origin_stack(origin_stack, call_traceback_proto):
"""Format a traceback stack for a `CallTraceback` proto.
Args:
origin_stack: The stack list as returned by `traceback.extract_stack()`.
call_traceback_proto: A `CallTraceback` proto whose fields are to be
populated.
"""
string_to_id = {}
string_to_id[None] = 0
for frame in origin_stack:
file_path, lineno, func_name, line_text = frame
call_traceback_proto.origin_stack.traces.add(
file_id=_string_to_id(file_path, string_to_id),
lineno=lineno,
function_id=_string_to_id(func_name, string_to_id),
line_id=_string_to_id(line_text, string_to_id))
id_to_string = call_traceback_proto.origin_id_to_string
for key, value in string_to_id.items():
id_to_string[value] = key if key is not None else ""
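# Usage sketch for _format_origin_stack (illustrative):
#   import traceback
#   proto = debug_service_pb2.CallTraceback()
#   _format_origin_stack(traceback.extract_stack(), proto)
#   # proto.origin_stack.traces now holds one entry per stack frame, with the
#   # file paths, function names and line texts interned as integer ids in
#   # proto.origin_id_to_string.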
def _source_file_paths_outside_tensorflow_py_library(code_defs, id_to_string):
"""Extract source file paths outside TensorFlow Python library.
Args:
code_defs: An iterable of `CodeDef` protos, i.e., an iterable of stack
traces.
id_to_string: A proto map from integer ids to strings.
Returns:
An iterable of source file paths outside the TensorFlow Python library.
"""
file_ids = set()
for code_def in code_defs:
for trace in code_def.traces:
file_ids.add(trace.file_id)
non_tf_files = (id_to_string[file_id] for file_id in file_ids)
non_tf_files = (
f for f in non_tf_files
if not source_utils.guess_is_tensorflow_py_library(f) and gfile.Exists(f))
return non_tf_files
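# Illustrative example: if the given CodeDefs reference file ids {0, 1}, and
# id_to_string maps 0 to "/tmp/my_model.py" (a user file that exists) and 1 to
# a path inside the TensorFlow Python library, only "/tmp/my_model.py" is
# yielded.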
def grpc_message_length_bytes():
"""Maximum gRPC message length in bytes."""
return 4 * 1024 * 1024
def _send_call_tracebacks(destinations,
origin_stack,
is_eager_execution=False,
call_key=None,
graph=None,
send_source=True):
"""Send the tracebacks of a TensorFlow execution call.
To gRPC debug server(s). This applies to graph execution (`tf.Session.run()`)
calls and eager execution calls.
If `send_source`, also sends the underlying source files outside the
TensorFlow library.
Args:
destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
e.g., "localhost:4242". If a `list`, gRPC requests containing the same
`CallTraceback` proto payload will be sent to all the destinations.
origin_stack: The traceback stack for the origin of the execution call. For
graph execution, this is the traceback of the `tf.Session.run()`
invocation. For eager execution, this is the traceback of the Python
      line that executes the eager operation.
is_eager_execution: (`bool`) whether an eager execution call (i.e., not a
`tf.Session.run` or derived methods) is being sent.
call_key: The key of the execution call, as a string. For graph execution,
this is a string describing the feeds, fetches (and targets) names of the
`tf.Session.run` call. For eager execution, this is ignored.
graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),
which contains op tracebacks, if applicable.
send_source: Whether the source files involved in the op tracebacks but
outside the TensorFlow library are to be sent.
"""
if not isinstance(destinations, list):
destinations = [destinations]
# Strip grpc:// prefix, if any is present.
destinations = [
dest[len(common.GRPC_URL_PREFIX):]
if dest.startswith(common.GRPC_URL_PREFIX) else dest
for dest in destinations]
call_type = (debug_service_pb2.CallTraceback.EAGER_EXECUTION
if is_eager_execution
else debug_service_pb2.CallTraceback.GRAPH_EXECUTION)
graph_traceback = tfprof_logger.merge_default_with_oplog(
graph, add_trainable_var=False) if graph else None
call_traceback = debug_service_pb2.CallTraceback(
call_type=call_type, call_key=call_key, graph_traceback=graph_traceback,
graph_version=graph.version if graph else None)
_format_origin_stack(origin_stack, call_traceback)
if send_source:
source_file_paths = set()
source_file_paths.update(_source_file_paths_outside_tensorflow_py_library(
(log_entry.code_def for log_entry
in call_traceback.graph_traceback.log_entries),
call_traceback.graph_traceback.id_to_string))
source_file_paths.update(_source_file_paths_outside_tensorflow_py_library(
[call_traceback.origin_stack], call_traceback.origin_id_to_string))
debugged_source_files = []
for file_path in source_file_paths:
source_files = debug_pb2.DebuggedSourceFiles()
_load_debugged_source_file(
file_path, source_files.source_files.add())
debugged_source_files.append(source_files)
for destination in destinations:
channel = grpc.insecure_channel(destination)
stub = debug_service_pb2_grpc.EventListenerStub(channel)
stub.SendTracebacks(call_traceback)
if send_source:
for path, source_files in zip(
source_file_paths, debugged_source_files):
if source_files.ByteSize() < grpc_message_length_bytes():
stub.SendSourceFiles(source_files)
else:
tf_logging.warn(
"The content of the source file at %s is not sent to "
"gRPC debug server %s, because the message size exceeds "
"gRPC message length limit (%d bytes)." % (
path, destination, grpc_message_length_bytes()))
def send_graph_tracebacks(destinations,
run_key,
origin_stack,
graph,
send_source=True):
"""Send the tracebacks of a graph execution call to debug server(s).
Args:
destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
e.g., "localhost:4242". If a `list`, gRPC requests containing the same
`CallTraceback` proto payload will be sent to all the destinations.
run_key: A string describing the feeds, fetches (and targets) names of the
`tf.Session.run` call.
origin_stack: The traceback of the `tf.Session.run()` invocation.
graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),
which contains op tracebacks.
send_source: Whether the source files involved in the op tracebacks but
outside the TensorFlow library are to be sent.
"""
_send_call_tracebacks(
destinations, origin_stack, is_eager_execution=False, call_key=run_key,
graph=graph, send_source=send_source)
def send_eager_tracebacks(destinations,
origin_stack,
send_source=True):
"""Send the tracebacks of an eager execution call to debug server(s).
Args:
    destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
      e.g., "localhost:4242". If a `list`, gRPC requests containing the same
      `CallTraceback` proto payload will be sent to all the destinations.
    origin_stack: The traceback of the eager operation invocation.
send_source: Whether the source files involved in the op tracebacks but
outside the TensorFlow library are to be sent.
"""
_send_call_tracebacks(
destinations, origin_stack, is_eager_execution=True,
send_source=send_source)
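# Minimal usage sketch for this module (illustrative; the address, run key and
# graph below are placeholder values):
#   import traceback
#   origin_stack = traceback.extract_stack()
#   send_graph_tracebacks(
#       "localhost:6064",
#       run_key="fetches:w:0;feeds:",  # a hypothetical run-key string
#       origin_stack=origin_stack,
#       graph=some_graph)  # some_graph: a tf.Graph built by the caller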
|
tensorflow-master
|
tensorflow/python/debug/lib/source_remote.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.compat.v1.Session with grpc:// URLs.
This test file focuses on the grpc:// debugging of distributed (gRPC) sessions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import subprocess
import sys
import time
import portpicker
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("b/120545219")
class DistributedSessionDebugTest(test_util.TensorFlowTestCase):
"""Test the debugging of distributed sessions."""
PER_PROC_GPU_MEMORY_FRACTION = 0.1
POLLING_INTERVAL_SEC = 0.025
@classmethod
def setUpClass(cls):
gpu_memory_fraction_opt = (
"--gpu_memory_fraction=%f" % cls.PER_PROC_GPU_MEMORY_FRACTION)
worker_port = portpicker.pick_unused_port()
cluster_spec = "worker|localhost:%d" % worker_port
tf_logging.info("cluster_spec: %s", cluster_spec)
server_bin = test.test_src_dir_path(
"python/debug/grpc_tensorflow_server.par")
cls.server_target = "grpc://localhost:%d" % worker_port
cls.server_procs = {}
cls.server_procs["worker"] = subprocess.Popen(
[
server_bin,
"--logtostderr",
"--cluster_spec=%s" % cluster_spec,
"--job_name=worker",
"--task_id=0",
gpu_memory_fraction_opt,
],
stdout=sys.stdout,
stderr=sys.stderr)
# Start debug server in-process, on separate thread.
(cls.debug_server_port, cls.debug_server_url, _, cls.debug_server_thread,
cls.debug_server
) = grpc_debug_test_server.start_server_on_separate_thread(
dump_to_filesystem=False)
tf_logging.info("debug server url: %s", cls.debug_server_url)
cls.session_config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(
per_process_gpu_memory_fraction=cls.PER_PROC_GPU_MEMORY_FRACTION))
@classmethod
def tearDownClass(cls):
for key in cls.server_procs:
cls.server_procs[key].terminate()
try:
cls.debug_server.stop_server().wait()
except ValueError:
pass
cls.debug_server_thread.join()
def setUp(self):
pass
def tearDown(self):
self.debug_server.clear_data()
def _pollingAssertDebugTensorValuesAllClose(self, expected_values,
debug_tensor_name):
"""Poll debug_server till tensor appears and matches expected values."""
while (debug_tensor_name not in self.debug_server.debug_tensor_values or
len(self.debug_server.debug_tensor_values) < len(expected_values)):
time.sleep(self.POLLING_INTERVAL_SEC)
self.assertAllClose(
expected_values,
self.debug_server.debug_tensor_values[debug_tensor_name])
def _createGraph(self):
"""Create graph for testing.
Returns:
Python Graph object.
"""
with ops.Graph().as_default() as graph:
with ops.device("/job:worker/task:0/cpu:0"):
self.a = variables.VariableV1(10.0, name="a")
self.b = variables.VariableV1(100.0, name="b")
self.inc_a = state_ops.assign_add(self.a, 2.0, name="inc_a")
self.dec_b = state_ops.assign_add(self.b, -5.0, name="dec_b")
self.p = math_ops.multiply(self.inc_a, self.dec_b, name="p")
self.q = math_ops.negative(self.p, name="q")
return graph
def testDistributedRunWithGatedGrpcCommunicatesWithDebugServerCorrectly(self):
graph = self._createGraph()
with session.Session(
config=self.session_config, graph=graph,
target=self.server_target) as sess:
sess.run(self.a.initializer)
sess.run(self.b.initializer)
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
sess.graph,
node_name_regex_whitelist=r"a",
debug_ops=["DebugIdentity"],
debug_urls=[self.debug_server_url])
# Test gated_grpc for an op located on the worker, i.e., on the same
# host as where MasterSession is.
# TODO(cais): gRPC gating of debug ops does not work on partition graphs
# not located on MasterSession hosts (e.g., parameter servers) yet. Make
# it work.
debug_utils.watch_graph(
run_options,
sess.graph,
node_name_regex_whitelist=r"p",
debug_ops=["DebugIdentity(gated_grpc=True)"],
debug_urls=[self.debug_server_url])
for i in xrange(4):
if i % 2 == 0:
self.debug_server.request_watch("p", 0, "DebugIdentity")
else:
self.debug_server.request_unwatch("p", 0, "DebugIdentity")
expected_p = (10.0 + 2.0 * (i + 1)) * (100.0 - 5.0 * (i + 1))
self.assertAllClose(-expected_p, sess.run(self.q, options=run_options))
self.assertEqual(1, len(self.debug_server.core_metadata_json_strings))
core_metadata = json.loads(
self.debug_server.core_metadata_json_strings[0])
self.assertEqual([], core_metadata["input_names"])
self.assertEqual(["q:0"], core_metadata["output_names"])
self.assertEqual(i, core_metadata["executor_step_index"])
if i == 0:
self.assertEqual(1, len(self.debug_server.partition_graph_defs))
# Tensor "a" is from a PS. It may take longer to arrive due to the fact
# that the stream connection between the PS and the debug server is
# persistent and not torn down at the end of each Session.run()
self._pollingAssertDebugTensorValuesAllClose([10.0 + 2.0 * i],
"a:0:DebugIdentity")
        # Due to the gRPC gating of the debug op for "p", the debug tensor
        # should be available only on even-indexed runs.
if i % 2 == 0:
self.assertAllClose(
[expected_p],
self.debug_server.debug_tensor_values["p:0:DebugIdentity"])
else:
self.assertNotIn("p:0:DebugIdentity",
self.debug_server.debug_tensor_values)
self.assertNotIn("b:0:DebugIdentity",
self.debug_server.debug_tensor_values)
self.debug_server.clear_data()
def testDistributedRunWithGrpcDebugWrapperWorks(self):
graph = self._createGraph()
with session.Session(
config=self.session_config, graph=graph,
target=self.server_target) as sess:
sess.run(self.a.initializer)
sess.run(self.b.initializer)
def watch_fn(feeds, fetch_keys):
del feeds, fetch_keys
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_whitelist=r"p")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
for i in xrange(4):
expected_p = (10.0 + 2.0 * (i + 1)) * (100.0 - 5.0 * (i + 1))
self.assertAllClose(-expected_p, sess.run(self.q))
if i == 0:
self.assertEqual(1, len(self.debug_server.partition_graph_defs))
self.assertAllClose(
[expected_p],
self.debug_server.debug_tensor_values["p:0:DebugIdentity"])
self.assertNotIn("b:0:DebugIdentity",
self.debug_server.debug_tensor_values)
self.debug_server.clear_data()
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/dist_session_debug_grpc_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities under multiple (i.e., >1) GPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SessionDebugMultiGPUTest(test_util.TensorFlowTestCase):
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def testMultiGPUSessionRun(self):
local_devices = device_lib.list_local_devices()
gpu_device_names = []
for device in local_devices:
if device.device_type == "GPU":
gpu_device_names.append(device.name)
gpu_device_names = sorted(gpu_device_names)
if len(gpu_device_names) < 2:
self.skipTest(
"This test requires at least 2 GPUs, but only %d is available." %
len(gpu_device_names))
with session.Session() as sess:
v = variables.Variable([10.0, 15.0], dtype=dtypes.float32, name="v")
with ops.device(gpu_device_names[0]):
u0 = math_ops.add(v, v, name="u0")
with ops.device(gpu_device_names[1]):
u1 = math_ops.multiply(v, v, name="u1")
w = math_ops.subtract(u1, u0, name="w")
self.evaluate(v.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(run_options, sess.graph,
debug_urls="file://" + self._dump_root)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(
[80.0, 195.0],
sess.run(w, options=run_options, run_metadata=run_metadata))
debug_dump_dir = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(3, len(debug_dump_dir.devices()))
self.assertAllClose(
[10.0, 15.0], debug_dump_dir.get_tensors("v", 0, "DebugIdentity")[0])
self.assertAllClose(
[20.0, 30.0], debug_dump_dir.get_tensors("u0", 0, "DebugIdentity")[0])
self.assertAllClose(
[100.0, 225.0],
debug_dump_dir.get_tensors("u1", 0, "DebugIdentity")[0])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/session_debug_multi_gpu_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
#
# Do not use pylint on generated code.
# pylint: disable=missing-docstring,g-short-docstring-punctuation,g-no-space-after-docstring-summary,invalid-name,line-too-long,unused-argument,g-doc-args
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import grpc
from tensorflow.core.debug import debug_service_pb2 as tensorflow_dot_core_dot_debug_dot_debug__service__pb2
from tensorflow.core.protobuf import debug_pb2 as tensorflow_dot_core_dot_protobuf_dot_debug__pb2
from tensorflow.core.util import event_pb2 as tensorflow_dot_core_dot_util_dot_event__pb2
class EventListenerStub(object):
"""EventListener: Receives Event protos, e.g., from debugged TensorFlow
runtime(s).
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendEvents = channel.stream_stream(
'/tensorflow.EventListener/SendEvents',
request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
)
self.SendTracebacks = channel.unary_unary(
'/tensorflow.EventListener/SendTracebacks',
request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
)
self.SendSourceFiles = channel.unary_unary(
'/tensorflow.EventListener/SendSourceFiles',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString,
)
class EventListenerServicer(object):
"""EventListener: Receives Event protos, e.g., from debugged TensorFlow
runtime(s).
"""
def SendEvents(self, request_iterator, context):
"""Client(s) can use this RPC method to send the EventListener Event protos.
The Event protos can hold information such as:
1) intermediate tensors from a debugged graph being executed, which can
be sent from DebugIdentity ops configured with grpc URLs.
2) GraphDefs of partition graphs, which can be sent from special debug
ops that get executed immediately after the beginning of the graph
execution.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendTracebacks(self, request, context):
"""Send the tracebacks of ops in a Python graph definition.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendSourceFiles(self, request, context):
"""Send a collection of source code files being debugged.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EventListenerServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendEvents': grpc.stream_stream_rpc_method_handler(
servicer.SendEvents,
request_deserializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.FromString,
response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString,
),
'SendTracebacks': grpc.unary_unary_rpc_method_handler(
servicer.SendTracebacks,
request_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.FromString,
response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString,
),
'SendSourceFiles': grpc.unary_unary_rpc_method_handler(
servicer.SendSourceFiles,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.FromString,
response_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.EventListener', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
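

# A minimal sketch of wiring an EventListenerServicer subclass into a grpc
# server via the generated helper above; the port and thread-pool size are
# illustrative assumptions.
def _example_serve_event_listener(servicer, port=6064):
  from concurrent import futures  # local import keeps the sketch self-contained
  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
  add_EventListenerServicer_to_server(servicer, server)
  server.add_insecure_port("[::]:%d" % port)
  server.start()  # start() is non-blocking; stop the server with stop().
  return server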
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_service_pb2_grpc.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session with file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
@test_util.run_v1_only("b/120545219")
class SessionDebugFileTest(session_debug_testlib.SessionDebugTestBase):
def _debug_urls(self, run_number=None):
return ["file://%s" % self._debug_dump_dir(run_number=run_number)]
def _debug_dump_dir(self, run_number=None):
if run_number is None:
return self._dump_root
else:
return os.path.join(self._dump_root, "run_%d" % run_number)
def testAllowsDifferentWatchesOnDifferentRuns(self):
"""Test watching different tensors on different runs of the same graph."""
with session.Session(
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init_val = [[5.0, 3.0], [-1.0, 0.0]]
v_init_val = [[2.0], [-1.0]]
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "diff_Watch/u"
v_name = "diff_Watch/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.VariableV1(v_init, name=v_name)
w = math_ops.matmul(u, v, name="diff_Watch/matmul")
u.initializer.run()
v.initializer.run()
for i in range(2):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_dump_root = self._debug_dump_dir(run_number=i)
debug_urls = self._debug_urls(run_number=i)
if i == 0:
# First debug run: Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
else:
# Second debug run: Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
run_dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertTrue(dump.loaded_partition_graphs())
# Each run should have generated only one dumped tensor, not two.
self.assertEqual(1, dump.size)
if i == 0:
self.assertAllClose([u_init_val],
dump.get_tensors("%s/read" % u_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % u_name, 0,
"DebugIdentity")[0], 0)
else:
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % v_name, 0,
"DebugIdentity")[0], 0)
class SessionDebugConcurrentTest(
session_debug_testlib.DebugConcurrentRunCallsTest):
def setUp(self):
self._num_concurrent_runs = 3
self._dump_roots = []
for _ in range(self._num_concurrent_runs):
self._dump_roots.append(tempfile.mkdtemp())
def tearDown(self):
ops.reset_default_graph()
for dump_root in self._dump_roots:
if os.path.isdir(dump_root):
shutil.rmtree(dump_root)
def _get_concurrent_debug_urls(self):
return [("file://%s" % dump_root) for dump_root in self._dump_roots]
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/session_debug_file_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for debug_gradients module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_gradients
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
@test_util.run_deprecated_v1
class IdentifyGradientTest(test_util.TensorFlowTestCase):
def setUp(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
config = config_pb2.ConfigProto(graph_options=graph_options)
self.sess = session.Session(config=config)
with self.sess.as_default():
self.u = variables.Variable(2.0, name="u")
self.v = variables.Variable(3.0, name="v")
self.w = math_ops.multiply(self.u.value(), self.v.value(), name="w")
def tearDown(self):
ops.reset_default_graph()
debug_gradients.clear_gradient_debuggers()
def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.identify_gradient(self.w)
with self.assertRaisesRegexp(ValueError,
"The graph already contains an op named .*"):
grad_debugger.identify_gradient(self.w)
def testIdentifyGradientWorksOnMultipleLosses(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
y = math_ops.add(self.w, -1.0, name="y")
debug_y = grad_debugger_1.identify_gradient(y)
z1 = math_ops.square(debug_y, name="z1")
debug_y = grad_debugger_2.identify_gradient(y)
z2 = math_ops.sqrt(debug_y, name="z2")
with grad_debugger_1:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
with grad_debugger_2:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, ops.Tensor)
self.assertIsInstance(dz2_dy, ops.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0**2, self.sess.run(z1))
self.assertAllClose(5.0**0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger_1.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
# There are >1 gradient debuggers registered, and grad_debugger is not used
# as a context manager here, so the gradient w.r.t. self.w will not be
# registered.
gradients_impl.gradients(y, [self.u, self.v])
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_1.gradient_tensor(self.w)
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_2.gradient_tensor(self.w)
def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
grad_debugger = debug_gradients.GradientsDebugger()
with self.assertRaisesRegexp(
TypeError,
r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
r"has type .*Operation.*"):
grad_debugger.gradient_tensor(variables.global_variables_initializer())
def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
gradient_descent.GradientDescentOptimizer(0.1).minimize(y)
self.sess.run(variables.global_variables_initializer())
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
    # The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
y = math_ops.add(self.w, -1.0, name="y")
    # The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsWorksOnRefTensor(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
self.assertAllClose(3.0, self.sess.run(
grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsWorksOnMultipleTensors(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph,
"(u|w):0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
self.assertEqual(2, len(grad_debugger.gradient_tensors()))
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.assertIsInstance(grad_debugger.gradient_tensor("w:0"), ops.Tensor)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(1.0, self.sess.run(
grad_debugger.gradient_tensor("w:0")))
self.assertAllClose(3.0, self.sess.run(
grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsByXTensorsWorks(self):
y = math_ops.add(self.w, -1.0, name="foo/y")
z = math_ops.square(y, name="foo/z")
    # The construction of the forward graph has completed.
    # But we can still get the gradient tensors by using
    # watch_gradients_by_tensors().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
[self.w, self.u, y]):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.assertEqual(3, len(grad_debugger.gradient_tensors()))
u_grad = grad_debugger.gradient_tensor(self.u)
w_grad = grad_debugger.gradient_tensor(self.w)
y_grad = grad_debugger.gradient_tensor(y)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(10.0, self.sess.run(y_grad))
self.assertAllClose(10.0, self.sess.run(w_grad))
self.assertAllClose(30.0, self.sess.run(u_grad))
def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
y = math_ops.add(self.w, -1.0, name="y")
z1 = math_ops.square(y, name="z1")
z2 = math_ops.sqrt(y, name="z2")
grad_debugger_1 = debug_gradients.GradientsDebugger()
with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
grad_debugger_2 = debug_gradients.GradientsDebugger()
with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, ops.Tensor)
self.assertIsInstance(dz2_dy, ops.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0**2, self.sess.run(z1))
self.assertAllClose(5.0**0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
def testGradientsValuesFromDumpWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
z = math_ops.square(y, name="z")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
[self.w, self.u, y]):
train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
dump_dir = tempfile.mkdtemp()
debug_url = "file://" + dump_dir
debug_utils.watch_graph(run_options, self.sess.graph, debug_urls=debug_url)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(2.0, self.sess.run(self.u))
self.sess.run(train_op, options=run_options, run_metadata=run_metadata)
self.assertAllClose(-1.0, self.sess.run(self.u))
dump = debug_data.DebugDumpDir(
dump_dir, partition_graphs=run_metadata.partition_graphs)
dump.set_python_graph(self.sess.graph)
y_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, y, dump)
self.assertEqual(1, len(y_grad_values))
self.assertAllClose(10.0, y_grad_values[0])
w_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.w, dump)
self.assertEqual(1, len(w_grad_values))
self.assertAllClose(10.0, w_grad_values[0])
u_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.u, dump)
self.assertEqual(1, len(u_grad_values))
self.assertAllClose(30.0, u_grad_values[0])
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "
r"x-tensor v:0"):
debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump)
# Cleanup.
shutil.rmtree(dump_dir)
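

# A minimal sketch of the core GradientsDebugger pattern the tests in this
# class exercise, assuming the caller supplies a session, a scalar loss, and
# the tensor whose gradient should be watched.
def _example_watch_gradients(sess, loss, watched_tensor):
  grad_debugger = debug_gradients.GradientsDebugger()
  # Gradients constructed inside the context manager are registered with
  # this debugger instance.
  with grad_debugger.watch_gradients_by_tensors(sess.graph, [watched_tensor]):
    train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(loss)
  # The gradient w.r.t. the watched tensor can now be fetched like any tensor.
  return train_op, grad_debugger.gradient_tensor(watched_tensor)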
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_gradients_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python-based TensorFlow GRPC server.
Takes input arguments cluster_spec, job_name and task_id, and starts a blocking
TensorFlow GRPC server.
Usage:
grpc_tensorflow_server.py --cluster_spec=SPEC --job_name=NAME --task_id=ID
Where:
SPEC is <JOB>(,<JOB>)*
JOB is <NAME>|<HOST:PORT>(;<HOST:PORT>)*
NAME is a valid job name ([a-z][0-9a-z]*)
HOST is a hostname or IP address
PORT is a port number
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
def parse_cluster_spec(cluster_spec, cluster, verbose=False):
"""Parse content of cluster_spec string and inject info into cluster protobuf.
Args:
cluster_spec: cluster specification string, e.g.,
"local|localhost:2222;localhost:2223"
cluster: cluster protobuf.
verbose: If verbose logging is requested.
Raises:
ValueError: if the cluster_spec string is invalid.
"""
job_strings = cluster_spec.split(",")
if not cluster_spec:
raise ValueError("Empty cluster_spec string")
for job_string in job_strings:
job_def = cluster.job.add()
if job_string.count("|") != 1:
raise ValueError("Not exactly one instance of '|' in cluster_spec")
job_name = job_string.split("|")[0]
if not job_name:
raise ValueError("Empty job_name in cluster_spec")
job_def.name = job_name
if verbose:
logging.info("Added job named \"%s\"", job_name)
job_tasks = job_string.split("|")[1].split(";")
for i in range(len(job_tasks)):
if not job_tasks[i]:
raise ValueError("Empty task string at position %d" % i)
job_def.tasks[i] = job_tasks[i]
if verbose:
logging.info(" Added task \"%s\" to job \"%s\"",
job_tasks[i], job_name)
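

# A minimal sketch of how parse_cluster_spec() fills a ClusterDef; the spec
# string below is an illustrative assumption written in the SPEC grammar from
# the module docstring.
def _example_parse_cluster_spec():
  server_def = tensorflow_server_pb2.ServerDef(protocol="grpc")
  parse_cluster_spec("ps|ps0:2222;ps1:2222,worker|worker0:2333",
                     server_def.cluster)
  # server_def.cluster now holds a "ps" job with tasks 0 and 1 and a
  # "worker" job with task 0.
  return server_def.cluster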
def main(unused_args):
# Create Protobuf ServerDef
server_def = tensorflow_server_pb2.ServerDef(protocol="grpc")
# Cluster info
parse_cluster_spec(FLAGS.cluster_spec, server_def.cluster, FLAGS.verbose)
# Job name
if not FLAGS.job_name:
raise ValueError("Empty job_name")
server_def.job_name = FLAGS.job_name
# Task index
if FLAGS.task_id < 0:
raise ValueError("Invalid task_id: %d" % FLAGS.task_id)
server_def.task_index = FLAGS.task_id
config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction))
# Create GRPC Server instance
server = server_lib.Server(server_def, config=config)
# join() is blocking, unlike start()
server.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--cluster_spec",
type=str,
default="",
help="""\
      Cluster spec: SPEC. SPEC is <JOB>(,<JOB>)*, JOB is
      <NAME>|<HOST:PORT>(;<HOST:PORT>)*, NAME is a valid job name
      ([a-z][0-9a-z]*), HOST is a hostname or IP address, PORT is a
      port number. E.g., local|localhost:2222;localhost:2223,
      ps|ps0:2222;ps1:2222\
"""
)
parser.add_argument(
"--job_name",
type=str,
default="",
help="Job name: e.g., local"
)
parser.add_argument(
"--task_id",
type=int,
default=0,
help="Task index, e.g., 0"
)
parser.add_argument(
"--gpu_memory_fraction",
type=float,
default=1.0,
help="Fraction of GPU memory allocated",)
parser.add_argument(
"--verbose",
type="bool",
nargs="?",
const=True,
default=False,
help="Verbose mode"
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/python/debug/lib/grpc_tensorflow_server.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data structures and algorithms for profiling information."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
class ProfileDatum(object):
"""Profile data point."""
def __init__(self,
device_name,
node_exec_stats,
file_path,
line_number,
func_name,
op_type):
"""Constructor.
Args:
device_name: (string) name of the device.
node_exec_stats: `NodeExecStats` proto.
file_path: path to the source file involved in creating the op.
line_number: line number in the file involved in creating the op.
func_name: name of the function that the line belongs to.
op_type: (string) Operation type.
"""
self.device_name = device_name
self.node_exec_stats = node_exec_stats
self.file_path = file_path
self.line_number = line_number
self.func_name = func_name
if self.file_path:
self.file_line_func = "%s:%d(%s)" % (
os.path.basename(self.file_path), self.line_number, self.func_name)
else:
self.file_line_func = ""
self.op_type = op_type
self.start_time = self.node_exec_stats.all_start_micros
self.op_time = (self.node_exec_stats.op_end_rel_micros -
self.node_exec_stats.op_start_rel_micros)
@property
def exec_time(self):
"""Op execution time plus pre- and post-processing."""
return self.node_exec_stats.all_end_rel_micros
class AggregateProfile(object):
"""Profile summary data for aggregating a number of ProfileDatum."""
def __init__(self, profile_datum):
"""Constructor.
Args:
profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to
initialize this object with.
"""
self.total_op_time = profile_datum.op_time
self.total_exec_time = profile_datum.exec_time
device_and_node = "%s:%s" % (profile_datum.device_name,
profile_datum.node_exec_stats.node_name)
self._node_to_exec_count = {device_and_node: 1}
def add(self, profile_datum):
"""Accumulate a new instance of ProfileDatum.
Args:
profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to
accumulate to this object.
"""
self.total_op_time += profile_datum.op_time
self.total_exec_time += profile_datum.exec_time
device_and_node = "%s:%s" % (profile_datum.device_name,
profile_datum.node_exec_stats.node_name)
device_and_node = "%s:%s" % (profile_datum.device_name,
profile_datum.node_exec_stats.node_name)
if device_and_node in self._node_to_exec_count:
self._node_to_exec_count[device_and_node] += 1
else:
self._node_to_exec_count[device_and_node] = 1
@property
def node_count(self):
return len(self._node_to_exec_count)
@property
def node_exec_count(self):
return sum(self._node_to_exec_count.values())
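

# A minimal sketch of aggregating ProfileDatum instances with the class
# above, assuming the caller supplies two data points.
def _example_aggregate_profiles(datum_a, datum_b):
  aggregate = AggregateProfile(datum_a)
  aggregate.add(datum_b)
  # total_op_time and total_exec_time are simple sums; node_count counts
  # distinct (device, node) pairs, while node_exec_count counts executions.
  return (aggregate.total_op_time, aggregate.total_exec_time,
          aggregate.node_count, aggregate.node_exec_count)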
|
tensorflow-master
|
tensorflow/python/debug/lib/profiling.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfdbg module debug_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ParseNodeOrTensorNameTest(test_util.TensorFlowTestCase):
def testParseNodeName(self):
node_name, slot = debug_graphs.parse_node_or_tensor_name(
"namespace1/node_1")
self.assertEqual("namespace1/node_1", node_name)
self.assertIsNone(slot)
def testParseTensorName(self):
node_name, slot = debug_graphs.parse_node_or_tensor_name(
"namespace1/node_2:3")
self.assertEqual("namespace1/node_2", node_name)
self.assertEqual(3, slot)
class GetNodeNameAndOutputSlotTest(test_util.TensorFlowTestCase):
def testParseTensorNameInputWorks(self):
self.assertEqual("a", debug_graphs.get_node_name("a:0"))
self.assertEqual(0, debug_graphs.get_output_slot("a:0"))
self.assertEqual("_b", debug_graphs.get_node_name("_b:1"))
self.assertEqual(1, debug_graphs.get_output_slot("_b:1"))
def testParseNodeNameInputWorks(self):
self.assertEqual("a", debug_graphs.get_node_name("a"))
self.assertEqual(0, debug_graphs.get_output_slot("a"))
class NodeNameChecksTest(test_util.TensorFlowTestCase):
def testIsCopyNode(self):
self.assertTrue(debug_graphs.is_copy_node("__copy_ns1/ns2/node3_0"))
self.assertFalse(debug_graphs.is_copy_node("copy_ns1/ns2/node3_0"))
self.assertFalse(debug_graphs.is_copy_node("_copy_ns1/ns2/node3_0"))
self.assertFalse(debug_graphs.is_copy_node("_copyns1/ns2/node3_0"))
self.assertFalse(debug_graphs.is_copy_node("__dbg_ns1/ns2/node3_0"))
def testIsDebugNode(self):
self.assertTrue(
debug_graphs.is_debug_node("__dbg_ns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(
debug_graphs.is_debug_node("dbg_ns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(
debug_graphs.is_debug_node("_dbg_ns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(
debug_graphs.is_debug_node("_dbgns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(debug_graphs.is_debug_node("__copy_ns1/ns2/node3_0"))
class ParseDebugNodeNameTest(test_util.TensorFlowTestCase):
def testParseDebugNodeName_valid(self):
debug_node_name_1 = "__dbg_ns_a/ns_b/node_c:1_0_DebugIdentity"
(watched_node, watched_output_slot, debug_op_index,
debug_op) = debug_graphs.parse_debug_node_name(debug_node_name_1)
self.assertEqual("ns_a/ns_b/node_c", watched_node)
self.assertEqual(1, watched_output_slot)
self.assertEqual(0, debug_op_index)
self.assertEqual("DebugIdentity", debug_op)
def testParseDebugNodeName_invalidPrefix(self):
invalid_debug_node_name_1 = "__copy_ns_a/ns_b/node_c:1_0_DebugIdentity"
with self.assertRaisesRegexp(ValueError, "Invalid prefix"):
debug_graphs.parse_debug_node_name(invalid_debug_node_name_1)
def testParseDebugNodeName_missingDebugOpIndex(self):
invalid_debug_node_name_1 = "__dbg_node1:0_DebugIdentity"
with self.assertRaisesRegexp(ValueError, "Invalid debug node name"):
debug_graphs.parse_debug_node_name(invalid_debug_node_name_1)
def testParseDebugNodeName_invalidWatchedTensorName(self):
invalid_debug_node_name_1 = "__dbg_node1_0_DebugIdentity"
with self.assertRaisesRegexp(ValueError,
"Invalid tensor name in debug node name"):
debug_graphs.parse_debug_node_name(invalid_debug_node_name_1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_graphs_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger (tfdbg) Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from six.moves import xrange # pylint: disable=redefined-builtin
def add_debug_tensor_watch(run_options,
node_name,
output_slot=0,
debug_ops="DebugIdentity",
debug_urls=None,
tolerate_debug_op_creation_failures=False,
global_step=-1):
"""Add watch on a `Tensor` to `RunOptions`.
N.B.:
    1. Under certain circumstances, the `Tensor` may not actually get watched
(e.g., if the node of the `Tensor` is constant-folded during runtime).
2. For debugging purposes, the `parallel_iteration` attribute of all
      `tf.while_loop`s in the graph is set to 1 to prevent any node from
being executed multiple times concurrently. This change does not affect
subsequent non-debugged runs of the same `tf.while_loop`s.
Args:
run_options: An instance of `config_pb2.RunOptions` to be modified.
node_name: (`str`) name of the node to watch.
output_slot: (`int`) output slot index of the tensor from the watched node.
debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s). Can be a
`list` of `str` or a single `str`. The latter case is equivalent to a
`list` of `str` with only one element.
For debug op types with customizable attributes, each debug op string can
optionally contain a list of attribute names, in the syntax of:
debug_op_name(attr_name_1=attr_value_1;attr_name_2=attr_value_2;...)
debug_urls: (`str` or `list` of `str`) URL(s) to send debug values to,
e.g., `file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`.
tolerate_debug_op_creation_failures: (`bool`) Whether to tolerate debug op
creation failures by not throwing exceptions.
global_step: (`int`) Optional global_step count for this debug tensor
watch.
"""
watch_opts = run_options.debug_options.debug_tensor_watch_opts
run_options.debug_options.global_step = global_step
watch = watch_opts.add()
watch.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
watch.node_name = node_name
watch.output_slot = output_slot
if isinstance(debug_ops, str):
debug_ops = [debug_ops]
watch.debug_ops.extend(debug_ops)
if debug_urls:
if isinstance(debug_urls, str):
debug_urls = [debug_urls]
watch.debug_urls.extend(debug_urls)
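

# A minimal sketch of a single-tensor watch using the function above; the
# node name and the file:// URL are illustrative assumptions.
def _example_single_tensor_watch(run_options):
  add_debug_tensor_watch(
      run_options,
      "hidden/weights",  # watch output slot 0 of this (assumed) node
      output_slot=0,
      debug_ops="DebugIdentity",
      debug_urls="file:///tmp/tfdbg_dump_1")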
def watch_graph(run_options,
graph,
debug_ops="DebugIdentity",
debug_urls=None,
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False,
global_step=-1,
reset_disk_byte_usage=False):
"""Add debug watches to `RunOptions` for a TensorFlow graph.
To watch all `Tensor`s on the graph, let both `node_name_regex_whitelist`
and `op_type_regex_whitelist` be the default (`None`).
N.B.:
    1. Under certain circumstances, the `Tensor` may not actually get watched
(e.g., if the node of the `Tensor` is constant-folded during runtime).
2. For debugging purposes, the `parallel_iteration` attribute of all
      `tf.while_loop`s in the graph is set to 1 to prevent any node from
being executed multiple times concurrently. This change does not affect
subsequent non-debugged runs of the same `tf.while_loop`s.
Args:
run_options: An instance of `config_pb2.RunOptions` to be modified.
graph: An instance of `ops.Graph`.
debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s) to use.
debug_urls: URLs to send debug values to. Can be a list of strings,
a single string, or None. The case of a single string is equivalent to
a list consisting of a single string, e.g., `file:///tmp/tfdbg_dump_1`,
`grpc://localhost:12345`.
For debug op types with customizable attributes, each debug op name string
can optionally contain a list of attribute names, in the syntax of:
debug_op_name(attr_name_1=attr_value_1;attr_name_2=attr_value_2;...)
node_name_regex_whitelist: Regular-expression whitelist for node_name,
e.g., `"(weight_[0-9]+|bias_.*)"`
op_type_regex_whitelist: Regular-expression whitelist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both `node_name_regex_whitelist` and `op_type_regex_whitelist`
are set, the two filtering operations will occur in a logical `AND`
relation. In other words, a node will be included if and only if it
hits both whitelists.
tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor
data type, e.g., `"^int.*"`.
      This whitelist operates in a logical `AND` relation with the two
      whitelists above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
global_step: (`int`) Optional global_step count for this debug tensor
watch.
reset_disk_byte_usage: (`bool`) whether to reset the tracked disk byte
usage to zero (default: `False`).
"""
if isinstance(debug_ops, str):
debug_ops = [debug_ops]
node_name_pattern = (re.compile(node_name_regex_whitelist)
if node_name_regex_whitelist else None)
op_type_pattern = (re.compile(op_type_regex_whitelist)
if op_type_regex_whitelist else None)
tensor_dtype_pattern = (re.compile(tensor_dtype_regex_whitelist)
if tensor_dtype_regex_whitelist else None)
ops = graph.get_operations()
for op in ops:
# Skip nodes without any output tensors.
if not op.outputs:
continue
node_name = op.name
op_type = op.type
if node_name_pattern and not node_name_pattern.match(node_name):
continue
if op_type_pattern and not op_type_pattern.match(op_type):
continue
for slot in xrange(len(op.outputs)):
if (tensor_dtype_pattern and
not tensor_dtype_pattern.match(op.outputs[slot].dtype.name)):
continue
add_debug_tensor_watch(
run_options,
node_name,
output_slot=slot,
debug_ops=debug_ops,
debug_urls=debug_urls,
tolerate_debug_op_creation_failures=(
tolerate_debug_op_creation_failures),
global_step=global_step)
run_options.debug_options.reset_disk_byte_usage = reset_disk_byte_usage
def watch_graph_with_blacklists(run_options,
graph,
debug_ops="DebugIdentity",
debug_urls=None,
node_name_regex_blacklist=None,
op_type_regex_blacklist=None,
tensor_dtype_regex_blacklist=None,
tolerate_debug_op_creation_failures=False,
global_step=-1,
reset_disk_byte_usage=False):
"""Add debug tensor watches, blacklisting nodes and op types.
This is similar to `watch_graph()`, but the node names and op types are
blacklisted, instead of whitelisted.
N.B.:
    1. Under certain circumstances, the `Tensor` may not actually get watched
(e.g., if the node of the `Tensor` is constant-folded during runtime).
2. For debugging purposes, the `parallel_iteration` attribute of all
      `tf.while_loop`s in the graph is set to 1 to prevent any node from
being executed multiple times concurrently. This change does not affect
subsequent non-debugged runs of the same `tf.while_loop`s.
Args:
run_options: An instance of `config_pb2.RunOptions` to be modified.
graph: An instance of `ops.Graph`.
debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s) to use.
See the documentation of `watch_graph` for more details.
debug_urls: URL(s) to send debug values to, e.g.,
`file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`.
node_name_regex_blacklist: Regular-expression blacklist for node_name.
This should be a string, e.g., `"(weight_[0-9]+|bias_.*)"`.
op_type_regex_blacklist: Regular-expression blacklist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both node_name_regex_blacklist and op_type_regex_blacklist
are set, the two filtering operations will occur in a logical `OR`
relation. In other words, a node will be excluded if it hits either of
the two blacklists; a node will be included if and only if it hits
neither of the blacklists.
tensor_dtype_regex_blacklist: Regular-expression blacklist for Tensor
data type, e.g., `"^int.*"`.
      This blacklist operates in a logical `OR` relation with the two
      blacklists above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
global_step: (`int`) Optional global_step count for this debug tensor
watch.
reset_disk_byte_usage: (`bool`) whether to reset the tracked disk byte
usage to zero (default: `False`).
"""
if isinstance(debug_ops, str):
debug_ops = [debug_ops]
node_name_pattern = (re.compile(node_name_regex_blacklist) if
node_name_regex_blacklist else None)
op_type_pattern = (re.compile(op_type_regex_blacklist) if
op_type_regex_blacklist else None)
tensor_dtype_pattern = (re.compile(tensor_dtype_regex_blacklist) if
tensor_dtype_regex_blacklist else None)
ops = graph.get_operations()
for op in ops:
# Skip nodes without any output tensors.
if not op.outputs:
continue
node_name = op.name
op_type = op.type
if node_name_pattern and node_name_pattern.match(node_name):
continue
if op_type_pattern and op_type_pattern.match(op_type):
continue
for slot in xrange(len(op.outputs)):
if (tensor_dtype_pattern and
tensor_dtype_pattern.match(op.outputs[slot].dtype.name)):
continue
add_debug_tensor_watch(
run_options,
node_name,
output_slot=slot,
debug_ops=debug_ops,
debug_urls=debug_urls,
tolerate_debug_op_creation_failures=(
tolerate_debug_op_creation_failures),
global_step=global_step)
run_options.debug_options.reset_disk_byte_usage = reset_disk_byte_usage
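

# A minimal sketch contrasting the whitelist and blacklist entry points
# above; the regexes are illustrative assumptions. The two calls are
# alternatives for the same RunOptions: use one or the other, not both.
def _example_whitelist_vs_blacklist(run_options, graph, use_whitelist=True):
  if use_whitelist:
    # Watch only nodes whose names look like weights or biases.
    watch_graph(run_options, graph,
                node_name_regex_whitelist=r"(weight_[0-9]+|bias_.*)")
  else:
    # Watch everything except nodes under the gradients/ namespace.
    watch_graph_with_blacklists(run_options, graph,
                                node_name_regex_blacklist=r"gradients/.*")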
|
tensorflow-master
|
tensorflow/python/debug/lib/debug_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_remote."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from tensorflow.core.debug import debug_service_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import source_remote
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def line_number_above():
return tf_inspect.stack()[1][2] - 1
class SendTracebacksTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
test_util.TensorFlowTestCase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True)
cls._server_address = "localhost:%d" % cls._server_port
(cls._server_port_2, cls._debug_server_url_2, cls._server_dump_dir_2,
cls._server_thread_2,
cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address_2 = "localhost:%d" % cls._server_port_2
cls._curr_file_path = os.path.normpath(os.path.abspath(__file__))
@classmethod
def tearDownClass(cls):
# Stop the test server and join the thread.
cls._server.stop_server().wait()
cls._server_thread.join()
cls._server_2.stop_server().wait()
cls._server_thread_2.join()
test_util.TensorFlowTestCase.tearDownClass()
def tearDown(self):
ops.reset_default_graph()
self._server.clear_data()
self._server_2.clear_data()
super(SendTracebacksTest, self).tearDown()
def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
"""Find the first trace of an op that belongs to the TF Python library."""
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace[0]):
        return trace[0]  # The file path component of the trace.
def testSendGraphTracebacksToSingleDebugServer(self):
this_func_name = "testSendGraphTracebacksToSingleDebugServer"
with session.Session() as sess:
a = variables.Variable(21.0, name="a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="b")
b_lineno = line_number_above()
math_ops.add(a, b, name="x")
x_lineno = line_number_above()
send_stack = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
self._server_address, "dummy_run_key", send_stack, sess.graph)
tb = self._server.query_op_traceback("a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
self.assertEqual(
" a = variables.Variable(21.0, name=\"a\")",
self._server.query_source_file_line(__file__, a_lineno))
      # Files in the TensorFlow code base should not have been sent.
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)
with self.assertRaises(ValueError):
self._server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
self._server.query_call_types())
self.assertEqual(["dummy_run_key"], self._server.query_call_keys())
self.assertEqual(
[sess.graph.version], self._server.query_graph_versions())
def testSendGraphTracebacksToTwoDebugServers(self):
this_func_name = "testSendGraphTracebacksToTwoDebugServers"
with session.Session() as sess:
a = variables.Variable(21.0, name="two/a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="two/b")
b_lineno = line_number_above()
x = math_ops.add(a, b, name="two/x")
x_lineno = line_number_above()
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
[self._server_address, self._server_address_2],
"dummy_run_key", send_traceback, sess.graph)
servers = [self._server, self._server_2]
for server in servers:
tb = server.query_op_traceback("two/a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
server.query_origin_stack()[-1])
self.assertEqual(
" x = math_ops.add(a, b, name=\"two/x\")",
server.query_source_file_line(__file__, x_lineno))
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(x.op)
with self.assertRaises(ValueError):
server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
server.query_call_types())
self.assertEqual(["dummy_run_key"], server.query_call_keys())
self.assertEqual([sess.graph.version], server.query_graph_versions())
def testSourceFileSizeExceedsGrpcMessageLengthLimit(self):
"""In case source file size exceeds the grpc message length limit.
it ought not to have been sent to the server.
"""
this_func_name = "testSourceFileSizeExceedsGrpcMessageLengthLimit"
# Patch the method to simulate a very small message length limit.
with test.mock.patch.object(
source_remote, "grpc_message_length_bytes", return_value=2):
with session.Session() as sess:
a = variables.Variable(21.0, name="two/a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="two/b")
b_lineno = line_number_above()
x = math_ops.add(a, b, name="two/x")
x_lineno = line_number_above()
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
[self._server_address, self._server_address_2],
"dummy_run_key", send_traceback, sess.graph)
servers = [self._server, self._server_2]
for server in servers:
# Even though the source file content is not sent, the traceback
# should have been sent.
tb = server.query_op_traceback("two/a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
server.query_origin_stack()[-1])
tf_trace_file_path = (
self._findFirstTraceInsideTensorFlowPyLibrary(x.op))
# Verify that the source content is not sent to the server.
with self.assertRaises(ValueError):
self._server.query_source_file_line(tf_trace_file_path, 0)
def testSendEagerTracebacksToSingleDebugServer(self):
this_func_name = "testSendEagerTracebacksToSingleDebugServer"
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_eager_tracebacks(self._server_address, send_traceback)
self.assertEqual([debug_service_pb2.CallTraceback.EAGER_EXECUTION],
self._server.query_call_types())
self.assertIn((self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/source_remote_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the basic data structures and algorithms for profiling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import step_stats_pb2
from tensorflow.python.debug.lib import profiling
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class AggregateProfile(test_util.TensorFlowTestCase):
def setUp(self):
node_1 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
self.profile_datum_1 = profiling.ProfileDatum(
"cpu:0", node_1, "/foo/bar.py", 10, "func1", "Add")
node_2 = step_stats_pb2.NodeExecStats(
node_name="Mul/456",
op_start_rel_micros=13,
op_end_rel_micros=16,
all_end_rel_micros=17)
self.profile_datum_2 = profiling.ProfileDatum(
"cpu:0", node_2, "/foo/bar.py", 11, "func1", "Mul")
node_3 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=103,
op_end_rel_micros=105,
all_end_rel_micros=4)
self.profile_datum_3 = profiling.ProfileDatum(
"cpu:0", node_3, "/foo/bar.py", 12, "func1", "Add")
node_4 = step_stats_pb2.NodeExecStats(
node_name="Add/123",
op_start_rel_micros=203,
op_end_rel_micros=205,
all_end_rel_micros=4)
self.profile_datum_4 = profiling.ProfileDatum(
"gpu:0", node_4, "/foo/bar.py", 13, "func1", "Add")
def testAggregateProfileConstructorWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
self.assertEqual(2, aggregate_data.total_op_time)
self.assertEqual(4, aggregate_data.total_exec_time)
self.assertEqual(1, aggregate_data.node_count)
self.assertEqual(1, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithDifferentNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_2)
self.assertEqual(5, aggregate_data.total_op_time)
self.assertEqual(21, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(2, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithSameNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_2)
aggregate_data.add(self.profile_datum_3)
self.assertEqual(7, aggregate_data.total_op_time)
self.assertEqual(25, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(3, aggregate_data.node_exec_count)
def testAddToAggregateProfileWithDifferentDeviceSameNodeWorks(self):
aggregate_data = profiling.AggregateProfile(self.profile_datum_1)
aggregate_data.add(self.profile_datum_4)
self.assertEqual(4, aggregate_data.total_op_time)
self.assertEqual(8, aggregate_data.total_exec_time)
self.assertEqual(2, aggregate_data.node_count)
self.assertEqual(2, aggregate_data.node_exec_count)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/debug/lib/profiling_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a Keil uVision project file from a template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import re
def sanitize_xml(unsanitized):
"""Uses a whitelist to avoid generating bad XML."""
return re.sub(r'[^a-zA-Z0-9+_\-/\\.]', '', unsanitized)
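# Illustrative example (not part of the build flow): any character outside the
# whitelist is dropped, so sanitize_xml('src/<main>.cc') returns 'src/main.cc'.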
def main(unused_args, flags):
"""Generates a Keil project file from a template source."""
with open(flags.input_template, 'r') as input_template_file:
template_file_text = input_template_file.read()
template_file_text = re.sub(r'%{EXECUTABLE}%', flags.executable,
template_file_text)
srcs_list = flags.srcs.split(' ')
hdrs_list = flags.hdrs.split(' ')
all_srcs_list = srcs_list + hdrs_list
all_srcs_list.sort()
replace_srcs = ''
for src in all_srcs_list:
if not src:
continue
ext = os.path.splitext(src)[1]
# These extension indexes are used by uVision to keep track of the type
# of files. I determined them by experimentation, since the file format
# isn't documented.
if ext == '.h':
ext_index = '5'
elif ext == '.c':
ext_index = '1'
elif ext == '.cc' or ext == '.cpp':
ext_index = '8'
else:
ext_index = '5'
basename = sanitize_xml(os.path.basename(src))
clean_src = sanitize_xml(src)
replace_srcs += ' <File>\n'
replace_srcs += ' <FileName>' + basename + '</FileName>\n'
replace_srcs += ' <FileType>' + ext_index + '</FileType>\n'
replace_srcs += ' <FilePath>' + clean_src + '</FilePath>\n'
replace_srcs += ' </File>\n'
template_file_text = re.sub(r'%{SRCS}%', replace_srcs, template_file_text)
include_paths = re.sub(' ', ';', flags.include_paths)
template_file_text = re.sub(r'%{INCLUDE_PATHS}%', include_paths,
template_file_text)
with open(flags.output_file, 'w') as output_file:
output_file.write(template_file_text)
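# For illustration, a source entry like 'foo/bar.cc' expands to the following
# fragment (indentation elided) before substitution into the template:
#   <File>
#     <FileName>bar.cc</FileName>
#     <FileType>8</FileType>
#     <FilePath>foo/bar.cc</FilePath>
#   </File>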
def parse_args():
"""Converts the raw arguments into accessible flags."""
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--input_template',
type=str,
default='',
help='Path to template project file to build from.')
parser.add_argument(
'--output_file',
type=str,
default='',
help='Path to write the completed project file to.')
parser.add_argument(
'--executable',
type=str,
default='',
help='Name of the executable the project will build.')
parser.add_argument(
'--hdrs',
type=str,
default='',
help='Space-separated list of C or C++ header files to include.')
parser.add_argument(
'--srcs',
type=str,
default='',
help='Space-separated list of C or C++ source files to compile.')
parser.add_argument(
'--include_paths',
type=str,
default='',
help='Space-separated list of paths to look for header files on.')
flags, unparsed = parser.parse_known_args()
main(unparsed, flags)
if __name__ == '__main__':
parse_args()
|
tensorflow-master
|
tensorflow/lite/experimental/micro/tools/make/generate_keil_project.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resolves non-system C/C++ includes to their full paths to help Arduino."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import re
import sys
def main(unused_args, flags):
"""Resolves third party headers to their full paths in source code."""
input_file_lines = sys.stdin.read().split('\n')
supplied_headers_list = flags.third_party_headers.split(' ')
output_lines = []
for line in input_file_lines:
include_match = re.match(r'(.*#include.*")(.*)(")', line)
if include_match:
path = include_match.group(2)
for supplied_header in supplied_headers_list:
if supplied_header.endswith(path):
path = supplied_header
break
line = include_match.group(1) + path + include_match.group(3)
output_lines.append(line)
output_text = '\n'.join(output_lines)
sys.stdout.write(output_text)
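# As a sketch of the rewrite: if --third_party_headers contains
# 'third_party/foo/bar.h', then the input line
#   #include "foo/bar.h"
# is emitted as
#   #include "third_party/foo/bar.h"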
def parse_args():
"""Converts the raw arguments into accessible flags."""
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--third_party_headers',
type=str,
default='',
help='Space-separated list of headers to resolve.')
flags, unparsed = parser.parse_known_args()
main(unparsed, flags)
if __name__ == '__main__':
parse_args()
|
tensorflow-master
|
tensorflow/lite/experimental/micro/tools/make/transform_arduino_source.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugging script for checking calculation values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import matplotlib.pyplot as plt
import numpy as np
# import soundfile as sf
def new_data_to_array(fn, datatype='int16'):
"""Converts file information to an in-memory array."""
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
if datatype == 'int8':
typestr = 'b'
arraylen = int(len(b))
elif datatype == 'int16':
typestr = 'h'
arraylen = int(len(b) // 2)
elif datatype == 'int32':
typestr = 'i'
arraylen = int(len(b) // 4)
elif datatype == 'uint8':
typestr = 'B'
arraylen = int(len(b))
elif datatype == 'uint16':
typestr = 'H'
arraylen = int(len(b) // 2)
elif datatype == 'uint32':
typestr = 'I'
arraylen = int(len(b) // 4)
else:
raise ValueError('Unsupported datatype: %s' % datatype)
y = np.array(struct.unpack('<' + typestr * arraylen, b))
return y
# x is the fixed-point input in Qm.n format
def to_float(x, n):
return x.astype(float) * 2**(-n)
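# Worked example: in Q1.15 format (n=15), the int16 value 16384 converts to
# 16384 * 2**-15 = 0.5.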
micro_windowed_input = new_data_to_array(
'micro_windowed_input.txt', datatype='int32')
cmsis_windowed_input = new_data_to_array(
'cmsis_windowed_input.txt', datatype='int16')
micro_dft = new_data_to_array('micro_dft.txt', datatype='int32')
cmsis_dft = new_data_to_array('cmsis_dft.txt', datatype='int16')
py_dft = np.fft.rfft(to_float(cmsis_windowed_input, 15), n=512)
py_result = np.empty((2 * py_dft.size), dtype=np.float)
py_result[0::2] = np.real(py_dft)
py_result[1::2] = np.imag(py_dft)
micro_power = new_data_to_array('micro_power.txt', datatype='int32')
cmsis_power = new_data_to_array('cmsis_power.txt', datatype='int16')
py_power = np.square(np.abs(py_dft))
micro_power_avg = new_data_to_array('micro_power_avg.txt', datatype='uint8')
cmsis_power_avg = new_data_to_array('cmsis_power_avg.txt', datatype='uint8')
plt.figure(1)
plt.subplot(311)
plt.plot(micro_windowed_input, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_windowed_input, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_windowed_input, 30), label='Micro to float')
plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS to float')
plt.legend()
plt.figure(2)
plt.subplot(311)
plt.plot(micro_dft, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_dft, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_dft, 22), label='Micro to float')
# CMSIS result has 6 fractional bits (not 7) due to a documentation error (see
# README.md)
plt.plot(to_float(cmsis_dft, 6), label='CMSIS to float')
plt.plot(py_result, label='Python result')
plt.legend()
plt.figure(3)
plt.subplot(311)
plt.plot(micro_power, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_power[0:256], label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_power, 22), label='Micro to float')
plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS to float')
plt.plot(py_power, label='Python result')
plt.legend()
plt.figure(4)
plt.plot(micro_power_avg, label='Micro fixed')
plt.plot(cmsis_power_avg, label='CMSIS fixed')
plt.legend()
plt.show()
# t = np.arange(16000.*0.03)/16000.
# # Factor of 10 because micro preprocessing overflows otherwise
# sin1k = 0.1*np.sin(2*np.pi*1000*t)
#
# plt.figure(1)
# plt.subplot(511)
# plt.plot(sin1k)
# plt.title('Input sine')
#
# plt.subplot(512)
# plt.plot(to_float(micro_windowed_input, 30), label='Micro-Lite')
# plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS')
# plt.title('Windowed sine')
# plt.legend(loc='center right')
#
# plt.subplot(513)
# plt.plot(to_float(micro_dft, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_dft, 6), label='CMSIS')
# plt.title('FFT')
# plt.legend(loc='center')
#
# plt.subplot(514)
# plt.plot(to_float(micro_power, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS')
# plt.title('|FFT|^2')
# plt.legend(loc='center right')
#
# plt.subplot(515)
# plt.plot(micro_power_avg, label='Micro-Lite')
# plt.plot(cmsis_power_avg, label='CMSIS')
# plt.title('Averaged |FFT|^2')
# plt.legend(loc='center right')
#
# plt.tight_layout(pad=0, w_pad=0.2, h_pad=0.2)
#
# plt.show()
#
|
tensorflow-master
|
tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/compare_1k.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts values pulled from the microcontroller into audio files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
# import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
def new_data_to_array(fn):
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
y = struct.unpack('<' + 'h' * int(len(b) / 2), b)
return y
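# The dump format assumed here: one header line (skipped), then lines of
# whitespace-separated hex values; e.g. a line '0010 0020' contributes the
# byte values 16 and 32, which are later unpacked as little-endian int16.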
data = 'captured_data.txt'
values = np.array(new_data_to_array(data)).astype(float)
# plt.plot(values, 'o-')
# plt.show(block=False)
wav = values / np.max(np.abs(values))
sf.write('captured_data.wav', wav, 16000)
|
tensorflow-master
|
tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/captured_data_to_wav.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Outputs tables used for fast calculations at runtime."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import soundfile as sf
import numpy as np
def to_cc(x, varname, directory='', scale_factor=1):
"""Writes table values to a C++ source file."""
x = (x / np.max(np.abs(x))) * 32768 * scale_factor
x[x > 32767] = 32767
x[x < -32768] = -32768
x = x.astype(int)
x = [str(v) if i % 10 != 0 else '\n ' + str(v) for i, v in enumerate(x)]
cmsis_path = 'tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS'
xstr = '#include "{}/{}.h"\n\n'.format(cmsis_path, varname)
xstr += 'const int g_{}_size = {};\n'.format(varname, len(x))
xstr += 'const int16_t g_{}[{}] = {{{}}};\n'.format(varname, len(x),
', '.join(x))
with open(directory + varname + '.cc', 'w') as f:
f.write(xstr)
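# Sketch of the generated .cc (assuming varname='hanning' and a 480-sample
# table; the array values themselves are elided):
#   #include "tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS/hanning.h"
#   const int g_hanning_size = 480;
#   const int16_t g_hanning[480] = { ... };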
def to_h(_, varname, directory=''):
"""Writes a header file for the table values."""
tf_prepend = 'TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_'
xstr = '#ifndef {}{}_H_\n'.format(tf_prepend, varname.upper())
xstr += '#define {}{}_H_\n\n'.format(tf_prepend, varname.upper())
xstr += '#include <cstdint>\n\n'
xstr += 'extern const int g_{}_size;\n'.format(varname)
xstr += 'extern const int16_t g_{}[];\n\n'.format(varname)
xstr += '#endif'
with open(directory + varname + '.h', 'w') as f:
f.write(xstr)
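# Sketch of the generated header (assuming varname='hanning'):
#   #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_HANNING_H_
#   #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_SPEECH_HANNING_H_
#   #include <cstdint>
#   extern const int g_hanning_size;
#   extern const int16_t g_hanning[];
#   #endif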
# x = sf.read('yes_f2e59fea_nohash_1.wav')[0]
# to_cc(x, 'yes_waveform')
# to_h(x, 'yes_waveform')
#
# x = sf.read('no_f9643d42_nohash_4.wav')[0]
# to_cc(x, 'no_waveform')
# to_h(x, 'no_waveform')
# 30ms of data @ 16 kHz = 480 samples
hann = np.hanning(int(16000 * 0.03)) # Window 30ms of data
to_cc(hann, 'hanning', directory='./')
to_h(hann, 'hanning', directory='./')
t = np.arange(16000. * 0.03) / 16000.
# Factor of 10 (scale_factor=0.1 in to_cc below) because micro preprocessing
# overflows otherwise.
sin1k = np.sin(2 * np.pi * 1000 * t)
to_cc(sin1k, 'sin_1k', directory='./', scale_factor=0.1)
to_h(sin1k, 'sin_1k', directory='./')
|
tensorflow-master
|
tensorflow/lite/experimental/micro/examples/micro_speech/CMSIS/create_constants.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
# MNIST is meant to be classified in 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(BidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self,
fw_rnn_layer,
bw_rnn_layer,
is_dynamic_rnn,
is_inference,
use_sequence_length=False):
"""Build Mnist recognition model.
Args:
fw_rnn_layer: The forward rnn layer, either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer, either a single rnn cell or a multi
rnn cell.
is_dynamic_rnn: Use dynamic_rnn or not.
is_inference: If True, build the model with batch size 1 for inference.
use_sequence_length: Whether to use sequence length or not. Defaults to
False.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units * 2, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
batch_size = self.batch_size
if is_inference:
batch_size = 1
# input image placeholder
x = tf.placeholder(
"float", [batch_size, self.time_steps, self.n_input],
name="INPUT_IMAGE")
sequence_length = None
if use_sequence_length:
sequence_length = [self.time_steps] * batch_size
if is_dynamic_rnn:
rnn_inputs = tf.transpose(x, [1, 0, 2])
outputs, _ = bidirectional_dynamic_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
sequence_length,
dtype="float32",
time_major=True)
fw_outputs, bw_outputs = outputs
output = tf.concat([fw_outputs, bw_outputs], 2)
output = tf.unstack(output, axis=0)
output = output[-1]
else:
rnn_inputs = tf.unstack(x, self.time_steps, 1)
# Sequence length is not supported for static rnn since we don't have a
# wrapper for it. In the training phase we can still have sequence_length,
# but in the inference phase we change it to None.
if is_inference:
sequence_length = None
outputs, _, _ = tf.nn.static_bidirectional_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
dtype="float32",
sequence_length=sequence_length)
output = outputs[-1]
# Compute logits by multiplying output of shape [batch_size,num_units*2]
# by the softmax layer's out_weight of shape [num_units*2,n_classes]
# plus out_bias
prediction = tf.matmul(output, out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self,
fw_rnn_layer,
bw_rnn_layer,
sess,
saver,
is_dynamic_rnn,
use_sequence_length=False):
"""Saves and restores the model to mimic the most common use case.
Args:
fw_rnn_layer: The forward rnn layer, either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer, either a single rnn cell or a multi
rnn cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver().
is_dynamic_rnn: Use dynamic_rnn or not.
use_sequence_length: Whether to use sequence length or not. Defaults to
False.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
- Output tensor, which is the softmax result of the prediction tensor.
- New session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(
fw_rnn_layer, bw_rnn_layer, is_dynamic_rnn, True, use_sequence_length)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
This method converts the tensorflow session to a tflite model, then runs
tflite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so they will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), False, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), self.buildRnnLayer(), sess, saver, False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
def testStaticRnnMultiRnnCellWithSequenceLength(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
False,
is_inference=False,
use_sequence_length=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
False,
use_sequence_length=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), True, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCellWithSequenceLength(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
True,
is_inference=False,
use_sequence_length=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True,
use_sequence_length=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_rnn_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TfLite BasicRnnCell wrapper.
TODO(renjieliu): Find a better home for this one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.lite.python.op_hint import OpHint
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["lite.experimental.nn.TfLiteRNNCell"])
class TfLiteRNNCell(rnn_cell_impl.LayerRNNCell):
"""The most basic RNN cell.
This is used only for TfLite; it provides hints and it also puts the
variables in the format desired by the tflite ops.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initializes the parameters for an RNN cell.
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`. It can also be a string
matching a Keras activation function name.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. Raises an error if not `True` and the existing scope
already has the given variables.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
Raises:
ValueError: If the existing scope already has the given variables.
"""
super(TfLiteRNNCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
# Inputs must be Rank-2.
self.input_spec = base_layer.InputSpec(ndim=2)
self._tflite_wrapper = OpHint("UnidirectionalSequenceRnn")
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
"""Builds the RNN cell.
Args:
inputs_shape: Rnn input tensor shape.
Raises:
ValueError: If last dimension of the input shape is not known.
"""
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
(inputs_shape,))
input_depth = inputs_shape[-1]
def add_variable_wrapped(name, shape, initializer, index):
var = self.add_weight(name, shape=shape, initializer=initializer)
return self._tflite_wrapper.add_input(
var, name=name, index_override=index)
self._input_weights = add_variable_wrapped(
"input_weights", [self._num_units, input_depth], None, 1)
self._recurrent_weights = add_variable_wrapped(
"recurrent_weights", [self._num_units, self._num_units], None, 2)
self._bias = add_variable_wrapped(
"bias",
shape=[self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype),
index=3)
self.built = True
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
inputs = self._tflite_wrapper.add_input(
inputs, tag="input", name="input", aggregate="stack", index_override=0)
state = self._tflite_wrapper.add_input(
state,
tag="hidden_state",
name="hidden_state",
aggregate="first",
index_override=4)
weights = array_ops.transpose(
array_ops.concat([self._input_weights, self._recurrent_weights], 1))
gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), weights)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
output = self._tflite_wrapper.add_output(
output,
tag="output",
name="output",
index_override=1,
aggregate="stack")
return output, output
def get_config(self):
config = {
"num_units": self._num_units,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(TfLiteRNNCell, self).get_config()
return dict(itertools.chain(base_config.items(), config.items()))
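# A minimal usage sketch (assuming the TF 1.x-style APIs used by the tests in
# this directory): the cell plugs into the standard RNN helpers, e.g.
#
#   cell = TfLiteRNNCell(num_units=16, name="rnn1")
#   outputs, state = tf.compat.v1.nn.static_rnn(cell, inputs, dtype="float32")
#
# where `inputs` is a list of rank-2 [batch, input_depth] tensors.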
@tf_export(v1=["lite.experimental.nn.TFLiteLSTMCell"])
class TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
This is used only for TfLite; it provides hints and it also puts the
variables in the format desired by the tflite ops (transposed and separated).
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=None,
num_proj_shards=None,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1 in
order to reduce the scale of forgetting at the beginning of the
training. Must set it manually to `0.0` when restoring from CudnnLSTM
trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of the
`c_state` and `m_state`. If False, they are concatenated along the
column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`. When
restoring from CudnnLSTM-trained checkpoints, use
`CudnnCompatibleLSTMCell` instead.
"""
super(TFLiteLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
# TODO(raziel): decide if we want to just support tuples (yes please!).
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
# Inputs must be 2-dimensional.
# TODO(raziel): layers stuff -- chop if un-layerizing Op.
self.input_spec = base_layer.InputSpec(ndim=2)
self._tflite_wrapper = OpHint("UnidirectionalSequenceLstm")
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
self._output_size = num_proj if num_proj else num_units
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, self._output_size)
if state_is_tuple else num_units + self._output_size)
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def build(self, inputs_shape):
"""Build TfLite LSTM cell graph.
Args:
inputs_shape: The input shape, which must be known and of the form
[batch_size, input_size].
Raises:
ValueError: if the inputs_shape is invalid.
"""
if len(inputs_shape) != 2:
raise ValueError(
"inputs_shape must be 2-dimensional, saw shape: %s" % inputs_shape)
input_depth = (
inputs_shape[1]
if isinstance(inputs_shape[1], int) else inputs_shape[1].value)
if input_depth is None:
raise ValueError("Invalid inputs_shape, saw shape: %s" % inputs_shape)
maybe_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None else None)
input_weight_shape = [self._num_units, input_depth]
cell_weight_shape = [self._num_units, self._output_size]
bias_shape = [self._num_units]
def add_variable_wrapped(name, shape, initializer, index, partitioner):
var = self.add_weight(
name, shape=shape, initializer=initializer, partitioner=partitioner)
return self._tflite_wrapper.add_input(
var, name=name, index_override=index)
weight_initializer = self._initializer
if self.dtype is None:
bias_initializer = init_ops.zeros_initializer
else:
bias_initializer = init_ops.zeros_initializer(dtype=self.dtype)
forget_bias_initializer = init_ops.constant_initializer(self._forget_bias)
self.input_to_input_w = add_variable_wrapped(
"input_to_input_w", input_weight_shape, weight_initializer, 1,
maybe_partitioner)
self.input_to_forget_w = add_variable_wrapped(
"input_to_forget_w", input_weight_shape, weight_initializer, 2,
maybe_partitioner)
self.input_to_cell_w = add_variable_wrapped(
"input_to_cell_w", input_weight_shape, weight_initializer, 3,
maybe_partitioner)
self.input_to_output_w = add_variable_wrapped(
"input_to_output_w", input_weight_shape, weight_initializer, 4,
maybe_partitioner)
self.cell_to_input_w = add_variable_wrapped(
"cell_to_input_w", cell_weight_shape, weight_initializer, 5,
maybe_partitioner)
self.cell_to_forget_w = add_variable_wrapped(
"cell_to_forget_w", cell_weight_shape, weight_initializer, 6,
maybe_partitioner)
self.cell_to_cell_w = add_variable_wrapped(
"cell_to_cell_w", cell_weight_shape, weight_initializer, 7,
maybe_partitioner)
self.cell_to_output_w = add_variable_wrapped(
"cell_to_output_w", cell_weight_shape, weight_initializer, 8,
maybe_partitioner)
self.input_bias = add_variable_wrapped(
"input_bias", bias_shape, bias_initializer, 12, maybe_partitioner)
self.forget_bias = add_variable_wrapped("forget_bias", bias_shape,
forget_bias_initializer, 13,
maybe_partitioner)
self.cell_bias = add_variable_wrapped(
"cell_bias", bias_shape, bias_initializer, 14, maybe_partitioner)
self.output_bias = add_variable_wrapped(
"output_bias", bias_shape, bias_initializer, 15, maybe_partitioner)
# index 9, 10, 11.
# f stands for forget, i stands for input and o stands for output.
if self._use_peepholes:
self._w_f_diag = add_variable_wrapped("w_f_diag", [self._num_units],
self._initializer, 10,
maybe_partitioner)
self._w_i_diag = add_variable_wrapped("w_i_diag", [self._num_units],
self._initializer, 9,
maybe_partitioner)
self._w_o_diag = add_variable_wrapped("w_o_diag", [self._num_units],
self._initializer, 11,
maybe_partitioner)
# index 16 for proj kernel.
if self._num_proj is not None:
maybe_proj_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None else None)
self._proj_kernel = add_variable_wrapped(
"projection/kernel", [self._num_proj, self._num_units],
self._initializer,
16,
partitioner=maybe_proj_partitioner)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, `[batch, num_units]`.
state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
[batch, state_size]`. If `state_is_tuple` is True, this must be a tuple
of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
inputs = self._tflite_wrapper.add_input(
inputs, tag="input", name="input", aggregate="stack", index_override=0)
# Make sure inputs and the cell weights have the same dtype.
assert inputs.dtype == self.input_to_input_w.dtype
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
# Note: For TfLite, the cell state is at index 19 while the activation state
# is at index 18.
c_prev = self._tflite_wrapper.add_input(
c_prev,
tag="c_prev",
name="c_prev",
aggregate="first",
index_override=19)
m_prev = self._tflite_wrapper.add_input(
m_prev,
tag="m_prev",
name="m_prev",
aggregate="first",
index_override=18)
input_size = inputs.shape.with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.shape[-1]")
inputs_and_m_prev = array_ops.concat([inputs, m_prev], axis=1)
# i stands for input gate.
# f stands for forget gate activation.
# o stands for output gate.
# j stands for the new cell input (candidate state).
# c is the final cell state.
# m is the output.
i = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_input_w, self.cell_to_input_w],
axis=1),
transpose_b=True), self.input_bias)
f = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_forget_w, self.cell_to_forget_w],
axis=1),
transpose_b=True), self.forget_bias)
o = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_output_w, self.cell_to_output_w],
axis=1),
transpose_b=True), self.output_bias)
j = nn_ops.bias_add(
math_ops.matmul(
inputs_and_m_prev,
array_ops.concat([self.input_to_cell_w, self.cell_to_cell_w],
axis=1),
transpose_b=True), self.cell_bias)
# Diagonal connections
if self._use_peepholes:
c = (
sigmoid(f + self._w_f_diag * c_prev) * c_prev +
sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f) * c_prev + sigmoid(i) * self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
transposed_proj_kernel = array_ops.transpose(self._proj_kernel)
m = math_ops.matmul(m, transposed_proj_kernel)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
c = self._tflite_wrapper.add_output(
c, tag="c", name="c", aggregate="last", index_override=1)
m = self._tflite_wrapper.add_output(
m, tag="m", name="m", index_override=2, aggregate="stack")
new_state = (
rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
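# Summarizing the state update computed above (non-peephole case):
#   c_t = sigmoid(f) * c_{t-1} + sigmoid(i) * activation(j)
#   m_t = sigmoid(o) * activation(c_t)
# followed by the optional projection m_t = m_t @ transpose(proj_kernel).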
def get_config(self):
config = {
"num_units": self._num_units,
"use_peepholes": self._use_peepholes,
"cell_clip": self._cell_clip,
"initializer": initializers.serialize(self._initializer),
"num_proj": self._num_proj,
"proj_clip": self._proj_clip,
"num_unit_shards": self._num_unit_shards,
"num_proj_shards": self._num_proj_shards,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(TFLiteLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
tensorflow-master
|
tensorflow/lite/experimental/examples/lstm/rnn_cell.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
def setUp(self):
tf.reset_default_graph()
# Import MNIST dataset
self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
# MNIST is meant to be classified in 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Lstm Units.
self.num_units = 16
def buildLstmLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=0, name="rnn1"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, num_proj=8, forget_bias=0, name="rnn2"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, forget_bias=0, name="rnn4")
])
def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
fw_lstm_layer: The forward lstm layer, either a single lstm cell or a
multi lstm cell.
bw_lstm_layer: The backward lstm layer, either a single lstm cell or a
multi lstm cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units * 2, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
if is_dynamic_rnn:
lstm_inputs = tf.transpose(x, [1, 0, 2])
outputs, _ = bidirectional_dynamic_rnn(
fw_lstm_layer,
bw_lstm_layer,
lstm_inputs,
dtype="float32",
time_major=True)
fw_outputs, bw_outputs = outputs
output = tf.concat([fw_outputs, bw_outputs], 2)
output = tf.unstack(output, axis=0)
output = output[-1]
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _, _ = tf.nn.static_bidirectional_rnn(
fw_lstm_layer, bw_lstm_layer, lstm_input, dtype="float32")
output = outputs[-1]
# Compute logits by multiplying output of shape [batch_size,num_units*2]
# by the softmax layer's out_weight of shape [num_units*2,n_classes]
# plus out_bias
prediction = tf.matmul(output, out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, fw_lstm_layer, bw_lstm_layer, sess, saver,
is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
fw_lstm_layer: The forward lstm layer, either a single lstm cell or a
multi lstm cell.
bw_lstm_layer: The backward lstm layer, either a single lstm cell or a
multi lstm cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver().
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
- Output tensor, which is the softmax result of the prediction tensor.
- New session of the restored model.
"""
model_dir = tempfile.mkdtemp()
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(fw_lstm_layer, bw_lstm_layer,
is_dynamic_rnn)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
This method converts the tensorflow session to a tflite model, then runs
tflite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
try:
interpreter.allocate_tensors()
except ValueError:
self.fail("allocate_tensors() failed for the converted tflite model.")
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so they will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(self.buildLstmLayer(),
self.buildLstmLayer(), False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), self.buildLstmLayer(), sess, saver, False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(self.buildLstmLayer(),
self.buildLstmLayer(), True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(),
self.buildLstmLayer(),
sess,
saver,
is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_lstm_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(UnidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
# MNIST is meant to be classified in 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(UnidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self, rnn_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
rnn_layer: The rnn layer, either a single rnn cell or a multi rnn cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
rnn_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_layer, rnn_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
rnn_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(rnn_layer, rnn_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
sess.run(tf.global_variables_initializer())
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, rnn_layer, sess, saver, is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
      rnn_layer: The rnn layer, either a single rnn cell or a multi rnn cell.
sess: Old session.
      saver: Saver created by tf.compat.v1.train.Saver()
      is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(rnn_layer, is_dynamic_rnn)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
    This method converts the TensorFlow session into a TFLite model and then,
    given the inputs, runs TFLite inference and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
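# A compact, illustrative sketch of the conversion step the tests above
# exercise (not part of the test suite): it assumes a live `sess` whose graph
# contains `input_tensor` and `output_tensor`.
def _convert_session_to_tflite(sess, input_tensor, output_tensor):
  converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
                                                   [output_tensor])
  # convert() returns the serialized flatbuffer model as bytes.
  return converter.convert()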
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_rnn_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
def setUp(self):
tf.reset_default_graph()
# Import MNIST dataset
self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Lstm Units.
self.num_units = 16
def buildLstmLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=1.0, name="rnn1"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, num_proj=8, forget_bias=1.0, name="rnn2"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_units, forget_bias=1.0, name="rnn4")
])
def buildModel(self, lstm_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
      lstm_layer: The lstm layer, either a single lstm cell or a multi lstm
        cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
lstm_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_layer, lstm_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, lstm_layer, sess, saver, is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
      lstm_layer: The lstm layer, either a single lstm cell or a multi lstm
        cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp()
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(lstm_layer, is_dynamic_rnn)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
    This method converts the TensorFlow session into a TFLite model and then,
    given the inputs, runs TFLite inference and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
    interpreter.allocate_tensors()
    input_index = interpreter.get_input_details()[0]["index"]
    interpreter.set_tensor(input_index, test_inputs)
    interpreter.invoke()
    output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_lstm_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TfLite LSTMCell wrapper.
TODO(renjieliu): Find a better home for this one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python.op_hint import OpHint
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.rnn import _best_effort_input_batch_size
from tensorflow.python.ops.rnn import _dynamic_rnn_loop
from tensorflow.python.ops.rnn import _should_cache
from tensorflow.python.ops.rnn import _transpose_batch_time
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["lite.experimental.nn.dynamic_rnn"])
def dynamic_rnn(cell,
inputs,
sequence_length=None,
initial_state=None,
dtype=None,
parallel_iterations=None,
swap_memory=False,
time_major=True,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.nn.rnn_cell.LSTMStateTuple for each cell
outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If `time_major == True`, this must be a `Tensor` of shape: `[max_time,
batch_size, ...]`, or a nested tuple of such elements. This may also be
a (possibly nested) tuple of Tensors satisfying this property. The
first two dimensions must match across all the inputs, but otherwise the
ranks and other shape components may differ. In this case, input to
`cell` at each time-step will replicate the structure of these tuples,
except for the time dimension (from which the time is taken). The input
to `cell` at each time step will be a `Tensor` or (possibly nested)
tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used
to copy-through state and zero-out outputs when past a batch element's
sequence length. So it's more for performance than correctness.
initial_state: (optional) An initial state for the RNN. If `cell.state_size`
is an integer, this must be a `Tensor` of appropriate type and shape
`[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this
should be a tuple of tensors having shapes `[batch_size, s] for s in
cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency and
can be run in parallel, will be. This parameter trades off time for
space. Values >> 1 use more memory but take less time, while smaller
values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs which
would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors. If true,
these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,
these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using
`time_major = True` is a bit more efficient because it avoids transposes
      at the beginning and end of the RNN calculation. Unlike
      `tf.compat.v1.nn.dynamic_rnn`, this variant defaults to (and currently
      requires) time-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
RuntimeError: If not using control flow v2.
"""
  # Currently, only the time_major == True case is supported.
assert time_major
# TODO(b/123051275): We need to check if the cells are TfLiteLSTMCells or
# TfLiteRNNCells.
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
raise RuntimeError("OpHint dynamic rnn only supports control flow v2.")
parent_first_child_input = [{
"parent_ophint_input_index": 0,
"first_child_ophint_input_index": 0
}]
parent_last_child_output = [{
"parent_output_index": 0,
# For LstmCell, the index is 2.
# For RnnCell, the index is 1.
# So we use -1 meaning it's the last one.
"child_output_index": -1
}]
internal_children_input_output = [{
"child_input_index": 0,
# For LstmCell, the index is 2.
# For RnnCell, the index is 1.
# So we use -1 meaning it's the last one.
"child_output_index": -1
}]
inputs_outputs_mappings = {
"parent_first_child_input": parent_first_child_input,
"parent_last_child_output": parent_last_child_output,
"internal_children_input_output": internal_children_input_output
}
tflite_wrapper = OpHint(
"TfLiteDynamicRnn",
level=2,
children_inputs_mappings=inputs_outputs_mappings)
with vs.variable_scope(scope or "rnn") as varscope:
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
inputs = tflite_wrapper.add_input(inputs, name="input", index_override=0)
    # In this variant, time_major defaults to True and inputs are expected
    # time-major: shaped [time, batch, depth]. Batch-major inputs would be
    # transposed to time-major here for the internal calculations.
flat_input = nest.flatten(inputs)
if not time_major:
# (batch, time, depth) => (time, batch, depth)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.cast(sequence_length, dtypes.int32)
if sequence_length.shape.rank not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.shape)
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length,
name="sequence_length")
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
if getattr(cell, "get_initial_state", None) is not None:
state = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [
"Expected shape for Tensor %s is " % x.name, packed_shape,
" but saw shape: ", x_shape
])
if not context.executing_eagerly() and sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
outputs, final_state = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (time, batch, depth) => (batch, time, depth)
outputs = nest.map_structure(_transpose_batch_time, outputs)
outputs = tflite_wrapper.add_output(outputs, name="outputs")
return outputs, final_state
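# A minimal usage sketch for the OpHint-wrapped dynamic_rnn above. The cell
# type, shapes, and names are illustrative assumptions, not part of this
# module; inputs are time-major, and control flow v2 must be enabled before
# the graph is built.
def _example_dynamic_rnn_usage(time_steps=28, batch_size=16, depth=28):
  cell = rnn_cell_impl.BasicRNNCell(num_units=16)
  # Time-major input: [max_time, batch_size, depth].
  inputs = array_ops.placeholder(dtypes.float32,
                                 [time_steps, batch_size, depth])
  outputs, final_state = dynamic_rnn(cell, inputs, dtype=dtypes.float32)
  return outputs, final_state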
def bidirectional_dynamic_rnn(cell_fw,
cell_bw,
inputs,
sequence_length=None,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
parallel_iterations=None,
swap_memory=False,
time_major=False,
scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Takes input and builds independent forward and backward RNNs. The input_size
of forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape: `[max_time,
batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch. If
not provided, all batch entries are assumed to be full sequences; and time
reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN. This must
be a tensor of appropriate type and shape `[batch_size,
cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a
tuple of tensors having shapes `[batch_size, s] for s in
cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using the
corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency and
can be run in parallel, will be. This parameter trades off time for
space. Values >> 1 use more memory but take less time, while smaller
values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs which
would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors. If true,
these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,
these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using
`time_major = True` is a bit more efficient because it avoids transposes
at the beginning and end of the RNN calculation. However, most TensorFlow
data is batch-major, so by default this function accepts input and emits
output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_fw,
dtype=dtype,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
time_major=time_major,
scope=fw_scope)
# Backward direction
if not time_major:
time_axis = 1
batch_axis = 0
else:
time_axis = 0
batch_axis = 1
def _reverse(input_, seq_lengths, seq_axis, batch_axis):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_,
seq_lengths=seq_lengths,
seq_axis=seq_axis,
batch_axis=batch_axis)
else:
return array_ops.reverse(input_, axis=[seq_axis])
with vs.variable_scope("bw") as bw_scope:
def _map_reverse(inp):
return _reverse(
inp,
seq_lengths=sequence_length,
seq_axis=time_axis,
batch_axis=batch_axis)
inputs_reverse = nest.map_structure(_map_reverse, inputs)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw,
inputs=inputs_reverse,
sequence_length=sequence_length,
initial_state=initial_state_bw,
dtype=dtype,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
time_major=time_major,
scope=bw_scope)
output_bw = _reverse(
tmp,
seq_lengths=sequence_length,
seq_axis=time_axis,
batch_axis=batch_axis)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
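# A minimal usage sketch for bidirectional_dynamic_rnn (illustrative names and
# shapes, not part of this module). The forward and backward outputs can be
# concatenated along the feature axis when a single tensor is preferred.
def _example_bidirectional_usage(time_steps=28, batch_size=16, depth=28):
  cell_fw = rnn_cell_impl.BasicRNNCell(num_units=16)
  cell_bw = rnn_cell_impl.BasicRNNCell(num_units=16)
  inputs = array_ops.placeholder(dtypes.float32,
                                 [time_steps, batch_size, depth])
  (output_fw, output_bw), _ = bidirectional_dynamic_rnn(
      cell_fw, cell_bw, inputs, dtype=dtypes.float32, time_major=True)
  # Concatenate along the last (feature) dimension.
  return array_ops.concat([output_fw, output_bw], axis=-1)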
|
tensorflow-master
|
tensorflow/lite/experimental/examples/lstm/rnn.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops util to handle ops for Lite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.lite.python import wrap_toco
from tensorflow.python.util.tf_export import tf_export
class SupportedOp(collections.namedtuple("SupportedOp", ["op"])):
"""Spec of supported ops.
Args:
    op: String name of the op.
"""
@tf_export(v1=["lite.experimental.get_potentially_supported_ops"])
def get_potentially_supported_ops():
"""Returns operations potentially supported by TensorFlow Lite.
  The list of potentially supported ops contains ops that are partially or
  fully supported. It is derived by simply scanning op names, without doing a
  real conversion or inspecting specific parameters.
  Given that some ops may be only partially supported, the definitive way to
  determine whether a model's operations are supported is to convert the model
  using the TensorFlow Lite converter.
Returns:
A list of SupportedOp.
"""
ops = wrap_toco.wrapped_get_potentially_supported_ops()
return [SupportedOp(o["op"]) for o in ops]
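# A quick illustrative helper built on the function above (the op name passed
# in is hypothetical); converting the actual model remains the authoritative
# test of support.
def _is_potentially_supported(op_name):
  return SupportedOp(op_name) in get_potentially_supported_ops()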
|
tensorflow-master
|
tensorflow/lite/experimental/tensorboard/ops_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.experimental.tensorboard import ops_util
from tensorflow.python.platform import test
class OpsUtilTest(test.TestCase):
def testGetPotentiallySupportedOps(self):
ops = ops_util.get_potentially_supported_ops()
# See GetTensorFlowNodeConverterMap() in
# tensorflow/lite/toco/import_tensorflow.cc
self.assertIsInstance(ops, list)
# Test partial ops that surely exist in the list.
self.assertIn(ops_util.SupportedOp("Add"), ops)
self.assertIn(ops_util.SupportedOp("Log"), ops)
self.assertIn(ops_util.SupportedOp("Sigmoid"), ops)
self.assertIn(ops_util.SupportedOp("Softmax"), ops)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/experimental/tensorboard/ops_util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AudioMicrofrontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
from tensorflow.python.framework import test_util
SAMPLE_RATE = 1000
WINDOW_SIZE = 25
WINDOW_STEP = 10
NUM_CHANNELS = 2
UPPER_BAND_LIMIT = 450.0
LOWER_BAND_LIMIT = 8.0
SMOOTHING_BITS = 10
class AudioFeatureGenerationTest(tf.test.TestCase):
@test_util.run_v1_only("b/120545219")
def testSimple(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True)
self.assertAllEqual(filterbanks.eval(),
[[479, 425], [436, 378], [410, 350], [391, 325]])
@test_util.run_v1_only("b/120545219")
def testSimpleFloatScaled(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
out_scale=64,
out_type=tf.float32)
self.assertAllEqual(filterbanks.eval(),
[[7.484375, 6.640625], [6.8125, 5.90625],
[6.40625, 5.46875], [6.109375, 5.078125]])
@test_util.run_v1_only("b/120545219")
def testStacking(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
right_context=1,
frame_stride=2)
self.assertAllEqual(filterbanks.eval(),
[[479, 425, 436, 378], [410, 350, 391, 325]])
def testStackingWithOverlap(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=1,
right_context=1)
self.assertAllEqual(
self.evaluate(filterbanks),
[[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
[436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])
@test_util.run_v1_only("b/120545219")
def testStackingDropFrame(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 4 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=1,
frame_stride=2)
self.assertAllEqual(filterbanks.eval(),
[[479, 425, 479, 425], [436, 378, 410, 350]])
def testZeroPadding(self):
with self.test_session():
audio = tf.constant(
[0, 32767, 0, -32768] * ((WINDOW_SIZE + 7 * WINDOW_STEP) // 4),
tf.int16)
filterbanks = frontend_op.audio_microfrontend(
audio,
sample_rate=SAMPLE_RATE,
window_size=WINDOW_SIZE,
window_step=WINDOW_STEP,
num_channels=NUM_CHANNELS,
upper_band_limit=UPPER_BAND_LIMIT,
lower_band_limit=LOWER_BAND_LIMIT,
smoothing_bits=SMOOTHING_BITS,
enable_pcan=True,
left_context=2,
frame_stride=3,
zero_padding=True)
self.assertAllEqual(
self.evaluate(filterbanks),
[[0, 0, 0, 0, 479, 425], [436, 378, 410, 350, 391, 325],
[374, 308, 362, 292, 352, 275]])
if __name__ == '__main__':
tf.test.main()
|
tensorflow-master
|
tensorflow/lite/experimental/microfrontend/python/kernel_tests/audio_microfrontend_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AudioMicrofrontend Op creates filterbanks from audio data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.experimental.microfrontend.ops import gen_audio_microfrontend_op
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_audio_microfrontend_op = loader.load_op_library(
resource_loader.get_path_to_datafile("_audio_microfrontend_op.so"))
def audio_microfrontend(audio,
sample_rate=16000,
window_size=25,
window_step=10,
num_channels=32,
upper_band_limit=7500.0,
lower_band_limit=125.0,
smoothing_bits=10,
even_smoothing=0.025,
odd_smoothing=0.06,
min_signal_remaining=0.05,
enable_pcan=True,
pcan_strength=0.95,
pcan_offset=80.0,
gain_bits=21,
enable_log=True,
scale_shift=6,
left_context=0,
right_context=0,
frame_stride=1,
zero_padding=False,
out_scale=1,
out_type=tf.uint16):
"""Audio Microfrontend Op.
This Op converts a sequence of audio data into one or more
feature vectors containing filterbanks of the input. The
conversion process uses a lightweight library to perform:
  1. A sliding window function
2. Short-time FFTs
3. Filterbank calculations
4. Noise reduction
5. PCAN Auto Gain Control
6. Logarithmic scaling
Args:
audio: 1D Tensor, int16 audio data in temporal ordering.
sample_rate: Integer, the sample rate of the audio in Hz.
window_size: Integer, length of desired time frames in ms.
window_step: Integer, length of step size for the next frame in ms.
num_channels: Integer, the number of filterbank channels to use.
upper_band_limit: Float, the highest frequency included in the filterbanks.
lower_band_limit: Float, the lowest frequency included in the filterbanks.
smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
even_smoothing: Float, smoothing coefficient for even-numbered channels.
odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
min_signal_remaining: Float, fraction of signal to preserve in smoothing.
enable_pcan: Bool, enable PCAN auto gain control.
pcan_strength: Float, gain normalization exponent.
pcan_offset: Float, positive value added in the normalization denominator.
gain_bits: Int, number of fractional bits in the gain.
enable_log: Bool, enable logarithmic scaling of filterbanks.
scale_shift: Integer, scale filterbanks by 2^(scale_shift).
left_context: Integer, number of preceding frames to attach to each frame.
    right_context: Integer, number of following frames to attach to each frame.
frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
out_scale: Integer, divide all filterbanks by this number.
out_type: DType, type of the output Tensor, defaults to UINT16.
Returns:
filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
Raises:
ValueError: If the audio tensor is not explicitly a vector.
"""
audio_shape = audio.shape
if audio_shape.ndims is None:
raise ValueError("Input to `AudioMicrofrontend` should have known rank.")
if len(audio_shape) > 1:
audio = tf.reshape(audio, [-1])
return gen_audio_microfrontend_op.audio_microfrontend(
audio, sample_rate, window_size, window_step, num_channels,
upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing,
odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength,
pcan_offset, gain_bits, enable_log, scale_shift, left_context,
right_context, frame_stride, zero_padding, out_scale, out_type)
tf.NotDifferentiable("AudioMicrofrontend")
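# A minimal usage sketch (parameter values are illustrative, mirroring the
# kernel tests): compute two filterbank channels over a short synthetic
# int16 signal.
def _example_audio_microfrontend():
  audio = tf.constant([0, 32767, 0, -32768] * 100, tf.int16)
  return audio_microfrontend(
      audio,
      sample_rate=1000,
      window_size=25,
      window_step=10,
      num_channels=2)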
|
tensorflow-master
|
tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py
|
#!/usr/bin/env python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This tool creates an html visualization of a TensorFlow Lite graph.
Example usage:
python visualize.py foo.tflite foo.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from tensorflow.python.platform import resource_loader
# Schema to use for flatbuffers
_SCHEMA = "third_party/tensorflow/lite/schema/schema.fbs"
# TODO(angerson): fix later when rules are simplified.
_SCHEMA = resource_loader.get_path_to_datafile("../schema/schema.fbs")
_BINARY = resource_loader.get_path_to_datafile("../../../flatbuffers/flatc")
# Account for different package positioning internal vs. external.
if not os.path.exists(_BINARY):
_BINARY = resource_loader.get_path_to_datafile(
"../../../../flatbuffers/flatc")
if not os.path.exists(_SCHEMA):
raise RuntimeError("Sorry, schema file cannot be found at %r" % _SCHEMA)
if not os.path.exists(_BINARY):
raise RuntimeError("Sorry, flatc is not available at %r" % _BINARY)
# A CSS description for making the visualizer
_CSS = """
<html>
<head>
<style>
body {font-family: sans-serif; background-color: #ffaa00;}
table {background-color: #eeccaa;}
th {background-color: black; color: white;}
h1 {
  background-color: #ffaa00;
padding:5px;
color: black;
}
div {
border-radius: 5px;
background-color: #ffeecc;
padding:5px;
margin:5px;
}
.tooltip {color: blue;}
.tooltip .tooltipcontent {
visibility: hidden;
color: black;
background-color: yellow;
padding: 5px;
border-radius: 4px;
position: absolute;
z-index: 1;
}
.tooltip:hover .tooltipcontent {
visibility: visible;
}
.edges line {
stroke: #333333;
}
.nodes text {
color: black;
pointer-events: none;
font-family: sans-serif;
font-size: 11px;
}
</style>
<script src="https://d3js.org/d3.v4.min.js"></script>
</head>
<body>
"""
_D3_HTML_TEMPLATE = """
<script>
// Build graph data
var graph = %s;
var svg = d3.select("#subgraph%d");
var width = svg.attr("width");
var height = svg.attr("height");
var color = d3.scaleOrdinal(d3.schemeCategory20);
var simulation = d3.forceSimulation()
.force("link", d3.forceLink().id(function(d) {return d.id;}))
.force("charge", d3.forceManyBody())
.force("center", d3.forceCenter(0.5 * width, 0.5 * height));
function buildGraph() {
var edge = svg.append("g").attr("class", "edges").selectAll("line")
.data(graph.edges).enter().append("line")
// Make the node group
var node = svg.selectAll(".nodes")
.data(graph.nodes)
.enter().append("g")
.attr("class", "nodes")
.call(d3.drag()
.on("start", function(d) {
if(!d3.event.active) simulation.alphaTarget(1.0).restart();
d.fx = d.x;d.fy = d.y;
})
.on("drag", function(d) {
d.fx = d3.event.x; d.fy = d3.event.y;
})
.on("end", function(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = d.fy = null;
}));
// Within the group, draw a circle for the node position and text
// on the side.
node.append("circle")
.attr("r", "5px")
.attr("fill", function(d) { return color(d.group); })
node.append("text")
.attr("dx", 8).attr("dy", 5).text(function(d) { return d.name; });
// Setup force parameters and update position callback
simulation.nodes(graph.nodes).on("tick", forceSimulationUpdated);
simulation.force("link").links(graph.edges);
function forceSimulationUpdated() {
// Update edges.
edge.attr("x1", function(d) {return d.source.x;})
.attr("y1", function(d) {return d.source.y;})
.attr("x2", function(d) {return d.target.x;})
.attr("y2", function(d) {return d.target.y;});
// Update node positions
node.attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; });
}
}
buildGraph()
</script>
"""
class OpCodeMapper(object):
"""Maps an opcode index to an op name."""
def __init__(self, data):
self.code_to_name = {}
for idx, d in enumerate(data["operator_codes"]):
self.code_to_name[idx] = d["builtin_code"]
def __call__(self, x):
if x not in self.code_to_name:
s = "<UNKNOWN>"
else:
s = self.code_to_name[x]
return "%s (opcode=%d)" % (s, x)
class DataSizeMapper(object):
"""For buffers, report the number of bytes."""
def __call__(self, x):
if x is not None:
return "%d bytes" % len(x)
else:
return "--"
class TensorMapper(object):
"""Maps a list of tensor indices to a tooltip hoverable indicator of more."""
def __init__(self, subgraph_data):
self.data = subgraph_data
def __call__(self, x):
html = ""
html += "<span class='tooltip'><span class='tooltipcontent'>"
for i in x:
tensor = self.data["tensors"][i]
html += str(i) + " "
html += tensor["name"] + " "
html += str(tensor["type"]) + " "
html += (repr(tensor["shape"]) if "shape" in tensor else "[]") + "<br>"
html += "</span>"
html += repr(x)
html += "</span>"
return html
def GenerateGraph(subgraph_idx, g, opcode_mapper):
"""Produces the HTML required to have a d3 visualization of the dag."""
def TensorName(idx):
return "t%d" % idx
def OpName(idx):
return "o%d" % idx
edges = []
nodes = []
first = {}
pixel_mult = 50 # TODO(aselle): multiplier for initial placement
for op_index, op in enumerate(g["operators"]):
for tensor_input_position, tensor_index in enumerate(op["inputs"]):
if tensor_index not in first:
first[tensor_index] = (
op_index * pixel_mult,
tensor_input_position * pixel_mult - pixel_mult / 2)
edges.append({
"source": TensorName(tensor_index),
"target": OpName(op_index)
})
for tensor_index in op["outputs"]:
edges.append({
"target": TensorName(tensor_index),
"source": OpName(op_index)
})
nodes.append({
"id": OpName(op_index),
"name": opcode_mapper(op["opcode_index"]),
"group": 2,
"x": pixel_mult,
"y": op_index * pixel_mult
})
for tensor_index, tensor in enumerate(g["tensors"]):
initial_y = (
first[tensor_index] if tensor_index in first else len(g["operators"]))
nodes.append({
"id": TensorName(tensor_index),
"name": "%s (%d)" % (tensor["name"], tensor_index),
"group": 1,
"x": 2,
"y": initial_y
})
graph_str = json.dumps({"nodes": nodes, "edges": edges})
html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx)
return html
def GenerateTableHtml(items, keys_to_print, display_index=True):
"""Given a list of object values and keys to print, make an HTML table.
Args:
items: Items to print an array of dicts.
keys_to_print: (key, display_fn). `key` is a key in the object. i.e.
items[0][key] should exist. display_fn is the mapping function on display.
i.e. the displayed html cell will have the string returned by
`mapping_fn(items[0][key])`.
display_index: add a column which is the index of each row in `items`.
Returns:
An html table.
"""
html = ""
# Print the list of items
  html += "<table>\n"
  html += "<tr>\n"
if display_index:
html += "<th>index</th>"
for h, mapper in keys_to_print:
html += "<th>%s</th>" % h
html += "</tr>\n"
for idx, tensor in enumerate(items):
html += "<tr>\n"
if display_index:
html += "<td>%d</td>" % idx
# print tensor.keys()
for h, mapper in keys_to_print:
val = tensor[h] if h in tensor else None
val = val if mapper is None else mapper(val)
html += "<td>%s</td>\n" % val
html += "</tr>\n"
html += "</table>\n"
return html
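# A small illustrative call (hypothetical data): render two rows, applying an
# optional display function to one column.
def _ExampleTableHtml():
  items = [{"name": "conv", "data": b"\x00" * 1024},
           {"name": "add", "data": b"\x00" * 16}]
  return GenerateTableHtml(items,
                           [("name", None), ("data", DataSizeMapper())])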
def CreateHtmlFile(tflite_input, html_output):
"""Given a tflite model in `tflite_input` file, produce html description."""
  # Convert the model into a JSON flatbuffer using flatc (build it if it
  # doesn't exist).
if not os.path.exists(tflite_input):
raise RuntimeError("Invalid filename %r" % tflite_input)
if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"):
# Run convert
cmd = (
_BINARY + " -t "
"--strict-json --defaults-json -o /tmp {schema} -- {input}".format(
input=tflite_input, schema=_SCHEMA))
print(cmd)
os.system(cmd)
real_output = ("/tmp/" + os.path.splitext(
os.path.split(tflite_input)[-1])[0] + ".json")
data = json.load(open(real_output))
elif tflite_input.endswith(".json"):
data = json.load(open(tflite_input))
else:
raise RuntimeError("Input file was not .tflite or .json")
html = ""
html += _CSS
  html += "<h1>TensorFlow Lite Model</h1>"
data["filename"] = tflite_input # Avoid special case
toplevel_stuff = [("filename", None), ("version", None), ("description",
None)]
html += "<table>\n"
for key, mapping in toplevel_stuff:
if not mapping:
mapping = lambda x: x
html += "<tr><th>%s</th><td>%s</td></tr>\n" % (key, mapping(data.get(key)))
html += "</table>\n"
# Spec on what keys to display
buffer_keys_to_display = [("data", DataSizeMapper())]
operator_keys_to_display = [("builtin_code", None), ("custom_code", None),
("version", None)]
for subgraph_idx, g in enumerate(data["subgraphs"]):
# Subgraph local specs on what to display
html += "<div class='subgraph'>"
tensor_mapper = TensorMapper(g)
opcode_mapper = OpCodeMapper(data)
op_keys_to_display = [("inputs", tensor_mapper), ("outputs", tensor_mapper),
("builtin_options", None), ("opcode_index",
opcode_mapper)]
tensor_keys_to_display = [("name", None), ("type", None), ("shape", None),
("buffer", None), ("quantization", None)]
html += "<h2>Subgraph %d</h2>\n" % subgraph_idx
# Inputs and outputs.
html += "<h3>Inputs/Outputs</h3>\n"
html += GenerateTableHtml(
[{
"inputs": g["inputs"],
"outputs": g["outputs"]
}], [("inputs", tensor_mapper), ("outputs", tensor_mapper)],
display_index=False)
# Print the tensors.
html += "<h3>Tensors</h3>\n"
html += GenerateTableHtml(g["tensors"], tensor_keys_to_display)
# Print the ops.
html += "<h3>Ops</h3>\n"
html += GenerateTableHtml(g["operators"], op_keys_to_display)
# Visual graph.
html += "<svg id='subgraph%d' width='960' height='1600'></svg>\n" % (
subgraph_idx,)
html += GenerateGraph(subgraph_idx, g, opcode_mapper)
html += "</div>"
# Buffers have no data, but maybe in the future they will
html += "<h2>Buffers</h2>\n"
html += GenerateTableHtml(data["buffers"], buffer_keys_to_display)
# Operator codes
html += "<h2>Operator Codes</h2>\n"
html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display)
html += "</body></html>\n"
open(html_output, "w").write(html)
def main(argv):
try:
tflite_input = argv[1]
html_output = argv[2]
except IndexError:
print("Usage: %s <input tflite> <output html>" % (argv[0]))
else:
CreateHtmlFile(tflite_input, html_output)
if __name__ == "__main__":
main(sys.argv)
|
tensorflow-master
|
tensorflow/lite/tools/visualize.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite is for mobile and embedded devices.
TensorFlow Lite is the official solution for running machine learning models on
mobile and embedded devices. It enables on-device machine learning inference
with low latency and a small binary size on Android, iOS, and other operating
systems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import subprocess
from distutils.command.build_ext import build_ext
import numpy
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_py import build_py
PACKAGE_NAME = 'tflite-runtime'
PACKAGE_VERSION = os.environ['TENSORFLOW_VERSION']
DOCLINES = __doc__.split('\n')
PACKAGE = 'tflite_runtime.lite.python'
TENSORFLOW_DIR = os.environ['TENSORFLOW_SRC_ROOT']
# Setup cross compiling
TARGET = (
os.environ['TENSORFLOW_TARGET'] if 'TENSORFLOW_TARGET' in os.environ
else None)
if TARGET == 'rpi':
os.environ['CXX'] = 'arm-linux-gnueabihf-g++'
os.environ['CC'] = 'arm-linux-gnueabihf-g++'
MAKE_CROSS_OPTIONS = ['TARGET=%s' % TARGET] if TARGET else []
RELATIVE_MAKE_DIR = os.path.join('tensorflow', 'lite', 'tools', 'make')
MAKE_DIR = os.path.join(TENSORFLOW_DIR, RELATIVE_MAKE_DIR)
DOWNLOADS_DIR = os.path.join(MAKE_DIR, 'downloads')
RELATIVE_MAKEFILE_PATH = os.path.join(RELATIVE_MAKE_DIR, 'Makefile')
DOWNLOAD_SCRIPT_PATH = os.path.join(MAKE_DIR, 'download_dependencies.sh')
# Check physical memory; if we are on a reasonably large machine (not a small
# SOC) with more than 4GB, use all the CPUs, otherwise only 1.
def get_build_cpus():
physical_bytes = os.sysconf('SC_PAGESIZE') * os.sysconf('SC_PHYS_PAGES')
if physical_bytes < (1<<30) * 4:
return 1
else:
return multiprocessing.cpu_count()
def make_args(target='', quiet=True):
"""Construct make command line."""
args = (['make', 'SHELL=/bin/bash', '-C', TENSORFLOW_DIR]
+ MAKE_CROSS_OPTIONS +
['-f', RELATIVE_MAKEFILE_PATH, '-j',
str(get_build_cpus())])
if quiet:
args.append('--quiet')
if target:
args.append(target)
return args
def make_output(target):
"""Invoke make on the target and return output."""
return subprocess.check_output(make_args(target)).decode('utf-8').strip()
def make():
"""Invoke make to build tflite C++ sources.
Build dependencies:
    apt-get install swig libjpeg-dev zlib1g-dev python3-dev python3-numpy
"""
subprocess.check_call(make_args(quiet=False))
def download_dependencies():
  """Download build dependencies if they haven't been downloaded yet."""
if not os.path.isdir(DOWNLOADS_DIR) or not os.listdir(DOWNLOADS_DIR):
subprocess.check_call(DOWNLOAD_SCRIPT_PATH)
class CustomBuildExt(build_ext, object):
def run(self):
download_dependencies()
make()
return super(CustomBuildExt, self).run()
class CustomBuildPy(build_py, object):
def run(self):
self.run_command('build_ext')
return super(CustomBuildPy, self).run()
LIB_TFLITE = 'tensorflow-lite'
LIB_TFLITE_DIR = make_output('libdir')
ext = Extension(
name='%s._interpreter_wrapper' % PACKAGE,
language='c++',
sources=['interpreter_wrapper/interpreter_wrapper.i',
'interpreter_wrapper/interpreter_wrapper.cc'],
swig_opts=['-c++',
'-I%s' % TENSORFLOW_DIR,
'-module', 'interpreter_wrapper',
'-outdir', '.'],
extra_compile_args=['-std=c++11'],
include_dirs=[TENSORFLOW_DIR,
os.path.join(TENSORFLOW_DIR, 'tensorflow', 'lite', 'tools',
'pip_package'),
numpy.get_include(),
os.path.join(DOWNLOADS_DIR, 'flatbuffers', 'include'),
os.path.join(DOWNLOADS_DIR, 'absl')],
libraries=[LIB_TFLITE],
library_dirs=[LIB_TFLITE_DIR])
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/lite/',
author='Google Inc.',
author_email='[email protected]',
license='Apache 2.0',
include_package_data=True,
keywords='tflite tensorflow tensor machine learning',
packages=find_packages(exclude=[]),
ext_modules=[ext],
package_dir={PACKAGE: '.'},
cmdclass={
'build_ext': CustomBuildExt,
'build_py': CustomBuildPy,
}
)
|
tensorflow-master
|
tensorflow/lite/tools/pip_package/setup.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to convert ILSVRC devkit validation ground truth to synset labels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from os import path
import sys
import scipy.io
_SYNSET_ARRAYS_RELATIVE_PATH = 'data/meta.mat'
_VALIDATION_FILE_RELATIVE_PATH = 'data/ILSVRC2012_validation_ground_truth.txt'
def _synset_to_word(filepath):
"""Returns synset to word dictionary by reading sysnset arrays."""
mat = scipy.io.loadmat(filepath)
entries = mat['synsets']
# These fields are listed in devkit readme.txt
fields = [
'synset_id', 'WNID', 'words', 'gloss', 'num_children', 'children',
'wordnet_height', 'num_train_images'
]
synset_index = fields.index('synset_id')
words_index = fields.index('words')
synset_to_word = {}
for entry in entries:
entry = entry[0]
synset_id = int(entry[synset_index][0])
first_word = entry[words_index][0].split(',')[0]
synset_to_word[synset_id] = first_word
return synset_to_word
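# Illustrative sketch of the result (entries are assumed, not taken from the
# real devkit): the returned dict maps integer synset ids to the first word
# of each synset description, e.g. {1: 'kit fox', 2: 'English setter'}.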
def _validation_file_path(ilsvrc_dir):
return path.join(ilsvrc_dir, _VALIDATION_FILE_RELATIVE_PATH)
def _synset_array_path(ilsvrc_dir):
return path.join(ilsvrc_dir, _SYNSET_ARRAYS_RELATIVE_PATH)
def _generate_validation_labels(ilsvrc_dir, output_file):
synset_to_word = _synset_to_word(_synset_array_path(ilsvrc_dir))
with open(_validation_file_path(ilsvrc_dir), 'r') as synset_id_file, open(
output_file, 'w') as output:
for synset_id in synset_id_file:
synset_id = int(synset_id)
output.write('%s\n' % synset_to_word[synset_id])
def _check_arguments(args):
if not args.validation_labels_output:
raise ValueError('Invalid path to output file.')
ilsvrc_dir = args.ilsvrc_devkit_dir
if not ilsvrc_dir or not path.isdir(ilsvrc_dir):
raise ValueError('Invalid path to ilsvrc_dir')
if not path.exists(_validation_file_path(ilsvrc_dir)):
raise ValueError('Invalid path to ilsvrc_dir, cannot find validation file.')
if not path.exists(_synset_array_path(ilsvrc_dir)):
raise ValueError(
'Invalid path to ilsvrc_dir, cannot find synset arrays file.')
def main():
parser = argparse.ArgumentParser(
      description='Converts ILSVRC devkit validation_ground_truth.txt to a '
      'synset labels file that can be used by the accuracy script.')
parser.add_argument(
'--validation_labels_output',
type=str,
help='Full path for outputting validation labels.')
parser.add_argument(
'--ilsvrc_devkit_dir',
type=str,
      help='Full path to ILSVRC 2012 devkit directory.')
args = parser.parse_args()
try:
_check_arguments(args)
except ValueError as e:
parser.print_usage()
file_name = path.basename(sys.argv[0])
sys.stderr.write('{0}: error: {1}\n'.format(file_name, str(e)))
sys.exit(1)
_generate_validation_labels(args.ilsvrc_devkit_dir,
args.validation_labels_output)
if __name__ == '__main__':
main()
|
tensorflow-master
|
tensorflow/lite/tools/accuracy/ilsvrc/generate_validation_labels.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to convert SavedModel to frozen GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import util
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
def _log_tensor_details(tensor_info):
"""Log tensor details: name, shape, and type."""
for key in tensor_info:
val = tensor_info[key]
dtype = types_pb2.DataType.Name(val.dtype)
if val.tensor_shape.unknown_rank:
shape = "unknown_rank"
else:
dims = [str(dim.size) for dim in val.tensor_shape.dim]
shape = "({})".format(", ".join(dims))
logging.info("Tensor's key in saved_model's tensor_map: %s", key)
logging.info(" tensor name: %s, shape: %s, type: %s", val.name, shape,
dtype)
def get_meta_graph_def(saved_model_dir, tag_set):
"""Validate saved_model and extract MetaGraphDef.
Args:
saved_model_dir: saved_model path to convert.
tag_set: Set of tag(s) of the MetaGraphDef to load.
Returns:
The meta_graph_def used for tflite conversion.
Raises:
ValueError: No valid MetaGraphDef for given tag_set.
"""
with session.Session(graph=ops.Graph()) as sess:
return loader.load(sess, tag_set, saved_model_dir)
def get_signature_def(meta_graph, signature_key):
"""Get the signature def from meta_graph with given signature_key.
Args:
meta_graph: meta_graph_def.
signature_key: signature_def in the meta_graph_def.
Returns:
The signature_def used for tflite conversion.
Raises:
ValueError: Given signature_key is not valid for this meta_graph.
"""
signature_def_map = meta_graph.signature_def
signature_def_keys = set(signature_def_map.keys())
logging.info(
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
"following keys: %s", signature_def_keys)
if signature_key not in signature_def_keys:
raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
"values are '{}'.".format(signature_key,
",".join(signature_def_keys)))
return signature_def_map[signature_key]
def get_inputs_outputs(signature_def):
"""Get inputs and outputs from SignatureDef.
Args:
signature_def: SignatureDef in the meta_graph_def for conversion.
Returns:
The inputs and outputs in the graph for conversion.
"""
inputs_tensor_info = signature_def.inputs
outputs_tensor_info = signature_def.outputs
logging.info("input tensors info: ")
_log_tensor_details(inputs_tensor_info)
logging.info("output tensors info: ")
_log_tensor_details(outputs_tensor_info)
def gather_names(tensor_info):
return [tensor_info[key].name for key in tensor_info]
inputs = gather_names(inputs_tensor_info)
outputs = gather_names(outputs_tensor_info)
return inputs, outputs
def _get_tensors(graph, signature_def_tensor_names=None,
user_tensor_names=None):
"""Gets the tensors associated with the tensor names.
  Either signature_def_tensor_names or user_tensor_names should be provided.
  If user_tensor_names is given, the tensors matching those names are
  returned; otherwise, the tensors matching the names in the SignatureDef are
  returned.
Args:
graph: GraphDef representing graph.
signature_def_tensor_names: Tensor names stored in either the inputs or
outputs of a SignatureDef. (default None)
user_tensor_names: Tensor names provided by the user. (default None)
Returns:
List of tensors.
Raises:
ValueError:
signature_def_tensors and user_tensor_names are undefined or empty.
user_tensor_names are not valid.
"""
tensors = []
if user_tensor_names:
# Sort the tensor names.
user_tensor_names = sorted(user_tensor_names)
tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)
elif signature_def_tensor_names:
tensors = [
graph.get_tensor_by_name(name)
for name in sorted(signature_def_tensor_names)
]
else:
# Throw ValueError if signature_def_tensors and user_tensor_names are both
# either undefined or empty.
raise ValueError(
"Specify either signature_def_tensor_names or user_tensor_names")
return tensors
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key):
"""Converts a SavedModel to a frozen graph.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input arrays
from SignatureDef when none are provided.
    input_shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
      Automatically determined when input shapes are None (e.g., {"foo": None}).
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
frozen_graph_def: Frozen GraphDef.
in_tensors: List of input tensors for the graph.
out_tensors: List of output tensors for the graph.
Raises:
ValueError:
SavedModel doesn't contain a MetaGraphDef identified by tag_set.
signature_key is not in the MetaGraphDef.
assets/ directory is in the MetaGraphDef.
input_shapes does not match the length of input_arrays.
input_arrays or output_arrays are not valid.
"""
# Read SignatureDef.
meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
signature_def = get_signature_def(meta_graph, signature_key)
inputs, outputs = get_inputs_outputs(signature_def)
# Check SavedModel for assets directory.
collection_def = meta_graph.collection_def
if constants.ASSETS_KEY in collection_def:
raise ValueError("SavedModels with assets/ directory are not supported.")
graph = ops.Graph()
with session.Session(graph=graph) as sess:
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
# Gets input and output tensors.
# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
in_tensors = _get_tensors(graph, inputs, input_arrays)
out_tensors = _get_tensors(graph, outputs, output_arrays)
util.set_tensor_shapes(in_tensors, input_shapes)
frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors)
return frozen_graph_def, in_tensors, out_tensors
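# A minimal usage sketch, assuming a SavedModel at the hypothetical path
# /tmp/simple_model with the default serving signature:
#   from tensorflow.python.saved_model import signature_constants
#   from tensorflow.python.saved_model import tag_constants
#   graph_def, in_tensors, out_tensors = freeze_saved_model(
#       saved_model_dir='/tmp/simple_model', input_arrays=None,
#       input_shapes=None, output_arrays=None,
#       tag_set=set([tag_constants.SERVING]),
#       signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)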
|
tensorflow-master
|
tensorflow/lite/python/convert_saved_model.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wraps toco interface with python lazy loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.lazy_loader import LazyLoader
# TODO(b/131123224): Lazy load since some of the performance benchmark skylark
# rules and monolithic build break dependencies.
_toco_python = LazyLoader(
"tensorflow_wrap_toco", globals(),
"tensorflow.lite.toco.python."
"tensorflow_wrap_toco")
del LazyLoader
def wrapped_toco_convert(model_flags_str, toco_flags_str, input_data_str):
"""Wraps TocoConvert with lazy loader."""
return _toco_python.TocoConvert(model_flags_str, toco_flags_str,
input_data_str)
def wrapped_get_potentially_supported_ops():
"""Wraps TocoGetPotentiallySupportedOps with lazy loader."""
return _toco_python.TocoGetPotentiallySupportedOps()
|
tensorflow-master
|
tensorflow/lite/python/wrap_toco.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFLite SavedModel conversion test cases.
- Tests converting simple SavedModel graph to TFLite FlatBuffer.
- Tests converting simple SavedModel graph to frozen graph.
- Tests converting MNIST SavedModel to TFLite FlatBuffer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.lite.python import convert_saved_model
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
class FreezeSavedModelTest(test_util.TensorFlowTestCase):
def _createSimpleSavedModel(self, shape):
"""Create a simple SavedModel on the fly."""
saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
with session.Session() as sess:
in_tensor = array_ops.placeholder(shape=shape, dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
inputs = {"x": in_tensor}
outputs = {"y": out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _createSavedModelTwoInputArrays(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name="inputB")
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name="inputA")
out_tensor = in_tensor_1 + in_tensor_2
inputs = {"x": in_tensor_1, "y": in_tensor_2}
outputs = {"z": out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _getArrayNames(self, tensors):
return [tensor.name for tensor in tensors]
def _getArrayShapes(self, tensors):
dims = []
for tensor in tensors:
dim_tensor = []
for dim in tensor.shape:
if isinstance(dim, tensor_shape.Dimension):
dim_tensor.append(dim.value)
else:
dim_tensor.append(dim)
dims.append(dim_tensor)
return dims
def _convertSavedModel(self,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
if tag_set is None:
tag_set = set([tag_constants.SERVING])
if signature_key is None:
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
graph_def, in_tensors, out_tensors = convert_saved_model.freeze_saved_model(
saved_model_dir=saved_model_dir,
input_arrays=input_arrays,
input_shapes=input_shapes,
output_arrays=output_arrays,
tag_set=tag_set,
signature_key=signature_key)
return graph_def, in_tensors, out_tensors
def testSimpleSavedModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testSimpleSavedModelWithNoneBatchSizeInShape(self):
"""Test a SavedModel with None in input tensor's shape."""
saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])
def testSimpleSavedModelWithInvalidSignatureKey(self):
"""Test a SavedModel that fails due to an invalid signature_key."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, signature_key="invalid-key")
self.assertEqual(
"No 'invalid-key' in the SavedModel's SignatureDefs. "
"Possible values are 'serving_default'.", str(error.exception))
def testSimpleSavedModelWithInvalidOutputArray(self):
"""Test a SavedModel that fails due to invalid output arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, output_arrays=["invalid-output"])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
def testSimpleSavedModelWithWrongInputArrays(self):
"""Test a SavedModel that fails due to invalid input arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
# Check invalid input_arrays.
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, input_arrays=["invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Check valid and invalid input_arrays.
with self.assertRaises(ValueError) as error:
self._convertSavedModel(
saved_model_dir, input_arrays=["Placeholder", "invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
def testSimpleSavedModelWithCorrectArrays(self):
"""Test a SavedModel with correct input_arrays and output_arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["Placeholder"],
output_arrays=["add"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])
def testSimpleSavedModelWithCorrectInputArrays(self):
"""Test a SavedModel with correct input_arrays and input_shapes."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["Placeholder"],
input_shapes={"Placeholder": [1, 16, 16, 3]})
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testTwoInputArrays(self):
"""Test a simple SavedModel."""
saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir, input_arrays=["inputB", "inputA"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0", "inputB:0"])
self.assertEqual(
self._getArrayShapes(in_tensors), [[1, 16, 16, 3], [1, 16, 16, 3]])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
# Check case where input shape is given.
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["inputA"],
input_shapes={"inputA": [1, 16, 16, 3]})
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
# Check case where input shape is None.
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir, input_arrays=["inputA"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testMultipleMetaGraphDef(self):
"""Test saved model with multiple MetaGraphDefs."""
saved_model_dir = os.path.join(self.get_temp_dir(), "savedmodel_two_mgd")
builder = saved_model.builder.SavedModelBuilder(saved_model_dir)
with session.Session(graph=ops.Graph()) as sess:
# MetaGraphDef 1
in_tensor = array_ops.placeholder(shape=[1, 28, 28], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sig_input_tensor = saved_model.utils.build_tensor_info(in_tensor)
sig_input_tensor_signature = {"x": sig_input_tensor}
sig_output_tensor = saved_model.utils.build_tensor_info(out_tensor)
sig_output_tensor_signature = {"y": sig_output_tensor}
predict_signature_def = (
saved_model.signature_def_utils.build_signature_def(
sig_input_tensor_signature, sig_output_tensor_signature,
saved_model.signature_constants.PREDICT_METHOD_NAME))
signature_def_map = {
saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
predict_signature_def
}
builder.add_meta_graph_and_variables(
sess,
tags=[saved_model.tag_constants.SERVING, "additional_test_tag"],
signature_def_map=signature_def_map)
# MetaGraphDef 2
builder.add_meta_graph(tags=["tflite"])
builder.save(True)
# Convert to tflite
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
tag_set=set([saved_model.tag_constants.SERVING, "additional_test_tag"]))
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 28, 28]])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/python/convert_saved_model_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions used by multiple converter files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.protobuf import config_pb2 as _config_pb2
from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training.saver import export_meta_graph as _export_meta_graph
# Map of tf.dtypes to TFLite types_flag_pb2.
_MAP_TF_TO_TFLITE_TYPES = {
dtypes.float32: _types_pb2.FLOAT,
dtypes.float16: _types_pb2.FLOAT16,
dtypes.int32: _types_pb2.INT32,
dtypes.int64: _types_pb2.INT64,
dtypes.string: _types_pb2.STRING,
dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
dtypes.int8: _types_pb2.INT8,
dtypes.complex64: _types_pb2.COMPLEX64
}
_LOWER_USING_SWITCH_MERGE = "_lower_using_switch_merge"
def convert_dtype_to_tflite_type(tf_dtype):
"""Converts tf.dtype to TFLite proto type.
Args:
tf_dtype: tf.dtype
Raises:
ValueError: Unsupported tf.dtype.
Returns:
types_flag_pb2.
"""
result = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)
if result is None:
raise ValueError("Unsupported tf.dtype {0}".format(tf_dtype))
return result
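# Example sketch: convert_dtype_to_tflite_type(dtypes.float32) returns
# _types_pb2.FLOAT, while a dtype missing from _MAP_TF_TO_TFLITE_TYPES
# (dtypes.bool, for instance) raises ValueError.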
def get_tensor_name(tensor):
"""Returns name of the input tensor.
Args:
tensor: tf.Tensor
Returns:
str
"""
parts = tensor.name.split(":")
if len(parts) > 2:
raise ValueError("Tensor name invalid. Expect 0 or 1 colon, got {0}".format(
len(parts) - 1))
# To be consistent with the tensor naming scheme in tensorflow, we need
  # to drop the ':0' suffix for the first tensor.
if len(parts) > 1 and parts[1] != "0":
return tensor.name
return parts[0]
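# Example sketch: a tensor named "conv:0" yields "conv" (the ":0" suffix is
# dropped), while "conv:1" is returned unchanged as "conv:1".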
def get_tensors_from_tensor_names(graph, tensor_names):
"""Gets the Tensors associated with the `tensor_names` in the provided graph.
Args:
graph: TensorFlow Graph.
tensor_names: List of strings that represent names of tensors in the graph.
Returns:
A list of Tensor objects in the same order the names are provided.
Raises:
ValueError:
tensor_names contains an invalid tensor name.
"""
# Get the list of all of the tensors.
tensor_name_to_tensor = {}
for op in graph.get_operations():
for tensor in op.values():
tensor_name_to_tensor[get_tensor_name(tensor)] = tensor
# Get the tensors associated with tensor_names.
tensors = []
invalid_tensors = []
for name in tensor_names:
tensor = tensor_name_to_tensor.get(name)
if tensor is None:
invalid_tensors.append(name)
else:
tensors.append(tensor)
# Throw ValueError if any user input names are not valid tensors.
if invalid_tensors:
raise ValueError("Invalid tensors '{}' were found.".format(
",".join(invalid_tensors)))
return tensors
def set_tensor_shapes(tensors, shapes):
"""Sets Tensor shape for each tensor if the shape is defined.
Args:
tensors: TensorFlow ops.Tensor.
    shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
Raises:
ValueError:
`shapes` contains an invalid tensor.
`shapes` contains an invalid shape for a valid tensor.
"""
if shapes:
tensor_names_to_tensor = {
get_tensor_name(tensor): tensor for tensor in tensors
}
for name, shape in shapes.items():
if name not in tensor_names_to_tensor:
raise ValueError("Invalid tensor \'{}\' found in tensor shapes "
"map.".format(name))
if shape is not None:
tensor = tensor_names_to_tensor[name]
try:
tensor.set_shape(shape)
except ValueError as error:
message = ("The shape of tensor '{0}' cannot be changed from {1} to "
"{2}. {3}".format(name, tensor.shape, shape, str(error)))
raise ValueError(message)
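# A minimal sketch, assuming `in_tensor` is a placeholder named "Placeholder"
# with an unknown batch dimension:
#   set_tensor_shapes([in_tensor], {"Placeholder": [1, 16, 16, 3]})
# pins the shape to (1, 16, 16, 3); a value of None for a name leaves that
# tensor's shape untouched.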
def get_grappler_config(optimizers_list):
"""Creates a tf.compat.v1.ConfigProto for configuring Grappler.
Args:
optimizers_list: List of strings that represents the list of optimizers.
Returns:
tf.ConfigProto.
"""
config = _config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
for optimizer in optimizers_list:
rewrite_options.optimizers.append(optimizer)
return config
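# Example sketch: get_grappler_config(["function"]) returns a ConfigProto
# whose graph_options.rewrite_options.optimizers contains just "function";
# freeze_graph below uses exactly this config to inline functions.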
def run_graph_optimizations(graph_def,
input_arrays,
output_arrays,
config,
graph=None):
"""Apply standard TensorFlow optimizations to the graph_def.
Args:
graph_def: Frozen GraphDef to be optimized.
input_arrays: List of arrays that are considered inputs of the graph.
output_arrays: List of arrays that are considered outputs of the graph.
config: tf.ConfigProto.
graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)
Returns:
A new, optimized GraphDef.
"""
meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)
# We need to add a collection called 'train_op' so that grappler
# knows what the outputs are.
fetch_collection = _meta_graph_pb2.CollectionDef()
for array in input_arrays + output_arrays:
fetch_collection.node_list.value.append(array.name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _remove_lower_using_switch_merge(graph_def):
"""Remove '_lower_using_switch_merge' attributes from the given graph.
Args:
graph_def: GraphDef to be optimized.
Returns:
    A new GraphDef with no '_lower_using_switch_merge' attribute.
"""
out = _graph_pb2.GraphDef()
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
for node in graph_def.node:
new_node = copy.deepcopy(node)
if new_node.op == "While":
new_node.attr[_LOWER_USING_SWITCH_MERGE].b = False
out.node.extend([new_node])
return out
def _convert_op_hints_if_present(sess, graph_def, output_tensors,
hinted_outputs_nodes):
if is_frozen_graph(sess):
raise ValueError("Try to convert op hints, needs unfrozen graph.")
output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
graph_def = tf_graph_util.convert_variables_to_constants(
sess, graph_def, output_arrays + hinted_outputs_nodes)
graph_def = convert_op_hints_to_stubs(graph_def=graph_def)
graph_def = tf_graph_util.remove_training_nodes(graph_def)
return graph_def
def freeze_graph(sess, input_tensors, output_tensors):
"""Returns a frozen GraphDef.
  Runs a Grappler pass to inline functions, then freezes the graph if it
  still contains Variables; otherwise the optimized GraphDef is returned
  as-is.
  If OpHints are present, the OpHint graph is converted.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors.
output_tensors: List of output tensors (only .name is used from this).
Returns:
Frozen GraphDef.
"""
# Runs a Grappler pass in order to inline any functions in the graph.
  # Aside from inlining any simple function, Grappler will also try to lower
  # while loops into a switch/merge representation, which is undesired for
  # OpHints, so we simply remove those attributes to prevent Grappler from
  # doing so.
graph_def = _remove_lower_using_switch_merge(sess.graph_def)
config = get_grappler_config(["function"])
graph_def = run_graph_optimizations(
graph_def, input_tensors, output_tensors, config, graph=sess.graph)
# If ophints are present, just convert them.
hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
if hinted_outputs_nodes:
return _convert_op_hints_if_present(sess, graph_def, output_tensors,
hinted_outputs_nodes)
if not is_frozen_graph(sess):
output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
return tf_graph_util.convert_variables_to_constants(sess, graph_def,
output_arrays)
else:
return sess.graph_def
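# A minimal usage sketch (tensor shapes and names are hypothetical; assumes
# the usual session/array_ops/dtypes imports, which this module does not pull
# in itself):
#   with session.Session() as sess:
#     in_t = array_ops.placeholder(dtypes.float32, shape=[1, 4])
#     out_t = in_t * 2.0
#     frozen_graph_def = freeze_graph(sess, [in_t], [out_t])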
def is_frozen_graph(sess):
"""Determines if the graph is frozen.
Determines if a graph has previously been frozen by checking for any
operations of type Variable*. If variables are found, the graph is not frozen.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
|
tensorflow-master
|
tensorflow/lite/python/util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.lite.OpHint("cool_activation")
input, = custom.add_inputs(input)
output = tf.sigmoid(input) * input
output, = custom.add_outputs(output)
return output
image = tf.compat.v1.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.compat.v1.Session()
graphdef_to_convert = tf.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.compat.v1.lite.toco_convert(
graphdef_to_convert, [image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input and output are identities whose attributes record which argument
they are, the name of the function they should turn into in TF Lite, and a
guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
# TODO(aselle): Make this use generic graph transformations.
# TODO(aselle): _tensor_name_base should be called _tensor_name_to_op_name.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import copy as _copy
import json as _json
import uuid as _uuid
import six as _six
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.framework import node_def_pb2 as _node_def_pb2
from tensorflow.python.framework import ops as _ops
# TODO(aselle): publicize these apis if we continue to use these.
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export(v1=["lite.OpHint"])
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
"""
# TODO(aselle): When TensorFlow functions functionality works for arbitrary
# constructs, this mechanism can be retired and changed to use python defun's.
# Attr constants that are used for representation in the GraphDef. These
# will be used on every Identity op that is involved in a total OpHint.
# Name of the OpHint function (cosmetic).
FUNCTION_NAME_ATTR = "_tflite_function_name"
# UUID of the function (each OpHint gets a new uuid).
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
# The input index of the input (or nothing if it is an output).
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
# The output index of the output (or nothing if it is an input).
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
# An index that orders aggregate arguments. Aggregate arguments are ones
# that are separate but will be fused horizontally. For example a static LSTM
# has a lstm cell for each time step. Each one has a separate opHint, but a
# fused SequentialLSTM will treat this as a single tensor.
FUNCTION_SORT_INDEX_ATTR = "_tflite_function_sort_index"
# The way in which multiple parts of the aggregate argument will be joined
# into a fused operand. Valid options are OpHint.AGGREGATE_FIRST,
# OpHint.AGGREGATE_LAST, OpHint.AGGREGATE_STACK.
FUNCTION_AGGREGATE_ATTR = "_tflite_function_aggregate"
# On fused OpHint stub, the order of inputs that the final LSTM call will
# have. What this means is that the TensorFlow order might be
# "foo", "bar", "stuff" and you might want the TF lite op order to be
# "stuff", "foo", "bar", -1 (where -1 is unused). So you would set this
# attribute to [2, 0, 1, -1].
TFLITE_INPUT_INDICES = "_tflite_input_indices"
# OpHint level.
FUNCTION_LEVEL_ATTR = "_tflite_ophint_level"
# Ophint internal mapping, this is for high level Ophint only.
# This basically contains three kinds of mapping:
# 1) How parental ophinted inputs map to the first child ophinted inputs;
# 2) How internal children nodes are connected;
# 3) How parental ophinted outputs map to the last child ophinted outputs.
CHILDREN_INPUTS_MAPPINGS = "_tflite_children_ophint_inputs_mapping"
# Types of aggregations
# stack: stacks all ophints with matching tags. i.e. for a static rnn.
# specifically, this is good for an input or output to a static rnn cell.
AGGREGATE_STACK = "stack"
# first: only takes the first output (one with lowest sort index)
# of matching tags. This is good for the input state to an RNN.
AGGREGATE_FIRST = "first"
# aggregation last takes only the last tag (one with highest sort index).
# This is good for an output value on the last stack item of a
# static rnn.
AGGREGATE_LAST = "last"
class OpHintArgumentTracker(object):
"""Conceptually tracks indices of arguments of "OpHint functions".
The inputs and arguments of these functions both use an instance
of the class so they can have independent numbering.
"""
def __init__(self,
function_name,
unique_function_id,
node_name_prefix,
attr_name,
level=1,
children_inputs_mappings=None):
"""Initialize ophint argument.
Args:
function_name: Name of the function that this tracks arguments for.
unique_function_id: UUID of function that this tracks arguments for.
node_name_prefix: How identities that are created are named.
attr_name: Name of attribute to use to store the index for this hint.
i.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX
level: Hierarchical level of the Ophint node, a number.
children_inputs_mappings: Inputs/Outputs mapping for children hints.
"""
# The global index is the argument index of the op. This is in contrast
# to the sort index which is the sequence number of a particular instance
# of a given global index. For example, you may have called add hint
# twice with the tag "foo". Then the global index will be 0 for both
# and the sort index will be 0 for the first added and 1 for the second.
self._function_name = function_name
self._unique_function_id = unique_function_id
self._next_global_index = 0 # The absolute global index
self._used_global_indices = set()
self._tag_to_global_index = {} # The argument index a given tag maps to
self._tag_to_next_sort_index = {} # The current index for each tag
self._node_name_prefix = node_name_prefix
self._attr_name = attr_name
self._level = level
self._children_inputs_mappings = children_inputs_mappings
def _get_new_global_index(self, index_override):
"""Return the next unused argument index in order or use an override.
Args:
index_override: An index to use instead of the next available or None
to use the next available.
Returns:
A valid global_index to use for the next hint argument.
Raises:
ValueError: If the index_override is already used by another hint.
"""
if index_override is None:
global_index = self._next_global_index
else:
if index_override in self._used_global_indices:
raise ValueError("Index %d was already used by another call to add")
global_index = index_override
# Make next_global_index valid
self._used_global_indices.add(global_index)
while self._next_global_index in self._used_global_indices:
self._next_global_index += 1
return global_index
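    # Illustrative sketch of the allocation order (no real graph involved):
    # automatic calls hand out 0, 1, 2, ... in sequence; after a call with
    # index_override=1, the next automatic call still returns 0, and the one
    # after that skips the reserved 1 and returns 2.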
def add(self, arg, tag=None, name=None, aggregate=None,
index_override=None):
"""Return a wrapped tensor of an input tensor as an argument.
Args:
arg: A TensorFlow tensor that should be considered an argument.
tag: String tag to identify arguments that should be packed.
name: Name of argument. This is included in the Identity hint op names.
aggregate: Strategy to aggregate.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
Note, aggregate is only valid if tag is specified.
index_override: Specify what input/output index should this be in the
final stub. i.e. add(arg0, index=1); add(arg1, index=0) will make the
final stub be as stub_func(inputs[arg1, arg0], outputs=[]) rather than
the default call order based ordering.
Returns:
A tensor representing the wrapped argument.
Raises:
ValueError: When indices are not consistent.
"""
# Find the appropriate index
if tag is None:
if aggregate is not None:
raise ValueError("You must specify `tag` if using aggregate.")
global_index = self._get_new_global_index(index_override)
sort_index = None
else:
if aggregate is None:
raise ValueError("You must specify `aggregate` if using tag.")
if tag not in self._tag_to_global_index:
self._tag_to_global_index[tag] = (
self._get_new_global_index(index_override))
self._tag_to_next_sort_index[tag] = 0
elif (index_override and
index_override != self._tag_to_global_index[tag]):
raise ValueError(
"Tag %r was called with two indices %r and %r" %
(tag, index_override, self._tag_to_global_index[tag]))
global_index = self._tag_to_global_index[tag]
sort_index = self._tag_to_next_sort_index[tag]
self._tag_to_next_sort_index[tag] += 1
uuid = self._unique_function_id
name = "%s-%s-%s-%r-%r-%s" % (self._node_name_prefix, self._function_name,
uuid, global_index, sort_index, name)
identity_op = _array_ops.identity(arg, name=name)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(self._function_name)))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(self._unique_function_id)))
identity_op.op._set_attr(
self._attr_name, _attr_value_pb2.AttrValue(i=global_index))
identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR,
_attr_value_pb2.AttrValue(i=self._level))
if self._children_inputs_mappings:
identity_op.op._set_attr(
OpHint.CHILDREN_INPUTS_MAPPINGS,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(_json.dumps(
self._children_inputs_mappings))))
if sort_index is not None:
identity_op.op._set_attr(
OpHint.FUNCTION_SORT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=sort_index))
if aggregate is not None:
identity_op.op._set_attr(
OpHint.FUNCTION_AGGREGATE_ATTR,
_attr_value_pb2.AttrValue(s=_compat.as_bytes((aggregate))))
# pylint: enable=protected-access
return identity_op
def __init__(self,
function_name,
level=1,
children_inputs_mappings=None,
**kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
level: OpHint level.
children_inputs_mappings: Children OpHint inputs/outputs mapping.
children_inputs_mappings should like below:
"parent_first_child_input":
[{"parent_input_index": num, "child_input_index": num}, ...]
"parent_last_child_output":
[{"parent_output_index": num, "child_output_index": num}, ...]
"internal_children_input_output":
[{"child_input_index": num, "child_output_index": num}, ...]
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._level = level
if self._level == 1:
assert children_inputs_mappings is None
else:
assert isinstance(children_inputs_mappings, dict)
self._children_inputs_mappings = children_inputs_mappings
if self._children_inputs_mappings is not None:
self._validate_children_inputs_mappings(self._children_inputs_mappings)
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._attrs_to_store_later = kwargs
self._stored_attrs = False
self._inputs = OpHint.OpHintArgumentTracker(
self._function_name, self._unique_function_id, "InputHint",
OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings)
self._outputs = OpHint.OpHintArgumentTracker(
self._function_name, self._unique_function_id, "OutputHint",
OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level,
self._children_inputs_mappings)
def _validate_children_inputs_mappings(self, children_inputs_mappings):
"""Validate children inputs mappings is in the right format.
Args:
children_inputs_mappings: the Children ophint inputs/outputs mapping.
"""
assert isinstance(children_inputs_mappings, dict)
assert "parent_first_child_input" in children_inputs_mappings
assert "parent_last_child_output" in children_inputs_mappings
assert "internal_children_input_output" in children_inputs_mappings
# validate parent_first_child_input.
def assert_dictlist_has_keys(dictlist, keys):
for dikt in dictlist:
assert isinstance(dikt, dict)
for key in keys:
assert key in dikt
assert_dictlist_has_keys(
children_inputs_mappings["parent_first_child_input"],
["parent_ophint_input_index", "first_child_ophint_input_index"])
assert_dictlist_has_keys(
children_inputs_mappings["parent_last_child_output"],
["parent_output_index", "child_output_index"])
assert_dictlist_has_keys(
children_inputs_mappings["internal_children_input_output"],
["child_input_index", "child_output_index"])
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_input(self, *args, **kwargs):
"""Add a wrapped input argument to the hint.
Args:
*args: The input tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped input tensor.
"""
return self._inputs.add(*args, **kwargs)
def add_output(self, *args, **kwargs):
"""Add a wrapped output argument to the hint.
Args:
*args: The output tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped output tensor.
"""
return self._outputs.add(*args, **kwargs)
def add_inputs(self, *args, **kwargs):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
**kwargs: This allows 'names' which should be a list of names.
Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensors.
"""
if "names" in kwargs:
return [
self._inputs.add(arg, name=name)
for arg, name in zip(args, kwargs["names"])
]
else:
return [self._inputs.add(arg) for arg in args]
def add_outputs(self, *args, **kwargs):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
      **kwargs: This allows 'names' which should be a list of names.
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
if "names" in kwargs:
return [
self._outputs.add(arg, name=name)
for arg, name in zip(args, kwargs["names"])
]
else:
return [self._outputs.add(arg) for arg in args]
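  # A short usage sketch (tensor variables x, y, z are hypothetical):
  #   hint = OpHint("my_custom_op")
  #   x, y = hint.add_inputs(x, y, names=["x", "y"])
  #   z, = hint.add_outputs(z, names=["z"])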
class _LiteOperand(object):
"""Abstract operand for a tflite hint function._dynamic_rnn_loop.
This is a base class that handles representing arguments to an OpHint.
It also is able to serialize operands to the stubbed graph_def.
Child classes are responsible for being able to
store information about the hint identity operators. They are also responsible
for knowing how to serialize to output graphdefs.
Typically this will be implemented by holding one or more identity nodes
that were previously discovered as hints.
"""
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the node(s) to out_graphdef and returns the input node name.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The output that the stub should use as an input for this operand.
Raises:
RuntimeError: if the method is not implemented.
"""
del out_graphdef
raise RuntimeError("Unimplemented abstract method.")
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""Add node(s) to graph representing output operands and returns type.
Args:
fused_op_name: name of the fused op stub name.
output_index: Output index that we are currently processing from stub.
out_graphdef: The destination graphdef we are currently building up.
Returns:
The datatype of this identity.
Raises:
RuntimeError: if the method is not implemented.
"""
del fused_op_name, output_index, out_graphdef
raise RuntimeError("Unimplemented abstract method.")
class _LiteSingleOperand(_LiteOperand):
"""A simple operand that is non-aggregated (i.e. most hints)."""
def __init__(self, node):
_LiteOperand.__init__(self)
self.node = node
self.name = _tensor_name_base(node.name)
def flatten(self):
return [self.name]
def aggregate_and_return_name_for_input(self, out_graphdef):
return self.name
def aggregate_and_return_name_for_output(self, fused_op_name, index,
out_graphdef):
output_node = _copy.deepcopy(self.node)
del output_node.input[:]
output_node.input.append(_tensorflow_output_name(fused_op_name, index))
out_graphdef.node.extend([output_node])
return self.node.attr["type"].i
def __str__(self):
return str(self.name)
class _LiteAggregateOperand(_LiteOperand):
"""An operand for a tflite hint function that is aggregated from many.
For example, an LSTM is a grid of operators that are all related. Inputs
going into them may need to be fused, so they should all be tracked as
related arguments.
"""
def __init__(self, aggregation):
_LiteOperand.__init__(self)
self.aggregation = aggregation
self.names = {}
self.nodes = {}
self.flattened = None
def add(self, sort, node):
self.names[sort] = _tensor_name_base(node.name)
self.nodes[sort] = node
def flatten_nodes(self):
"""Return a list of all the node protos in aggregation sorted order."""
if not self.flattened:
self.flattened = [None] * len(self.nodes)
for idx, node in _six.iteritems(self.nodes):
self.flattened[idx] = node
      # Check the flattened list (not the dict keys) for missing arguments.
      for n in self.flattened:
        if n is None:
          raise RuntimeError("Aggregate was missing argument.")
if self.aggregation == OpHint.AGGREGATE_FIRST:
self.flattened = self.flattened[:1]
elif self.aggregation == OpHint.AGGREGATE_LAST:
self.flattened = self.flattened[-1:]
elif self.aggregation == OpHint.AGGREGATE_STACK:
pass
else:
raise ValueError(
"Invalid aggregation type %r specified" % self.aggregation)
return self.flattened
def flatten(self):
"""Return a list of all node names in aggregation sorted sorter."""
return [_tensor_name_base(x.name) for x in self.flatten_nodes()]
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the nodes to out_graphdef and returns an aggregated output.
    In particular, if you have 4 inputs to a hint stub, this will be the
    node whose output you can feed to the stub. I.e. if you have 4 timesteps
    from a static rnn, then a fused UnidirectionalLSTM will expect 1 input
    with all 4 time steps, so here we make a Pack op and return the output
    name of that pack.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The name of a pack that aggregates this node.
"""
flattened = self.flatten_nodes()
if (self.aggregation == OpHint.AGGREGATE_FIRST) or (
self.aggregation == OpHint.AGGREGATE_LAST):
assert len(flattened) == 1
if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
return _tensor_name_base(flattened[0].name)
else:
new_node = _node_def_pb2.NodeDef()
new_node.op = "Pack"
new_node.name = "OpHintStack-%s" % flattened[0].name
new_node.attr["N"].i = len(flattened)
new_node.attr["T"].type = flattened[0].attr["T"].type
for discrete in flattened:
new_node.input.append(_tensor_name_base(discrete.name))
out_graphdef.node.extend([new_node])
return new_node.name
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""This adds to `out_graphdef` all the unaggregated outputs.
I.e. we are outputting from a fused stub, but we need to make it compatible
with the unfused original graph so we insert an unpack. Ideally in a later
stage the unpack -> pack sequences will be removed.
Args:
fused_op_name: The name of the stub we are in the process of fusing.
      output_index: The output index this object represents.
      out_graphdef: The graphdef we are in the process of building.
Returns:
The type of the aggregated output (so we can finish building the stub
op).
"""
flattened = self.flatten_nodes()
if (self.aggregation == OpHint.AGGREGATE_FIRST) or (
self.aggregation == OpHint.AGGREGATE_LAST):
assert len(flattened) == 1
if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
temp_op = _LiteSingleOperand(flattened[0])
return temp_op.aggregate_and_return_name_for_output(
fused_op_name, output_index, out_graphdef)
else:
stack_node = _node_def_pb2.NodeDef()
stack_node.op = "Unpack"
stack_node.name = "OpHintUnstack-%s" % flattened[0].name
stack_node.attr["num"].i = len(flattened)
output_type = flattened[0].attr["T"].type
stack_node.attr["T"].type = output_type
stack_node.input.append(_tensorflow_output_name(
fused_op_name, output_index))
out_graphdef.node.extend([stack_node])
for idx, discrete in enumerate(flattened):
output_node = _copy.deepcopy(discrete)
del output_node.input[:]
output_node.input.append(_tensorflow_output_name(stack_node.name, idx))
out_graphdef.node.extend([output_node])
return output_type
def __str__(self):
s = "\t\t\tAGGREGATE %s\n" % self.aggregation
    for sort, val in _six.iteritems(self.names):
s += "\t\t\t%d: %s\n" % (sort, val)
return s
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Attributes:
inputs: inputs to the op (hash from index # to argument)
outputs: outputs to the op (hash from index # to argument)
function_name: the tflite custom op name to use
    uuid: a unique call id for this particular call (i.e.
      multiple function calls would have the same function_name but different
      uuids).
params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
level: Level of the OpHint.
children_inputs_mappings: If the Ophint has children, children inputs
mappings indicate how their inputs & outputs are mapped.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
self.level = -1
self.children_inputs_mappings = {}
def flattened_inputs_and_outputs(self):
"""Return a list of inputs and outputs in a flattened format.
Returns:
      Tuple of (inputs, outputs), where inputs and outputs are each a list
      of names.
"""
def _flatten(input_or_output_dict):
flattened_items = []
for item in input_or_output_dict.values():
flattened_items.extend(item.flatten())
return flattened_items
return _flatten(self.inputs), _flatten(self.outputs)
def __str__(self):
def format_args(items):
s = ""
      for idx, item in _six.iteritems(items):
s += ("\t\t%d:\n" % idx) + str(item)
return s
inputs_str = "\tInputs\n" + format_args(self.inputs)
outputs_str = "\tOutputs\n" + format_args(self.outputs)
return (
"tflite function %s call %s level %d "
"\n\tinputs:\n\t\t%s\n\toutputs:\n\t\t%s" %
(self.function_name, self.uuid, self.level, inputs_str, outputs_str))
def _find_all_hints_in_nodes(nodes):
"""Look at the all the input nodes and return a list of LiteFuncCall objs.
Args:
nodes: A TensorFlow graph_def to look for LiteFuncCalls.
Returns:
a list of `LifeFuncCall` objects in the form
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
for node in nodes:
attr = node.attr
# This is an op hint if it has a FUNCTION_UUID_ATTR, otherwise skip
if (OpHint.FUNCTION_UUID_ATTR not in attr
or not attr[OpHint.FUNCTION_UUID_ATTR].s):
continue
uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
# Start building function
call_def = func_calls[uuid]
call_def.uuid = uuid
call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
call_def.level = attr[OpHint.FUNCTION_LEVEL_ATTR].i
# Get sorting and aggregation information
sort = (attr[OpHint.FUNCTION_SORT_INDEX_ATTR].i
if OpHint.FUNCTION_SORT_INDEX_ATTR in attr else None)
    if sort == -1:
      sort = None
aggregation = None
if OpHint.FUNCTION_AGGREGATE_ATTR in attr:
aggregation = _compat.as_text(attr[OpHint.FUNCTION_AGGREGATE_ATTR].s)
if OpHint.CHILDREN_INPUTS_MAPPINGS in attr:
call_def.children_inputs_mappings = _json.loads(
_compat.as_text(attr[OpHint.CHILDREN_INPUTS_MAPPINGS].s))
# Add the input or output
def put_operand(stuff, index, sort, operand, aggregation):
"""Add a given index into the function structure."""
if sort is None:
stuff[index] = _LiteSingleOperand(operand)
else:
if index not in stuff:
stuff[index] = _LiteAggregateOperand(aggregation)
stuff[index].add(sort, operand)
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
put_operand(call_def.inputs, attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i,
sort, node, aggregation)
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
put_operand(call_def.outputs, attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i,
sort, node, aggregation)
# Remember attributes
for a in attr:
if a.startswith("_tflite_attr_"):
        call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
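# A minimal usage sketch for the helper above (hypothetical; `graph_def` is
# any GraphDef containing OpHint-annotated nodes):
#
#   hints = _find_all_hints_in_nodes(graph_def.node)
#   for uuid, call in hints.items():
#     print(call)  # prints function_name, uuid, level, inputs and outputs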
def _extract_topology_sequence_mapping(nodes):
return dict(
(_tensor_name_base(node.name), idx) for idx, node in enumerate(nodes))
def _find_children_hints_in_while_loop(function_def, nodes_mapping):
"""Find children hints and all nodes inside the while loop.
Args:
function_def: Function def of the while loop.
    nodes_mapping: Dict mapping the while loop's input_arg names to real node
      names.
Returns:
Ordered children hints and all re-mapped nodes inside the while loop.
"""
new_nodes = []
# Make nodes inside function def inputs point to the real nodes.
for node in function_def.node_def:
for i, _ in enumerate(node.input):
if node.input[i] in nodes_mapping:
node.input[i] = nodes_mapping[node.input[i]]
new_nodes.append(_copy.deepcopy(node))
name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)
children_hints = _find_all_hints_in_nodes(new_nodes)
children_hints_q = []
# Ordered by the outputs.
for hint in _six.itervalues(children_hints):
_, output_names = hint.flattened_inputs_and_outputs()
seq = name_to_seq_num[output_names[0]]
for output_name in output_names:
seq = min(seq, name_to_seq_num[output_name])
children_hints_q.append((seq, hint))
children_hints_q.sort(key=lambda tup: tup[0])
ordered_children_hints = [x[1] for x in children_hints_q]
return ordered_children_hints, new_nodes
def _find_children_hints(call, graph_def):
"""Find all children hints.
  For a given OpHint, we find all children hints inside it. We also copy all
  the nodes inside function defs (if applicable) into the original graph_def;
  they are returned in a list as well.
Args:
call: Parent OpHint that contains children ophints.
graph_def: Original graph def.
Returns:
Ordered children hints inside the parent ophint; new graph def that contains
nodes inside function defs (if applicable); nodes inside function defs.
"""
name_to_input_name, _, _ = _extract_graph_summary(graph_def)
input_names, output_names = call.flattened_inputs_and_outputs()
reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
reachable_by_output = _bfs_for_reachable_nodes(output_names,
name_to_input_name)
output_nodes_set = set(output_names)
children_hints = []
out = _graph_pb2.GraphDef()
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
function_def_nodes = set()
for node in graph_def.node:
out.node.extend([_copy.deepcopy(node)])
n = _tensor_name_base(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
        # Special handling for the while loop's function def.
if node.op == "While":
body_name = node.attr["body"].func.name
inputs_outside_loop = node.input
for function_def in graph_def.library.function:
if function_def.signature.name == body_name:
function_inputs = function_def.signature.input_arg
assert len(inputs_outside_loop) == len(function_inputs)
nodes_mapping = {}
for i, function_input in enumerate(function_inputs):
nodes_mapping[function_input.name] = inputs_outside_loop[i]
# TODO(b/123050804): Consider use grappler.
(children_hints_in_loop,
new_nodes) = _find_children_hints_in_while_loop(
function_def, nodes_mapping)
function_def_nodes.update([x.name for x in new_nodes])
children_hints.extend(children_hints_in_loop)
out.node.extend(new_nodes)
return children_hints, out, function_def_nodes
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
if full_tensor_name.startswith("^"):
return full_tensor_name[1:]
return full_tensor_name.split(":")[0]
def _tensorflow_output_name(tensor_name, output_index):
return tensor_name if output_index == 0 else "%s:%d" % (tensor_name,
output_index)
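# For example, _tensorflow_output_name("fused", 0) yields "fused", while
# _tensorflow_output_name("fused", 2) yields "fused:2".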
# TODO(aselle): This should be converted to grappler in the future.
def _check_subgraph_closed(n, reachable_by_input, input_nodes_set,
name_to_input_name):
"""Checks to make sure node only connects to predecessor graph through inputs.
Args:
n: Node to check
reachable_by_input: Nodes that are reachable by all inputs of subgraph
input_nodes_set: The set of nodes that are "inputs".
name_to_input_name: Maps from name to the list of inputs.
Raises:
TypeError: If the given node uses items past inputs directly.
"""
next_to_visit = [n]
visited = set()
while next_to_visit:
current_node = next_to_visit.pop()
visited.add(current_node)
if (current_node in reachable_by_input
and current_node not in input_nodes_set):
raise TypeError(
"Node %s uses input %s not in input_nodes." % (n, current_node))
if current_node not in input_nodes_set:
next_to_visit += [
input_node for input_node in name_to_input_name[current_node]
if input_node not in visited
]
# TODO(aselle): This should be converted to grappler in the future.
def _convert_single_op_hint_to_stub(call,
graph_def,
function_def_nodes=None,
is_last_run=True):
"""Given a graph_def, converts `call` into a stub and returns a new graph_def.
Args:
call: A single function call to be converted.
    graph_def: A graph_def to use as input (which must contain `call`).
    function_def_nodes: Nodes inside the function def that are not connected
      to the graph.
    is_last_run: Whether this is the last run for a given pass (relevant for
      OpHints that have children).
Returns:
A new transformed graph-def that has call as a stub (single op).
Note: after this process, the graph_def can no longer be loaded into
the tensorflow runtime, so all future manipulations are done in graph_def
level.
"""
if function_def_nodes is None:
function_def_nodes = set()
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
input_names, output_names = call.flattened_inputs_and_outputs()
reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
reachable_by_output = _bfs_for_reachable_nodes(output_names,
name_to_input_name)
output_nodes_set = set(output_names)
nodes_after_fuse = []
nodes_deleted_by_fuse = set()
  # Classify each node. We want to keep everything reachable by input;
  # nodes reachable by output but not by input are consumed by the fuse,
  # and nodes reachable by neither come after the fused op.
for node in graph_def.node:
n = _tensor_name_base(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
nodes_deleted_by_fuse.add(n)
elif n not in reachable_by_input and n not in function_def_nodes:
      # n is a node that comes after all the fusings, so keep it.
nodes_after_fuse.append(n)
else:
      # n is in the graph but not connected to the chain of dependencies.
      # On the last run we delete such stray nodes; otherwise we keep them.
if not is_last_run:
nodes_after_fuse.append(n)
# Make a new graphdef with all the pre-input and input nodes
out = _graph_pb2.GraphDef()
reachable_by_input_sorted = sorted(
list(reachable_by_input), key=lambda n: name_to_seq_num[n])
for node in reachable_by_input_sorted:
out.node.extend([_copy.deepcopy(name_to_node[node])])
  # Create any stacks to aggregate arguments into a single input,
  # e.g. for static_rnn's.
# TODO(aselle): Check that the inputs are complete i.e. 0 to n-1
sorted_input_indices = list(call.inputs.keys())
sorted_input_indices.sort()
sorted_output_indices = list(call.outputs.keys())
sorted_output_indices.sort()
new_node = _node_def_pb2.NodeDef()
# Delegate to each operand to produce the proper new input for this stub node.
# In particular, an aggregate input will now be a Pack of some previously
# non-fused things.
for input_index in sorted_input_indices:
inputs = call.inputs[input_index]
input_name = inputs.aggregate_and_return_name_for_input(out)
new_node.input.append(input_name)
new_node.attr[OpHint.TFLITE_INPUT_INDICES].list.i.extend(sorted_input_indices)
# Create the function
new_node.op = call.function_name
new_node.name = call.uuid
out.node.extend([new_node])
# Now call each output argument to give them a chance to make the proper
# output type and add it to our new_node.
output_dtypes = []
for output_index in sorted_output_indices:
output = call.outputs[output_index]
output_dtype = (
output.aggregate_and_return_name_for_output(new_node.name, output_index,
out))
output_dtypes.append(output_dtype)
new_node.attr["_output_types"].list.type[:] = output_dtypes
# TODO(aselle): what is right here?
new_node.attr["_output_quantized"].b = False
# Add post output nodes that do not depend on the outputs
for n in nodes_after_fuse:
should_keep = True
for input_name in name_to_input_name[n]:
if input_name in nodes_deleted_by_fuse:
should_keep = False
if should_keep:
out.node.extend([_copy.deepcopy(name_to_node[n])])
# Misc. graph_def data that needs copying.
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
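# Design note: the stub's inputs come from each operand's
# aggregate_and_return_name_for_input (which may emit Pack nodes for
# aggregated inputs) and its outputs from
# aggregate_and_return_name_for_output (which may emit Unpack nodes); any
# redundant Pack/Unpack pairs this creates are elided by the passes below.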
# TODO(aselle): This should be converted to grappler in the future.
def _remove_one_redundant_stack_unstack(in_graph_def):
"""Removes a stack->unstack pattern from in_graph_def in a returned graph.
Args:
in_graph_def: Graph def to use as input.
Returns:
    A tuple (graph_def, changed_something), where changed_something is true
    if anything was simplified.
"""
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
in_graph_def)
del name_to_seq_num
# TODO(aselle): Make this not hardcoded.
do_generic_pack_unpack = True
out = _graph_pb2.GraphDef()
out.library.CopyFrom(in_graph_def.library)
out.versions.CopyFrom(in_graph_def.versions)
for n in in_graph_def.node:
node_name = _tensor_name_base(n.name)
if not node_name.startswith("OpHintStack") and not n.op.startswith("Pack"):
continue
next_to_visit = [node_name]
visited = set()
unpack_nodes = set()
pack_node = node_name
    # Find a pattern of unstack connected to a stack (with identities
    # in between).
matches_pattern = True
is_hint_created_stack = False
while next_to_visit:
current_node_name = next_to_visit[0]
visited.add(current_node_name)
del next_to_visit[0]
node = name_to_node[current_node_name]
is_op_hint_stack = node.name.startswith("OpHintStack")
is_op_hint_unstack = node.name.startswith("OpHintUnstack")
if (node.op == "Identity" or is_op_hint_stack
or (do_generic_pack_unpack and node.op == "Pack")):
is_hint_created_stack |= is_op_hint_stack
next_to_visit += [
input_node for input_node in name_to_input_name[current_node_name]
if input_node not in visited
]
elif (is_op_hint_unstack
or (do_generic_pack_unpack and node.op == "Unpack")):
unpack_nodes.add(node.name)
is_hint_created_stack &= is_op_hint_unstack
else:
matches_pattern = False
break
visited.add(node.name)
if matches_pattern and len(unpack_nodes) == 1:
pack_node = node_name
# Check to see if anyone depends on the intermediate identity or the
# Unstacked form
no_external_dependency = True
for other_n in in_graph_def.node:
if other_n.name in visited: continue
for input_tensor in name_to_input_name[other_n.name]:
input_op = _tensor_name_base(input_tensor)
if input_op in visited and input_op != pack_node:
no_external_dependency = False
      # Proceed with the substitution if the stack/unstack pair was created
      # through hints, or, if it was not, if nobody is consuming anything
      # between the stack and unstack.
if is_hint_created_stack or no_external_dependency:
end = unpack_nodes.pop()
end_input = name_to_node[end].input[0]
        # All nodes that depend on the final stack need to be rewired to use
        # the stack's input directly.
for other_n in in_graph_def.node:
node_name = _tensor_name_base(other_n.name)
if node_name not in visited:
new_node = _copy.deepcopy(other_n)
new_node.input[:] = [
(end_input if stripped == pack_node else
non_stripped) for stripped, non_stripped in zip(
name_to_input_name[node_name], new_node.input[:])
]
out.node.extend([new_node])
return out, True
return in_graph_def, False
def _remove_redundant_stack_unstack(graph_def):
curr = graph_def
del graph_def
changed_stuff = True
while changed_stuff:
curr, changed_stuff = _remove_one_redundant_stack_unstack(curr)
return curr
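# Illustrative shape of the pattern removed above (node names hypothetical):
# a Pack whose only consumers, possibly through Identity nodes, form a chain
# ending in a single Unpack collapses so that the Unpack's consumers read the
# Pack's input directly.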
def _get_correct_mapping(original_index, nodes):
  # Special handling for an index of -1: return the last (largest) index.
  if original_index == -1:
    node_indices = sorted(nodes.keys())
    return node_indices[-1]
  return original_index
def _convert_op_hints_to_stubs_helper(
    graph_def, write_callback=lambda graph_def, comments: None):
"""Converts a graph_def to a new graph_def where all op hints are stubbed.
Args:
graph_def: A graph def that we should convert.
write_callback: A function pointer that can be used to write intermediate
steps of graph transformation (optional).
Returns:
A new stubbed graph_def.
"""
hints = _find_all_hints_in_nodes(graph_def.node)
hints_q = []
for hint in _six.itervalues(hints):
hints_q.append((hint.level, hint.uuid))
hints_q.sort(key=lambda tup: tup[0])
curr_graph_def = graph_def
del graph_def # prevent using graph_def again (common source of error)
for i in range(len(hints_q) - 1, -1, -1):
level, hint_uuid = hints_q[i]
if level >= 2:
children_hints, curr_graph_def, function_def_nodes = _find_children_hints(
hints[hint_uuid], curr_graph_def)
# pylint: disable=superfluous-parens
assert (len(children_hints) > 0) # pylint: disable=g-explicit-length-test
# pylint: enable=superfluous-parens
      # Re-wire the children hints' inputs/outputs, so a later child's inputs
      # connect to the previous child's outputs.
children_inputs_mappings = hints[hint_uuid].children_inputs_mappings
for j, child_hint in enumerate(children_hints):
if j == 0:
for mapping in children_inputs_mappings["parent_first_child_input"]:
parent_input_index = _get_correct_mapping(
mapping["parent_ophint_input_index"], hints[hint_uuid].inputs)
child_input_index = _get_correct_mapping(
mapping["first_child_ophint_input_index"], child_hint.inputs)
child_hint.inputs[child_input_index] = hints[hint_uuid].inputs[
parent_input_index]
else:
for mapping in children_inputs_mappings[
"internal_children_input_output"]:
input_index = _get_correct_mapping(mapping["child_input_index"],
child_hint.inputs)
output_index = _get_correct_mapping(mapping["child_output_index"],
children_hints[j - 1].outputs)
child_hint.inputs[input_index] = children_hints[
j - 1].outputs[output_index]
if j == len(children_hints) - 1:
for mapping in children_inputs_mappings["parent_last_child_output"]:
parent_output_index = _get_correct_mapping(
mapping["parent_output_index"], hints[hint_uuid].outputs)
child_output_index = _get_correct_mapping(
mapping["child_output_index"], child_hint.outputs)
child_hint.outputs[child_output_index] = hints[hint_uuid].outputs[
parent_output_index]
for j, child_hint in enumerate(children_hints):
curr_graph_def = _convert_single_op_hint_to_stub(
child_hint, curr_graph_def, function_def_nodes,
j == len(children_hints) - 1)
else:
curr_graph_def = _convert_single_op_hint_to_stub(hints[hint_uuid],
curr_graph_def)
write_callback(curr_graph_def, "initial")
  # The stubbing process can create stacks/unstacks in the case of LSTMs;
  # remove them.
curr_graph_def = _remove_redundant_stack_unstack(curr_graph_def)
return curr_graph_def
def find_all_hinted_output_nodes(session=None, graph_def=None):
"""Find all Ophints output nodes in the graph.
  This is used to get all the ophinted output nodes; it is important for
  operations like convert_variables_to_constants to keep the entire ophint
  structure.
Note: only one of session or graph_def should be used, not both.
  Why can this be useful? Some TensorFlow ops (e.g. bidirectional rnn) can
  generate multiple outputs for an unfused subgraph. If not all output nodes
  are consumed, graph optimization can potentially drop the unused nodes and
  leave the ophints in an invalid state (due to missing ophinted output
  nodes). So it's important for us to find all those hinted output nodes and
  make sure they're not discarded.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
Returns:
A list of OpHints output nodes.
Raises:
    ValueError: If both session and graph_def are provided, or neither.
"""
if session is not None and graph_def is not None:
raise ValueError("Provide only one of session and graph_def.")
hinted_outputs_nodes = []
if session is not None:
hints = _find_all_hints_in_nodes(session.graph_def.node)
  elif graph_def is not None:
    hints = _find_all_hints_in_nodes(graph_def.node)
  else:
    raise ValueError("Must specify session or graph_def as input.")
for hint in _six.itervalues(hints):
_, output_nodes = hint.flattened_inputs_and_outputs()
hinted_outputs_nodes.extend(output_nodes)
return hinted_outputs_nodes
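# A minimal usage sketch (hypothetical names; `sess` is a
# tf.compat.v1.Session whose graph contains OpHints, and output_node_names
# is the caller's own list of graph outputs):
#
#   hinted = find_all_hinted_output_nodes(session=sess)
#   frozen = tf.compat.v1.graph_util.convert_variables_to_constants(
#       sess, sess.graph_def, output_node_names + hinted)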
@_tf_export(v1=["lite.experimental.convert_op_hints_to_stubs"])
def convert_op_hints_to_stubs(session=None,
graph_def=None,
write_callback=lambda graph_def, comments: None):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Note: only one of session or graph_def should be used, not both.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
write_callback: A function pointer that can be used to write intermediate
steps of graph transformation (optional).
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
Raises:
ValueError: If both session and graph_def are provided.
"""
if session is not None and graph_def is not None:
raise ValueError("Provide only one of session and graph_def.")
if session is not None:
return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback)
elif graph_def is not None:
return _convert_op_hints_to_stubs_helper(graph_def, write_callback)
else:
raise ValueError("Must specify session or graph_def as input.")
_allowed_symbols = [
"OpHint", "convert_op_hints_to_stubs", "convert_op_hints_to_stubs_new",
"find_all_hinted_output_nodes"
]
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/lite/python/op_hint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Find the toco_from_protos binary using the resource loader if running from
# bazel; otherwise we are in a pip package where console_scripts already
# provides the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return output.decode()
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
# Convert model using only TensorFlow Lite quantized int8 operations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
def __str__(self):
return self.value
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
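# For example, OpsSet.get_options() returns
# ["TFLITE_BUILTINS", "SELECT_TF_OPS", "TFLITE_BUILTINS_INT8"].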
class ConverterError(Exception):
"""Raised when an error occurs during model conversion."""
pass
def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
"""Convert `input_data_str` according to model and toco parameters.
  Unless you know what you are doing, consider using
the more friendly `tf.compat.v1.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
ConverterError: When conversion fails in TFLiteConverter, usually due to
ops not being supported.
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if not _toco_from_proto_bin:
try:
model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
toco_flags_str, input_data_str)
return model_str
except Exception as e:
raise ConverterError("TOCO failed: %s" % e)
# Windows and TemporaryFile are not that useful together,
# since you cannot have two readers/writers. So we have to
# make the temporaries and close and delete them explicitly.
toco_filename, model_filename, input_filename, output_filename = (
None, None, None, None)
try:
# Build all input files
with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
_tempfile.NamedTemporaryFile(delete=False) as fp_model, \
_tempfile.NamedTemporaryFile(delete=False) as fp_input:
toco_filename = fp_toco.name
input_filename = fp_input.name
model_filename = fp_model.name
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(input_data_str)
fp_model.flush()
fp_toco.flush()
fp_input.flush()
# Reserve an output file
with _tempfile.NamedTemporaryFile(delete=False) as fp:
output_filename = fp.name
# Run
cmd = [
_toco_from_proto_bin, model_filename, toco_filename, input_filename,
output_filename
]
cmdline = " ".join(cmd)
is_windows = _platform.system() == "Windows"
proc = _subprocess.Popen(
cmdline,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.STDOUT,
close_fds=not is_windows)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
with open(output_filename, "rb") as fp:
return fp.read()
else:
stdout = _try_convert_to_unicode(stdout)
stderr = _try_convert_to_unicode(stderr)
raise ConverterError(
"TOCO failed. See console for info.\n%s\n%s\n" % (stdout, stderr))
finally:
# Must manually cleanup files.
for filename in [
toco_filename, input_filename, model_filename, output_filename]:
try:
_os.unlink(filename)
except (OSError, TypeError):
pass
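# A minimal usage sketch (hypothetical; the flags are built with
# `build_toco_convert_protos` below and `graph_def` is a frozen GraphDef):
#
#   model_flags, toco_flags = build_toco_convert_protos([inp], [out])
#   tflite_model = toco_convert_protos(model_flags.SerializeToString(),
#                                      toco_flags.SerializeToString(),
#                                      graph_def.SerializeToString())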
def build_toco_convert_protos(input_tensors,
output_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
drop_control_dependency=True,
reorder_across_fake_quant=False,
allow_custom_ops=False,
change_concat_input_ranges=False,
post_training_quantize=False,
dump_graphviz_dir=None,
dump_graphviz_video=False,
target_ops=None,
allow_nonexistent_arrays=False):
"""Builds protocol buffers describing a conversion of a model using TOCO.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Target data type of real-number arrays in the output file.
Must be `{tf.float32, tf.uint8}`. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization.
Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
`{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
input_shapes: Input array shape. It needs to be a list of the same length
as `input_tensors`, or None. (default None)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: List of tuples of floats representing the mean and
standard deviation. Each tuple maps to the corresponding input tensor.
      Only needed if `inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default None)
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy).
(default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing as GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Experimental flag, subject to change. Set of OpsSet
options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
allow_nonexistent_arrays: Allow specifying array names that don't exist
or are unused in the final graph. (default False)
Returns:
model_flags, toco_flags: two protocol buffers describing the conversion
process.
Raises:
    ValueError:
      If the input tensor type is unknown.
      If mean_values or std_dev_values are missing when required.
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
if inference_input_type:
toco.inference_input_type = util.convert_dtype_to_tflite_type(
inference_input_type)
else:
toco.inference_input_type = toco.inference_type
toco.drop_control_dependency = drop_control_dependency
toco.reorder_across_fake_quant = reorder_across_fake_quant
toco.allow_custom_ops = allow_custom_ops
toco.post_training_quantize = post_training_quantize
if default_ranges_stats:
toco.default_ranges_min = default_ranges_stats[0]
toco.default_ranges_max = default_ranges_stats[1]
if dump_graphviz_dir:
toco.dump_graphviz_dir = dump_graphviz_dir
toco.dump_graphviz_include_video = dump_graphviz_video
if target_ops:
if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
toco.force_select_tf_ops = True
model = _model_flags_pb2.ModelFlags()
model.change_concat_input_ranges = change_concat_input_ranges
for idx, input_tensor in enumerate(input_tensors):
input_array = model.input_arrays.add()
input_array.name = util.get_tensor_name(input_tensor)
input_array.data_type = util.convert_dtype_to_tflite_type(
input_tensor.dtype)
if toco.inference_input_type == _types_pb2.QUANTIZED_UINT8:
if not quantized_input_stats:
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
if input_shapes is None:
shape = input_tensor.shape
else:
shape = input_shapes[idx]
input_array.shape.dims.extend(map(int, shape))
for output_tensor in output_tensors:
model.output_arrays.append(util.get_tensor_name(output_tensor))
model.allow_nonexistent_arrays = allow_nonexistent_arrays
return model, toco
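# A minimal usage sketch (hypothetical; `in_tensor`/`out_tensor` come from a
# frozen graph):
#
#   model_flags, toco_flags = build_toco_convert_protos(
#       input_tensors=[in_tensor],
#       output_tensors=[out_tensor],
#       inference_type=lite_constants.QUANTIZED_UINT8,
#       quantized_input_stats=[(0., 1.)])  # (mean, std_dev) per input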
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
*args, **kwargs):
""""Convert a model using TOCO.
This function is used to convert GraphDefs that cannot be loaded into
TensorFlow to TFLite. Conversion can be customized by providing arguments
that are forwarded to `build_toco_convert_protos` (see documentation for
details).
Args:
    input_data: Input data (i.e. often `sess.graph_def`).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` is None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `output_tensors` is None.
(default None)
    *args: See `build_toco_convert_protos`.
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags = build_toco_convert_protos(
input_tensors=[], output_tensors=[], *args, **kwargs)
for idx, (name, shape) in enumerate(input_arrays_with_shape):
input_array = model_flags.input_arrays.add()
if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:
if (("quantized_input_stats" not in kwargs) or
(not kwargs["quantized_input_stats"])):
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = kwargs[
"quantized_input_stats"][idx]
input_array.name = name
input_array.shape.dims.extend(map(int, shape))
for name in output_arrays:
model_flags.output_arrays.append(name)
data = toco_convert_protos(model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString())
return data
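# A minimal usage sketch (hypothetical names and shapes; `graph_def` cannot
# be loaded into a TensorFlow session):
#
#   tflite_model = toco_convert_graph_def(
#       graph_def,
#       input_arrays_with_shape=[("input", [1, 16, 16, 3])],
#       output_arrays=["output"])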
def toco_convert_impl(input_data, input_tensors, output_tensors, *args,
**kwargs):
""""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details).
Args:
    input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`.
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags = build_toco_convert_protos(
input_tensors, output_tensors, *args, **kwargs)
data = toco_convert_protos(model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString())
return data
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
"""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details). This function has
been deprecated. Please use `lite.TFLiteConverter` instead.
Args:
    input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`.
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
return toco_convert_impl(input_data, input_tensors, output_tensors, *args,
**kwargs)
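# Deprecated entry point; the supported path is via TFLiteConverter, roughly
# (hypothetical names):
#
#   converter = tf.compat.v1.lite.TFLiteConverter.from_session(
#       sess, [in_tensor], [out_tensor])
#   tflite_model = converter.convert()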
|
tensorflow-master
|
tensorflow/lite/python/convert.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python.convert import ConverterError
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromConstructor(test_util.TensorFlowTestCase):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TFLiteConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
@test_util.run_v1_only('Incompatible with 2.0.')
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testString(self):
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([4] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('Reshape', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([2, 2] == output_details[0]['shape']).all())
# TODO(b/122659643): Test setting/getting string data via the python
# interpreter API after support has been added.
def testQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testIntermediateInputArray(self):
"""Convert a model from an intermediate input array."""
in_tensor_init = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
in_tensor_final = in_tensor_init + in_tensor_init
out_tensor = in_tensor_final + in_tensor_final
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('add', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSizeNoneInvalid(self):
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test None as shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
def testScalarValid(self):
# Construct a graph using a scalar (empty shape) input.
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test conversion with the scalar input shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([] == output_details[0]['shape']).all())
# Validate inference using the scalar inputs/outputs.
test_input = np.array(4.0, dtype=np.float32)
expected_output = np.array(8.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testSizeInvalid(self):
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testDumpGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
converter.dump_graphviz_video = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure graphviz folder has more data after using video flag.
num_items_graphviz_video = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz_video > num_items_graphviz)
def testInferenceInputType(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testDefaultRangesStats(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testPostTrainingQuantizeDeprecatedAttribute(self):
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
self.assertFalse(quantized_converter.post_training_quantize)
quantized_converter.post_training_quantize = True
self.assertTrue(quantized_converter.post_training_quantize)
self.assertEqual(quantized_converter.optimizations, [lite.Optimize.DEFAULT])
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
def testPostTrainingQuantize(self):
np.random.seed(0)
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
def _getCalibrationQuantizeModel(self):
np.random.seed(0)
inp = array_ops.placeholder(
dtype=dtypes.float32, shape=(1, 5, 5, 3), name='input')
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, 16]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
return (inp, output, calibration_gen)
def testPostTrainingCalibrateAndQuantize(self):
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testCalibrateAndQuantizeBuiltinInt8(self):
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testPostTrainingCalibrateAndQuantizeInt8Inputs(self):
inp, output, calibration_gen = self._getCalibrationQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.inference_input_type = lite_constants.INT8
quantized_converter.inference_output_type = lite_constants.INT8
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The input and output types should be int8.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.int8, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.int8, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
def testFloatTocoConverter(self):
"""Tests deprecated test TocoConverter."""
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the interpreter is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testMultipleOutputNodeNames(self):
"""Tests converting a graph with an op that have multiple outputs."""
input_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
out0, out1, out2, out3 = array_ops.split(input_tensor, [1, 1, 1, 1], axis=0)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [input_tensor],
[out0, out1, out2, out3])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
interpreter.set_tensor(input_details[0]['index'],
np.asarray([1.0, 2.0, 3.0, 4.0], dtype=np.float32))
interpreter.invoke()
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual(1.0, interpreter.get_tensor(output_details[0]['index']))
self.assertEqual(2.0, interpreter.get_tensor(output_details[1]['index']))
self.assertEqual(3.0, interpreter.get_tensor(output_details[2]['index']))
self.assertEqual(4.0, interpreter.get_tensor(output_details[3]['index']))
@test_util.run_in_graph_and_eager_modes
def testFunctions(self):
"""Tests tf.function in 1.X."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output_node', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInferenceInputOutputTypeFloatDefault(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testInferenceInputOutputTypeQuantizedUint8Default(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testReusingConverterWithDifferentPostTrainingQuantization(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.post_training_quantize = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
converter.post_training_quantize = False
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testResizingIntermediateDynamicTensor(self):
# This is a regression test for the case where shape of dynamic output
# tensors changes between invocations.
# See also https://github.com/tensorflow/tensorflow/issues/26549
input_tensor = array_ops.placeholder(shape=[1, 1], dtype=dtypes.float32)
input2_tensor = array_ops.placeholder(shape=[1], dtype=dtypes.float32)
    # The bug is triggered only when the dynamic tensor is intermediate, so we
    # put some other ops around it.
neg = math_ops.negative(input2_tensor)
padding = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int32)
output_tensor = array_ops.pad(input_tensor, padding) + neg
sess = session.Session()
converter = lite.TFLiteConverter.from_session(
sess, [input_tensor, padding, input2_tensor], [output_tensor])
tflite_model = converter.convert()
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[1]['index'],
np.array([[1, 1], [1, 1]], dtype=np.int32))
interpreter.invoke()
# Without the fix, invocation will fail when changing the shape of
# intermediate dynamic tensors.
interpreter.set_tensor(input_details[1]['index'],
np.array([[2, 2], [2, 2]], dtype=np.int32))
interpreter.invoke()
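# The regression above exercises dynamic intermediate tensors; below is a
# related, minimal sketch of resizing a model *input* at runtime. It is
# illustrative only and is not invoked by the tests; the shape passed to
# resize_tensor_input is an assumed example value.
def _example_resize_input(tflite_model):
  interpreter = Interpreter(model_content=tflite_model)
  input_index = interpreter.get_input_details()[0]['index']
  # After resizing an input, tensors must be (re)allocated before invoking.
  interpreter.resize_tensor_input(input_index, [2, 16, 16, 3])
  interpreter.allocate_tensors()
  return interpreter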
@test_util.run_v1_only('Incompatible with 2.0.')
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
def testFloatTocoConverter(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
class FromFrozenGraphObjectDetection(test_util.TensorFlowTestCase):
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
# Looks for the model file which is saved in a different location internally
# and externally.
filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
if not os.path.exists(filename):
filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
'../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
if not os.path.exists(filename):
raise IOError("File '{0}' does not exist.".format(filename))
self._graph_def_file = filename
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
@test_util.run_v1_only('Incompatible with 2.0.')
class FromSavedModelTest(test_util.TensorFlowTestCase):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
# Since we only partially specify the input, this is not allowed.
with self.assertRaises(ConverterError):
_ = converter.convert()
# Check case where input shape is None.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
# Since we only partially specify the input, this is not allowed.
with self.assertRaises(ConverterError):
_ = converter.convert()
def testSimpleModelTocoConverter(self):
"""Test a SavedModel with deprecated TocoConverter."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
class MyAddLayer(keras.layers.Layer):
def __init__(self, increment, **kwargs):
super(MyAddLayer, self).__init__(**kwargs)
self._increment = increment
def call(self, inputs):
return inputs + self._increment
def get_config(self):
config = super(MyAddLayer, self).get_config()
config['increment'] = self._increment
return config
@test_util.run_v1_only('Incompatible with 2.0.')
class FromKerasFile(test_util.TensorFlowTestCase, parameterized.TestCase):
def setUp(self):
super(FromKerasFile, self).setUp()
self._keras_file = None
self._custom_objects = None
if not context.executing_eagerly():
keras.backend.clear_session()
def tearDown(self):
if self._keras_file:
os.remove(self._keras_file)
super(FromKerasFile, self).tearDown()
def _getSequentialModel(self, include_custom_layer=False):
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
if include_custom_layer:
model.add(MyAddLayer(1.0))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
    fd, self._keras_file = tempfile.mkstemp('.h5')
    try:
      keras.models.save_model(model, self._keras_file)
    finally:
      os.close(fd)
if include_custom_layer:
self._custom_objects = {'MyAddLayer': MyAddLayer}
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testSequentialModel(self, test_context):
"""Test a Sequential tf.keras model with default inputs."""
with test_context():
self._getSequentialModel()
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testCustomLayer(self, test_context):
"""Test a Sequential tf.keras model with default inputs."""
with test_context():
self._getSequentialModel(include_custom_layer=True)
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, custom_objects=self._custom_objects)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(
self._keras_file, custom_objects=self._custom_objects)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testSequentialModelInputArray(self):
"""Test a Sequential tf.keras model testing input arrays argument."""
self._getSequentialModel()
# Invalid input array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_arrays=['invalid-input'])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_arrays=['dense_input'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testSequentialModelInputShape(self):
"""Test a Sequential tf.keras model testing input shapes argument."""
self._getSequentialModel()
# Passing in shape of invalid input array raises error.
with self.assertRaises(ValueError) as error:
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_shapes={'invalid-input': [2, 3]})
self.assertEqual(
"Invalid tensor 'invalid-input' found in tensor shapes map.",
str(error.exception))
# Passing in shape of valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, input_shapes={'dense_input': [2, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check input shape from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertTrue(([2, 3] == input_details[0]['shape']).all())
def testSequentialModelOutputArray(self):
"""Test a Sequential tf.keras model testing output arrays argument."""
self._getSequentialModel()
# Invalid output array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
self._keras_file, output_arrays=['invalid-output'])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
# Valid output array.
converter = lite.TFLiteConverter.from_keras_model_file(
self._keras_file, output_arrays=['time_distributed/Reshape_1'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
@parameterized.named_parameters(('_graph', context.graph_mode),
('_eager', context.eager_mode))
def testFunctionalModel(self, test_context):
"""Test a Functional tf.keras model with default inputs."""
with test_context():
inputs = keras.layers.Input(shape=(3,), name='input')
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testFunctionalModelMultipleInputs(self):
"""Test a Functional tf.keras model with multiple inputs and outputs."""
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.mae],
loss_weights=[1., 0.5])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
model.predict([input_a_np, input_b_np], batch_size=5)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('input_a', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('input_b', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 2)
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('dropout/Identity', output_details[1]['name'])
self.assertEqual(np.float32, output_details[1]['dtype'])
self.assertTrue(([1, 4] == output_details[1]['shape']).all())
self.assertEqual((0., 0.), output_details[1]['quantization'])
def testFunctionalSequentialModel(self):
"""Test a Functional tf.keras model containing a Sequential model."""
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model = keras.models.Model(model.input, model.output)
model.compile(
loss=keras.losses.MSE,
optimizer='sgd',
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
model.predict(x)
fd, self._keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, self._keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(self._keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
def testSequentialModelTocoConverter(self):
"""Test a Sequential tf.keras model with deprecated TocoConverter."""
self._getSequentialModel()
converter = lite.TocoConverter.from_keras_model_file(self._keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
@test_util.run_v1_only('Incompatible with 2.0.')
class GrapplerTest(test_util.TensorFlowTestCase):
def testConstantFolding(self):
    # Constant folding handles the tf.broadcast_to operation, which was not
    # supported by TFLite at the time this test was added.
in_tensor = array_ops.placeholder(shape=[3, 3], dtype=dtypes.float32)
y_const = constant_op.constant([1., 2., 3.])
y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])
out_tensor = math_ops.matmul(in_tensor, y_broadcast, name='output')
sess = session.Session()
# Convert model.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([3, 3] == input_details[0]['shape']).all())
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([3, 3] == output_details[0]['shape']).all())
class ImportOpsUtilTest(test_util.TensorFlowTestCase):
def testGetPotentiallySupportedOps(self):
self.assertIsNotNone(lite.get_potentially_supported_ops())
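# A condensed sketch of the conversion flow exercised throughout this file:
# build a graph, convert it with TFLiteConverter, then load the result into
# the TFLite Interpreter. The shape used is an illustrative assumption and
# this helper is not called by the tests.
def _example_conversion_flow():
  in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)
  out_tensor = in_tensor + in_tensor
  sess = session.Session()
  converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                [out_tensor])
  # Optional post-training weight quantization, as in testPostTrainingQuantize.
  converter.optimizations = [lite.Optimize.DEFAULT]
  tflite_model = converter.convert()
  interpreter = Interpreter(model_content=tflite_model)
  interpreter.allocate_tensors()
  return interpreter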
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/lite/python/lite_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for util.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(nupurgarg): Add test for Grappler and frozen graph related functions.
@test_util.run_v1_only("Incompatible with 2.0.")
class UtilTest(test_util.TensorFlowTestCase):
def testConvertDtype(self):
self.assertEqual(
util.convert_dtype_to_tflite_type(lite_constants.FLOAT),
_types_pb2.FLOAT)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.float32), _types_pb2.FLOAT)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.int32), _types_pb2.INT32)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.int64), _types_pb2.INT64)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.string), _types_pb2.STRING)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.uint8),
_types_pb2.QUANTIZED_UINT8)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.complex64),
_types_pb2.COMPLEX64)
self.assertEqual(
util.convert_dtype_to_tflite_type(dtypes.half), _types_pb2.FLOAT16)
with self.assertRaises(ValueError):
util.convert_dtype_to_tflite_type(dtypes.bool)
def testTensorName(self):
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
# out_tensors should have names: "split:0", "split:1", "split:2", "split:3".
out_tensors = array_ops.split(
value=in_tensor, num_or_size_splits=[1, 1, 1, 1], axis=0)
expect_names = ["split", "split:1", "split:2", "split:3"]
for i in range(len(expect_names)):
got_name = util.get_tensor_name(out_tensors[i])
self.assertEqual(got_name, expect_names[i])
@test_util.enable_control_flow_v2
def testRemoveLowerUsingSwitchMerge(self):
i = array_ops.placeholder(shape=(), dtype=dtypes.int32)
c = lambda i: math_ops.less(i, 10)
b = lambda i: math_ops.add(i, 1)
control_flow_ops.while_loop(c, b, [i])
sess = session.Session()
new_graph_def = util._remove_lower_using_switch_merge(sess.graph_def)
lower_using_switch_merge_is_removed = False
for node in new_graph_def.node:
if node.op == "While":
if not node.attr["_lower_using_switch_merge"].b:
lower_using_switch_merge_is_removed = True
self.assertEqual(lower_using_switch_merge_is_removed, True)
@test_util.run_v1_only("Incompatible with 2.0.")
class TensorFunctionsTest(test_util.TensorFlowTestCase):
def testGetTensorsValid(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
tensors = util.get_tensors_from_tensor_names(sess.graph, ["Placeholder"])
self.assertEqual("Placeholder:0", tensors[0].name)
def testGetTensorsInvalid(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
with self.assertRaises(ValueError) as error:
util.get_tensors_from_tensor_names(sess.graph, ["invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
def testSetTensorShapeValid(self):
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
util.set_tensor_shapes([tensor], {"Placeholder": [5, 3, 5]})
self.assertEqual([5, 3, 5], tensor.shape.as_list())
def testSetTensorShapeNoneValid(self):
tensor = array_ops.placeholder(dtype=dtypes.float32)
self.assertEqual(None, tensor.shape)
util.set_tensor_shapes([tensor], {"Placeholder": [1, 3, 5]})
self.assertEqual([1, 3, 5], tensor.shape.as_list())
def testSetTensorShapeArrayInvalid(self):
# Tests set_tensor_shape where the tensor name passed in doesn't exist.
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
with self.assertRaises(ValueError) as error:
util.set_tensor_shapes([tensor], {"invalid-input": [5, 3, 5]})
self.assertEqual(
"Invalid tensor 'invalid-input' found in tensor shapes map.",
str(error.exception))
self.assertEqual([None, 3, 5], tensor.shape.as_list())
@test_util.run_deprecated_v1
def testSetTensorShapeDimensionInvalid(self):
    # Tests set_tensor_shape where the shape passed in is incompatible.
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
with self.assertRaises(ValueError) as error:
util.set_tensor_shapes([tensor], {"Placeholder": [1, 5, 5]})
self.assertIn("The shape of tensor 'Placeholder' cannot be changed",
str(error.exception))
self.assertEqual([None, 3, 5], tensor.shape.as_list())
def testSetTensorShapeEmpty(self):
tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
self.assertEqual([None, 3, 5], tensor.shape.as_list())
util.set_tensor_shapes([tensor], {})
self.assertEqual([None, 3, 5], tensor.shape.as_list())
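# A minimal sketch combining the helpers tested above: resolve tensors by name
# in a graph, then pin their shapes before conversion. The tensor name and
# shape below are illustrative assumptions.
def _example_prepare_tensors(graph):
  tensors = util.get_tensors_from_tensor_names(graph, ["Placeholder"])
  util.set_tensor_shapes(tensors, {"Placeholder": [1, 16, 16, 3]})
  return tensors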
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/lite/python/util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TF-Lite interpreter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import sys
import numpy as np
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
# pylint: disable=g-inconsistent-quotes
_interpreter_wrapper = LazyLoader(
"_interpreter_wrapper", globals(),
"tensorflow.lite.python.interpreter_wrapper."
"tensorflow_wrap_interpreter_wrapper")
# pylint: enable=g-inconsistent-quotes
del LazyLoader
except ImportError:
  # When the full TensorFlow Python PIP is not available, do not use lazy
  # loading and instead use the tflite_runtime path.
from tflite_runtime.lite.python import interpreter_wrapper as _interpreter_wrapper
def tf_export_dummy(*x, **kwargs):
del x, kwargs
return lambda x: x
_tf_export = tf_export_dummy
class Delegate(object):
"""Python wrapper class to manage TfLiteDelegate objects.
The shared library is expected to have two functions:
TfLiteDelegate* tflite_plugin_create_delegate(char**, char**, size_t)
void tflite_plugin_destroy_delegate(TfLiteDelegate*)
The first one creates a delegate object. It may return NULL to indicate an
  error. The second one destroys the delegate object and must be called for
  every created delegate object. Passing NULL as the argument value is
  allowed, i.e.
tflite_plugin_destroy_delegate(tflite_plugin_create_delegate(...))
always works.
"""
def __init__(self, library, options=None):
"""Loads delegate from the shared library.
Args:
library: Shared library name.
options: Dictionary of options that are required to load the delegate. All
keys and values in the dictionary should be serializable. Consult the
documentation of the specific delegate for required and legal options.
(default None)
"""
self._library = ctypes.pydll.LoadLibrary(library)
self._library.tflite_plugin_create_delegate.argtypes = [
ctypes.POINTER(ctypes.c_char_p),
ctypes.POINTER(ctypes.c_char_p), ctypes.c_int
]
self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p
# Convert the options from a dictionary to lists of char pointers.
options = options or {}
options_keys = (ctypes.c_char_p * len(options))()
options_values = (ctypes.c_char_p * len(options))()
for idx, (key, value) in enumerate(options.items()):
options_keys[idx] = str(key)
options_values[idx] = str(value)
# Do not make a copy of _delegate_ptr. It is freed by Delegate's finalizer.
self._delegate_ptr = self._library.tflite_plugin_create_delegate(
options_keys, options_values, len(options))
def __del__(self):
self._library.tflite_plugin_destroy_delegate.argtypes = [ctypes.c_void_p]
self._library.tflite_plugin_destroy_delegate(self._delegate_ptr)
def _get_native_delegate_pointer(self):
"""Returns the native TfLiteDelegate pointer.
It is not safe to copy this pointer because it needs to be freed.
Returns:
TfLiteDelegate *
"""
return self._delegate_ptr
@_tf_export('lite.experimental.load_delegate')
def load_delegate(library, options=None):
"""Returns loaded Delegate object.
Args:
library: Name of shared library containing the
[TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates).
options: Dictionary of options that are required to load the delegate. All
keys and values in the dictionary should be convertible to str. Consult
the documentation of the specific delegate for required and legal options.
(default None)
Returns:
Delegate object.
Raises:
ValueError: Delegate failed to load.
"""
delegate = Delegate(library, options)
if not delegate._get_native_delegate_pointer(): # pylint: disable=protected-access
raise ValueError('Failed to load delegate from {}'.format(library))
return delegate
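# A minimal usage sketch for `load_delegate`: the library name and option key
# below are placeholder assumptions only; consult the documentation of the
# specific delegate for real values. Not executed at import time.
def _example_load_delegate_usage():
  delegate = load_delegate('libexample_delegate.so', options={'device': '0'})
  return Interpreter(
      model_path='model.tflite', experimental_delegates=[delegate])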
@_tf_export('lite.Interpreter')
class Interpreter(object):
"""Interpreter interface for TensorFlow Lite Models.
This makes the TensorFlow Lite interpreter accessible in Python.
It is possible to use this interpreter in a multithreaded Python environment,
but you must be sure to call functions of a particular instance from only
one thread at a time. So if you want to have 4 threads running different
inferences simultaneously, create an interpreter for each one as thread-local
data. Similarly, if you are calling invoke() in one thread on a single
interpreter but you want to use tensor() on another thread once it is done,
you must use a synchronization primitive between the threads to ensure invoke
has returned before calling tensor().
"""
def __init__(self,
model_path=None,
model_content=None,
experimental_delegates=None):
"""Constructor.
Args:
model_path: Path to TF-Lite Flatbuffer file.
model_content: Content of model.
experimental_delegates: Experimental. Subject to change. List of
[TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
objects returned by lite.load_delegate().
Raises:
      ValueError: If the interpreter could not be created.
"""
if model_path and not model_content:
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile(
model_path))
if not self._interpreter:
raise ValueError('Failed to open {}'.format(model_path))
elif model_content and not model_path:
# Take a reference, so the pointer remains valid.
      # Since Python strings are immutable, PyString_XX functions
      # will always return the same pointer.
self._model_content = model_content
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer(
model_content))
    elif not model_path and not model_content:
raise ValueError('`model_path` or `model_content` must be specified.')
else:
raise ValueError('Can\'t both provide `model_path` and `model_content`')
# Each delegate is a wrapper that owns the delegates that have been loaded
# as plugins. The interpreter wrapper will be using them, but we need to
# hold them in a list so that the lifetime is preserved at least as long as
# the interpreter wrapper.
self._delegates = []
if experimental_delegates:
self._delegates = experimental_delegates
for delegate in self._delegates:
self._interpreter.ModifyGraphWithDelegate(
delegate._get_native_delegate_pointer()) # pylint: disable=protected-access
def allocate_tensors(self):
self._ensure_safe()
return self._interpreter.AllocateTensors()
def _safe_to_run(self):
"""Returns true if there exist no numpy array buffers.
This means it is safe to run tflite calls that may destroy internally
    allocated memory. This works because, in wrapper.cc, we have made
    self._interpreter the base object of the returned numpy arrays.
"""
# NOTE, our tensor() call in cpp will use _interpreter as a base pointer.
# If this environment is the only _interpreter, then the ref count should be
# 2 (1 in self and 1 in temporary of sys.getrefcount).
return sys.getrefcount(self._interpreter) == 2
def _ensure_safe(self):
"""Makes sure no numpy arrays pointing to internal buffers are active.
This should be called from any function that will call a function on
_interpreter that may reallocate memory e.g. invoke(), ...
Raises:
      RuntimeError: If there exist numpy objects pointing to internal memory.
"""
if not self._safe_to_run():
raise RuntimeError("""There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.""")
def _get_tensor_details(self, tensor_index):
"""Gets tensor details.
Args:
tensor_index: Tensor index of tensor to query.
Returns:
a dictionary containing the name, index, shape and type of the tensor.
Raises:
ValueError: If tensor_index is invalid.
"""
tensor_index = int(tensor_index)
tensor_name = self._interpreter.TensorName(tensor_index)
tensor_size = self._interpreter.TensorSize(tensor_index)
tensor_type = self._interpreter.TensorType(tensor_index)
tensor_quantization = self._interpreter.TensorQuantization(tensor_index)
if not tensor_name or not tensor_type:
raise ValueError('Could not get tensor details')
details = {
'name': tensor_name,
'index': tensor_index,
'shape': tensor_size,
'dtype': tensor_type,
'quantization': tensor_quantization,
}
return details
def get_tensor_details(self):
"""Gets tensor details for every tensor with valid tensor details.
Tensors where required information about the tensor is not found are not
added to the list. This includes temporary tensors without a name.
Returns:
A list of dictionaries containing tensor information.
"""
tensor_details = []
for idx in range(self._interpreter.NumTensors()):
try:
tensor_details.append(self._get_tensor_details(idx))
except ValueError:
pass
return tensor_details
def get_input_details(self):
"""Gets model input details.
Returns:
A list of input details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.InputIndices()
]
def set_tensor(self, tensor_index, value):
"""Sets the value of the input tensor. Note this copies data in `value`.
If you want to avoid copying, you can use the `tensor()` function to get a
numpy buffer pointing to the input buffer in the tflite interpreter.
Args:
tensor_index: Tensor index of tensor to set. This value can be gotten from
the 'index' field in get_input_details.
value: Value of tensor to set.
Raises:
ValueError: If the interpreter could not set the tensor.
"""
self._interpreter.SetTensor(tensor_index, value)
def resize_tensor_input(self, input_index, tensor_size):
"""Resizes an input tensor.
Args:
input_index: Tensor index of input to set. This value can be gotten from
the 'index' field in get_input_details.
tensor_size: The tensor_shape to resize the input to.
Raises:
ValueError: If the interpreter could not resize the input tensor.
"""
self._ensure_safe()
    # `ResizeInputTensor` now only accepts an int32 numpy array as the
    # `tensor_size` parameter.
tensor_size = np.array(tensor_size, dtype=np.int32)
self._interpreter.ResizeInputTensor(input_index, tensor_size)
def get_output_details(self):
"""Gets model output details.
Returns:
A list of output details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.OutputIndices()
]
def get_tensor(self, tensor_index):
"""Gets the value of the input tensor (get a copy).
If you wish to avoid the copy, use `tensor()`. This function cannot be used
to read intermediate results.
Args:
tensor_index: Tensor index of tensor to get. This value can be gotten from
the 'index' field in get_output_details.
Returns:
a numpy array.
"""
return self._interpreter.GetTensor(tensor_index)
def tensor(self, tensor_index):
"""Returns function that gives a numpy view of the current tensor buffer.
This allows reading and writing to these tensors without copies. This more
closely mirrors the C++ Interpreter class interface's tensor() member, hence
the name. Be careful not to hold these output references through calls
to `allocate_tensors()` and `invoke()`. This function cannot be used to read
intermediate results.
Usage:
```
interpreter.allocate_tensors()
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])
for i in range(10):
input().fill(3.)
interpreter.invoke()
print("inference %s" % output())
```
Notice how this function avoids making a numpy array directly. This is
because it is important not to hold actual numpy views to the data longer
than necessary. If you do, then the interpreter can no longer be invoked,
because it is possible the interpreter would resize and invalidate the
referenced tensors. The NumPy API doesn't allow any mutability of
the underlying buffers.
WRONG:
```
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])()
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])()
interpreter.allocate_tensors() # This will throw RuntimeError
for i in range(10):
input.fill(3.)
interpreter.invoke() # This will throw a RuntimeError since input/output references are still held
```
Args:
tensor_index: Tensor index of the tensor to access. This value can be
obtained from the 'index' field in get_input_details or get_output_details.
Returns:
A function that can return a new numpy array pointing to the internal
TFLite tensor state at any point. It is safe to hold the function forever,
but it is not safe to hold the numpy array forever.
"""
return lambda: self._interpreter.tensor(self._interpreter, tensor_index)
def invoke(self):
"""Invoke the interpreter.
Be sure to set the input sizes, allocate tensors and fill values before
calling this. Also, note that this function releases the GIL so heavy
computation can be done in the background while the Python interpreter
continues. No other function on this object should be called while the
invoke() call has not finished.
Raises:
ValueError: If the underlying interpreter fails.
"""
self._ensure_safe()
self._interpreter.Invoke()
def reset_all_variables(self):
return self._interpreter.ResetVariableTensors()
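# A hedged usage sketch, not part of the original module: it walks through the
# allocate/set/invoke/get round trip that the method docstrings above describe.
# The "model.tflite" path is a placeholder assumption; `Interpreter` and `np`
# are the names defined at the top of this file.
def _example_basic_inference():
  """Illustrative only; any TFLite flatbuffer with one input would work."""
  interpreter = Interpreter(model_path="model.tflite")  # Hypothetical model.
  interpreter.allocate_tensors()
  input_details = interpreter.get_input_details()
  output_details = interpreter.get_output_details()
  # set_tensor() copies the data; tensor() would give a zero-copy accessor.
  data = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
  interpreter.set_tensor(input_details[0]["index"], data)
  interpreter.invoke()
  # get_tensor() returns a copy, so the result is safe to hold indefinitely.
  return interpreter.get_tensor(output_details[0]["index"])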
|
tensorflow-master
|
tensorflow/lite/python/interpreter.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import io
import numpy as np
import six
from tensorflow.lite.python import interpreter as interpreter_wrapper
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class InterpreterTest(test_util.TensorFlowTestCase):
def testFloat(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual((0.0, 0), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0.0, 0), output_details[0]['quantization'])
test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testUint8(self):
model_path = resource_loader.get_path_to_datafile(
'testdata/permute_uint8.tflite')
with io.open(model_path, 'rb') as model_file:
data = model_file.read()
interpreter = interpreter_wrapper.Interpreter(model_content=data)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 4] == input_details[0]['shape']).all())
self.assertEqual((1.0, 0), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((1.0, 0), output_details[0]['quantization'])
test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
interpreter.resize_tensor_input(input_details[0]['index'],
test_input.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
def testString(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/gather_string.tflite'))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.string_, input_details[0]['dtype'])
self.assertTrue(([10] == input_details[0]['shape']).all())
self.assertEqual((0.0, 0), input_details[0]['quantization'])
self.assertEqual('indices', input_details[1]['name'])
self.assertEqual(np.int64, input_details[1]['dtype'])
self.assertTrue(([3] == input_details[1]['shape']).all())
self.assertEqual((0.0, 0), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.string_, output_details[0]['dtype'])
self.assertTrue(([3] == output_details[0]['shape']).all())
self.assertEqual((0.0, 0), output_details[0]['quantization'])
test_input = np.array([1, 2, 3], dtype=np.int64)
interpreter.set_tensor(input_details[1]['index'], test_input)
test_input = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])
expected_output = np.array([b'b', b'c', b'd'])
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):
def testInvalidModelContent(self):
with self.assertRaisesRegexp(ValueError,
'Model provided has model identifier \''):
interpreter_wrapper.Interpreter(model_content=six.b('garbage'))
def testInvalidModelFile(self):
with self.assertRaisesRegexp(
ValueError, 'Could not open \'totally_invalid_file_name\''):
interpreter_wrapper.Interpreter(
model_path='totally_invalid_file_name')
def testInvokeBeforeReady(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
with self.assertRaisesRegexp(RuntimeError,
'Invoke called on model that is not ready'):
interpreter.invoke()
def testInvalidModelFileContent(self):
with self.assertRaisesRegexp(
ValueError, '`model_path` or `model_content` must be specified.'):
interpreter_wrapper.Interpreter(model_path=None, model_content=None)
def testInvalidIndex(self):
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
interpreter.allocate_tensors()
# Invalid tensor index passed.
with self.assertRaisesRegexp(ValueError, 'Tensor with no shape found.'):
interpreter._get_tensor_details(4)
class InterpreterTensorAccessorTest(test_util.TensorFlowTestCase):
def setUp(self):
self.interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'))
self.interpreter.allocate_tensors()
self.input0 = self.interpreter.get_input_details()[0]['index']
self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)
def testTensorAccessor(self):
"""Check that tensor returns a reference."""
array_ref = self.interpreter.tensor(self.input0)
np.copyto(array_ref(), self.initial_data)
self.assertAllEqual(array_ref(), self.initial_data)
self.assertAllEqual(
self.interpreter.get_tensor(self.input0), self.initial_data)
def testGetTensorAccessor(self):
"""Check that get_tensor returns a copy."""
self.interpreter.set_tensor(self.input0, self.initial_data)
array_initial_copy = self.interpreter.get_tensor(self.input0)
new_value = np.add(1., array_initial_copy)
self.interpreter.set_tensor(self.input0, new_value)
self.assertAllEqual(array_initial_copy, self.initial_data)
self.assertAllEqual(self.interpreter.get_tensor(self.input0), new_value)
def testBase(self):
self.assertTrue(self.interpreter._safe_to_run())
_ = self.interpreter.tensor(self.input0)
self.assertTrue(self.interpreter._safe_to_run())
in0 = self.interpreter.tensor(self.input0)()
self.assertFalse(self.interpreter._safe_to_run())
in0b = self.interpreter.tensor(self.input0)()
self.assertFalse(self.interpreter._safe_to_run())
# Now get rid of the buffers so that we can evaluate.
del in0
del in0b
self.assertTrue(self.interpreter._safe_to_run())
def testBaseProtectsFunctions(self):
in0 = self.interpreter.tensor(self.input0)()
# Make sure we get an exception if we try to run an unsafe operation
with self.assertRaisesRegexp(
RuntimeError, 'There is at least 1 reference'):
_ = self.interpreter.allocate_tensors()
# Make sure we get an exception if we try to run an unsafe operation
with self.assertRaisesRegexp(
RuntimeError, 'There is at least 1 reference'):
_ = self.interpreter.invoke()
# Now test that we can run
del in0 # this is our only buffer reference, so now it is safe to change
in0safe = self.interpreter.tensor(self.input0)
_ = self.interpreter.allocate_tensors()
del in0safe # make sure in0safe is held but lint doesn't complain
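# A hedged sketch, not an original test: the safe access pattern the tests
# above exercise. Hold the function returned by tensor() for as long as you
# like, but materialize the numpy view only briefly so that
# allocate_tensors()/invoke() remain legal. `interpreter` is assumed to be an
# already-allocated Interpreter and `index` a valid tensor index.
def _example_safe_tensor_access(interpreter, index):
  accessor = interpreter.tensor(index)  # Safe to hold indefinitely.
  accessor()[:] = 0.  # The temporary view is dropped after this statement.
  interpreter.invoke()  # Legal: no live numpy views remain.
  return interpreter.get_tensor(index)  # A copy, safe to keep around.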
class InterpreterDelegateTest(test_util.TensorFlowTestCase):
def setUp(self):
self._delegate_file = resource_loader.get_path_to_datafile(
'testdata/test_delegate.so')
self._model_file = resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite')
# Load the library to reset the counters.
library = ctypes.pydll.LoadLibrary(self._delegate_file)
library.initialize_counters()
def _TestInterpreter(self, model_path, options=None):
"""Test wrapper function that creates an interpreter with the delegate."""
delegate = interpreter_wrapper.load_delegate(self._delegate_file, options)
return interpreter_wrapper.Interpreter(
model_path=model_path, experimental_delegates=[delegate])
def testDelegate(self):
"""Tests the delegate creation and destruction."""
interpreter = self._TestInterpreter(model_path=self._model_file)
lib = interpreter._delegates[0]._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
del interpreter
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 1)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
def testMultipleInterpreters(self):
delegate = interpreter_wrapper.load_delegate(self._delegate_file)
lib = delegate._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
interpreter_a = interpreter_wrapper.Interpreter(
model_path=self._model_file, experimental_delegates=[delegate])
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
interpreter_b = interpreter_wrapper.Interpreter(
model_path=self._model_file, experimental_delegates=[delegate])
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
del delegate
del interpreter_a
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
del interpreter_b
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 1)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
def testOptions(self):
delegate_a = interpreter_wrapper.load_delegate(self._delegate_file)
lib = delegate_a._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 0)
delegate_b = interpreter_wrapper.load_delegate(
self._delegate_file, options={
'unused': False,
'options_counter': 2
})
lib = delegate_b._library
self.assertEqual(lib.get_num_delegates_created(), 2)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 2)
del delegate_a
del delegate_b
self.assertEqual(lib.get_num_delegates_created(), 2)
self.assertEqual(lib.get_num_delegates_destroyed(), 2)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 2)
def testFail(self):
with self.assertRaisesRegexp(ValueError, 'Failed to load delegate from .*'):
interpreter_wrapper.load_delegate(
self._delegate_file, options={'fail': 'fail'})
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/lite/python/interpreter_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to select TF op usage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import lite
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import tracking
@test_util.run_v1_only('Incompatible with 2.0.')
class FromSessionTest(test_util.TensorFlowTestCase):
def testFlexMode(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
def testDeprecatedFlags(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.target_ops = set([lite.OpsSet.SELECT_TF_OPS])
# Ensure `target_ops` is set to the correct value after flag deprecation.
self.assertEqual(converter.target_ops, set([lite.OpsSet.SELECT_TF_OPS]))
self.assertEqual(converter.target_spec.supported_ops,
set([lite.OpsSet.SELECT_TF_OPS]))
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
class FromConcreteFunctionTest(test_util.TensorFlowTestCase):
@test_util.run_v2_only
def testFloat(self):
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
tflite_model = converter.convert()
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
if __name__ == '__main__':
test.main()
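# A hedged sketch, not an original test: the conversion pattern exercised by
# the tests above -- enabling SELECT_TF_OPS so unsupported TensorFlow ops are
# kept as Flex ops. `sess`, `in_tensor`, and `out_tensor` are assumed to come
# from an existing TF1 graph, as in FromSessionTest.
def _example_flex_conversion(sess, in_tensor, out_tensor):
  converter = lite.TFLiteConverter.from_session(sess, [in_tensor], [out_tensor])
  converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
  # The resulting model requires the Flex delegate at inference time.
  return converter.convert()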
|
tensorflow-master
|
tensorflow/lite/python/lite_flex_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python command line interface for running TOCO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.platform import app
def _parse_array(values, type_fn=str):
if values is not None:
return [type_fn(val) for val in values.split(",") if val]
return None
def _parse_set(values):
if values is not None:
return set([item for item in values.split(",") if item])
return None
def _parse_inference_type(value, flag):
"""Converts the inference type to the value of the constant.
Args:
value: str representing the inference type.
flag: str representing the flag name.
Returns:
tf.dtype.
Raises:
ValueError: Unsupported value.
"""
if value == "FLOAT":
return lite_constants.FLOAT
if value == "QUANTIZED_UINT8":
return lite_constants.QUANTIZED_UINT8
raise ValueError("Unsupported value for --{0}. Only FLOAT and "
"QUANTIZED_UINT8 are supported.".format(flag))
def _get_toco_converter(flags):
"""Makes a TFLiteConverter object based on the flags provided.
Args:
flags: argparse.Namespace object containing TFLite flags.
Returns:
TFLiteConverter object.
Raises:
ValueError: Invalid flags.
"""
# Parse input and output arrays.
input_arrays = _parse_array(flags.input_arrays)
input_shapes = None
if flags.input_shapes:
input_shapes_list = [
_parse_array(shape, type_fn=int)
for shape in flags.input_shapes.split(":")
]
input_shapes = dict(zip(input_arrays, input_shapes_list))
output_arrays = _parse_array(flags.output_arrays)
converter_kwargs = {
"input_arrays": input_arrays,
"input_shapes": input_shapes,
"output_arrays": output_arrays
}
# Create TFLiteConverter.
if flags.graph_def_file:
converter_fn = lite.TFLiteConverter.from_frozen_graph
converter_kwargs["graph_def_file"] = flags.graph_def_file
elif flags.saved_model_dir:
converter_fn = lite.TFLiteConverter.from_saved_model
converter_kwargs["saved_model_dir"] = flags.saved_model_dir
converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
converter_kwargs["signature_key"] = flags.saved_model_signature_key
elif flags.keras_model_file:
converter_fn = lite.TFLiteConverter.from_keras_model_file
converter_kwargs["model_file"] = flags.keras_model_file
else:
raise ValueError("--graph_def_file, --saved_model_dir, or "
"--keras_model_file must be specified.")
return converter_fn(**converter_kwargs)
def _convert_tf1_model(flags):
"""Calls function to convert the TensorFlow 1.X model into a TFLite model.
Args:
flags: argparse.Namespace object.
Raises:
ValueError: Invalid flags.
"""
# Create converter.
converter = _get_toco_converter(flags)
if flags.inference_type:
converter.inference_type = _parse_inference_type(flags.inference_type,
"inference_type")
if flags.inference_input_type:
converter.inference_input_type = _parse_inference_type(
flags.inference_input_type, "inference_input_type")
if flags.output_format:
converter.output_format = _toco_flags_pb2.FileFormat.Value(
flags.output_format)
if flags.mean_values and flags.std_dev_values:
input_arrays = converter.get_input_arrays()
std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)
# In quantized inference, mean_value has to be integer so that the real
# value 0.0 is exactly representable.
if converter.inference_type == lite_constants.QUANTIZED_UINT8:
mean_values = _parse_array(flags.mean_values, type_fn=int)
else:
mean_values = _parse_array(flags.mean_values, type_fn=float)
quant_stats = list(zip(mean_values, std_dev_values))
if ((not flags.input_arrays and len(input_arrays) > 1) or
(len(input_arrays) != len(quant_stats))):
raise ValueError("Mismatching --input_arrays, --std_dev_values, and "
"--mean_values. The flags must have the same number of "
"items. The current input arrays are '{0}'. "
"--input_arrays must be present when specifying "
"--std_dev_values and --mean_values with multiple input "
"tensors in order to map between names and "
"values.".format(",".join(input_arrays)))
converter.quantized_input_stats = dict(zip(input_arrays, quant_stats))
if (flags.default_ranges_min is not None) and (flags.default_ranges_max is
not None):
converter.default_ranges_stats = (flags.default_ranges_min,
flags.default_ranges_max)
if flags.drop_control_dependency:
converter.drop_control_dependency = flags.drop_control_dependency
if flags.reorder_across_fake_quant:
converter.reorder_across_fake_quant = flags.reorder_across_fake_quant
if flags.change_concat_input_ranges:
converter.change_concat_input_ranges = (
flags.change_concat_input_ranges == "TRUE")
if flags.allow_custom_ops:
converter.allow_custom_ops = flags.allow_custom_ops
if flags.target_ops:
ops_set_options = lite.OpsSet.get_options()
converter.target_ops = set()
for option in flags.target_ops.split(","):
if option not in ops_set_options:
raise ValueError("Invalid value for --target_ops. Options: "
"{0}".format(",".join(ops_set_options)))
converter.target_spec.supported_ops.add(lite.OpsSet(option))
if flags.post_training_quantize:
converter.post_training_quantize = flags.post_training_quantize
if converter.inference_type == lite_constants.QUANTIZED_UINT8:
print("--post_training_quantize quantizes a graph of inference_type "
"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.")
converter.inference_type = lite_constants.FLOAT
if flags.dump_graphviz_dir:
converter.dump_graphviz_dir = flags.dump_graphviz_dir
if flags.dump_graphviz_video:
converter.dump_graphviz_video = flags.dump_graphviz_video
# Convert model.
output_data = converter.convert()
with open(flags.output_file, "wb") as f:
f.write(output_data)
def _convert_tf2_model(flags):
"""Calls function to convert the TensorFlow 2.0 model into a TFLite model.
Args:
flags: argparse.Namespace object.
Raises:
ValueError: Unsupported file format.
"""
# Load the model.
if flags.saved_model_dir:
converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir)
elif flags.keras_model_file:
model = keras.models.load_model(flags.keras_model_file)
converter = lite.TFLiteConverterV2.from_keras_model(model)
# Convert the model.
tflite_model = converter.convert()
with open(flags.output_file, "wb") as f:
f.write(tflite_model)
def _check_tf1_flags(flags, unparsed):
"""Checks the parsed and unparsed flags to ensure they are valid in 1.X.
Raises an error if previously supported unparsed flags are found. Raises an
error for parsed flags that don't meet the required conditions.
Args:
flags: argparse.Namespace object containing TFLite flags.
unparsed: List of unparsed flags.
Raises:
ValueError: Invalid flags.
"""
# Check unparsed flags for common mistakes based on previous TOCO.
def _get_message_unparsed(flag, orig_flag, new_flag):
if flag.startswith(orig_flag):
return "\n Use {0} instead of {1}".format(new_flag, orig_flag)
return ""
if unparsed:
output = ""
for flag in unparsed:
output += _get_message_unparsed(flag, "--input_file", "--graph_def_file")
output += _get_message_unparsed(flag, "--savedmodel_directory",
"--saved_model_dir")
output += _get_message_unparsed(flag, "--std_value", "--std_dev_values")
output += _get_message_unparsed(flag, "--batch_size", "--input_shapes")
output += _get_message_unparsed(flag, "--dump_graphviz",
"--dump_graphviz_dir")
if output:
raise ValueError(output)
# Check that flags are valid.
if flags.graph_def_file and (not flags.input_arrays or
not flags.output_arrays):
raise ValueError("--input_arrays and --output_arrays are required with "
"--graph_def_file")
if flags.input_shapes:
if not flags.input_arrays:
raise ValueError("--input_shapes must be used with --input_arrays")
if flags.input_shapes.count(":") != flags.input_arrays.count(","):
raise ValueError("--input_shapes and --input_arrays must have the same "
"number of items")
if flags.std_dev_values or flags.mean_values:
if bool(flags.std_dev_values) != bool(flags.mean_values):
raise ValueError("--std_dev_values and --mean_values must be used "
"together")
if flags.std_dev_values.count(",") != flags.mean_values.count(","):
raise ValueError("--std_dev_values, --mean_values must have the same "
"number of items")
if (flags.default_ranges_min is None) != (flags.default_ranges_max is None):
raise ValueError("--default_ranges_min and --default_ranges_max must be "
"used together")
if flags.dump_graphviz_video and not flags.dump_graphviz_dir:
raise ValueError("--dump_graphviz_video must be used with "
"--dump_graphviz_dir")
def _get_tf1_parser():
"""Returns ArgumentParser for tflite_convert for TensorFlow 1.X."""
parser = argparse.ArgumentParser(
description=("Command line tool to run TensorFlow Lite Converter."))
# Output file flag.
parser.add_argument(
"--output_file",
type=str,
help="Full filepath of the output file.",
required=True)
# Input file flags.
input_file_group = parser.add_mutually_exclusive_group(required=True)
input_file_group.add_argument(
"--graph_def_file",
type=str,
help="Full filepath of file containing frozen TensorFlow GraphDef.")
input_file_group.add_argument(
"--saved_model_dir",
type=str,
help="Full filepath of directory containing the SavedModel.")
input_file_group.add_argument(
"--keras_model_file",
type=str,
help="Full filepath of HDF5 file containing tf.Keras model.")
# Model format flags.
parser.add_argument(
"--output_format",
type=str.upper,
choices=["TFLITE", "GRAPHVIZ_DOT"],
help="Output file format.")
parser.add_argument(
"--inference_type",
type=str.upper,
choices=["FLOAT", "QUANTIZED_UINT8"],
help="Target data type of real-number arrays in the output file.")
parser.add_argument(
"--inference_input_type",
type=str.upper,
choices=["FLOAT", "QUANTIZED_UINT8"],
help=("Target data type of real-number input arrays. Allows for a "
"different type for input arrays in the case of quantization."))
# Input and output arrays flags.
parser.add_argument(
"--input_arrays",
type=str,
help="Names of the input arrays, comma-separated.")
parser.add_argument(
"--input_shapes",
type=str,
help="Shapes corresponding to --input_arrays, colon-separated.")
parser.add_argument(
"--output_arrays",
type=str,
help="Names of the output arrays, comma-separated.")
# SavedModel related flags.
parser.add_argument(
"--saved_model_tag_set",
type=str,
help=("Comma-separated set of tags identifying the MetaGraphDef within "
"the SavedModel to analyze. All tags must be present. In order to "
"pass in an empty tag set, pass in \"\". (default \"serve\")"))
parser.add_argument(
"--saved_model_signature_key",
type=str,
help=("Key identifying the SignatureDef containing inputs and outputs. "
"(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)"))
# Quantization flags.
parser.add_argument(
"--std_dev_values",
type=str,
help=("Standard deviation of training data for each input tensor, "
"comma-separated floats. Used for quantized input tensors. "
"(default None)"))
parser.add_argument(
"--mean_values",
type=str,
help=("Mean of training data for each input tensor, comma-separated "
"floats. Used for quantized input tensors. (default None)"))
parser.add_argument(
"--default_ranges_min",
type=float,
help=("Default value for min bound of min/max range values used for all "
"arrays without a specified range, Intended for experimenting with "
"quantization via \"dummy quantization\". (default None)"))
parser.add_argument(
"--default_ranges_max",
type=float,
help=("Default value for max bound of min/max range values used for all "
"arrays without a specified range, Intended for experimenting with "
"quantization via \"dummy quantization\". (default None)"))
# quantize_weights is DEPRECATED.
parser.add_argument(
"--quantize_weights",
dest="post_training_quantize",
action="store_true",
help=argparse.SUPPRESS)
parser.add_argument(
"--post_training_quantize",
dest="post_training_quantize",
action="store_true",
help=(
"Boolean indicating whether to quantize the weights of the "
"converted float model. Model size will be reduced and there will "
"be latency improvements (at the cost of accuracy). (default False)"))
# Graph manipulation flags.
parser.add_argument(
"--drop_control_dependency",
action="store_true",
help=("Boolean indicating whether to drop control dependencies silently. "
"This is due to TensorFlow not supporting control dependencies. "
"(default True)"))
parser.add_argument(
"--reorder_across_fake_quant",
action="store_true",
help=("Boolean indicating whether to reorder FakeQuant nodes in "
"unexpected locations. Used when the location of the FakeQuant "
"nodes is preventing graph transformations necessary to convert "
"the graph. Results in a graph that differs from the quantized "
"training graph, potentially causing differing arithmetic "
"behavior. (default False)"))
# Usage for this flag is --change_concat_input_ranges=true or
# --change_concat_input_ranges=false in order to make it clear what the flag
# is set to. This keeps the usage consistent with other usages of the flag
# where the default is different. The default value here is False.
parser.add_argument(
"--change_concat_input_ranges",
type=str.upper,
choices=["TRUE", "FALSE"],
help=("Boolean to change behavior of min/max ranges for inputs and "
"outputs of the concat operator for quantized models. Changes the "
"ranges of concat operator overlap when true. (default False)"))
# Permitted ops flags.
parser.add_argument(
"--allow_custom_ops",
action="store_true",
help=("Boolean indicating whether to allow custom operations. When false "
"any unknown operation is an error. When true, custom ops are "
"created for any op that is unknown. The developer will need to "
"provide these to the TensorFlow Lite runtime with a custom "
"resolver. (default False)"))
parser.add_argument(
"--target_ops",
type=str,
help=("Experimental flag, subject to change. Set of OpsSet options "
"indicating which converter to use. Options: {0}. One or more "
"option may be specified. (default set([OpsSet.TFLITE_BUILTINS]))"
"".format(",".join(lite.OpsSet.get_options()))))
# Logging flags.
parser.add_argument(
"--dump_graphviz_dir",
type=str,
help=("Full filepath of folder to dump the graphs at various stages of "
"processing GraphViz .dot files. Preferred over --output_format="
"GRAPHVIZ_DOT in order to keep the requirements of the output "
"file."))
parser.add_argument(
"--dump_graphviz_video",
action="store_true",
help=("Boolean indicating whether to dump the graph after every graph "
"transformation"))
return parser
def _get_tf2_parser():
"""Returns ArgumentParser for tflite_convert for TensorFlow 2.0."""
parser = argparse.ArgumentParser(
description=("Command line tool to run TensorFlow Lite Converter."))
# Output file flag.
parser.add_argument(
"--output_file",
type=str,
help="Full filepath of the output file.",
required=True)
# Input file flags.
input_file_group = parser.add_mutually_exclusive_group(required=True)
input_file_group.add_argument(
"--saved_model_dir",
type=str,
help="Full path of the directory containing the SavedModel.")
input_file_group.add_argument(
"--keras_model_file",
type=str,
help="Full filepath of HDF5 file containing tf.Keras model.")
return parser
def run_main(_):
"""Main in toco_convert.py."""
if tf2.enabled():
parser = _get_tf2_parser()
else:
parser = _get_tf1_parser()
tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])
if tf2.enabled():
_convert_tf2_model(tflite_flags)
else:
try:
_check_tf1_flags(tflite_flags, unparsed)
except ValueError as e:
parser.print_usage()
file_name = os.path.basename(sys.argv[0])
sys.stderr.write("{0}: error: {1}\n".format(file_name, str(e)))
sys.exit(1)
_convert_tf1_model(tflite_flags)
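# A hedged sketch, not part of the original script: how the TF1 parser and
# flag checker above consume a typical frozen-graph conversion command line.
# The file names are placeholder assumptions.
def _example_tf1_flag_parsing():
  parser = _get_tf1_parser()
  tflite_flags, unparsed = parser.parse_known_args([
      "--output_file=model.tflite",        # Required output flag.
      "--graph_def_file=frozen_graph.pb",  # One of the input-file group.
      "--input_arrays=input",              # Required with --graph_def_file.
      "--output_arrays=output",
  ])
  _check_tf1_flags(tflite_flags, unparsed)  # Raises ValueError if invalid.
  return tflite_flags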
def main():
app.run(main=run_main, argv=sys.argv[:1])
if __name__ == "__main__":
main()
|
tensorflow-master
|
tensorflow/lite/python/tflite_convert.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import enum
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.experimental.examples.lstm.rnn import dynamic_rnn # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TfLiteRNNCell # pylint: disable=unused-import
from tensorflow.lite.experimental.tensorboard.ops_util import get_potentially_supported_ops # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.saving import saving_utils as _saving_utils
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.saved_model.load import load as _load
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
"""Enum defining the optimizations to apply when generating tflite graphs.
Some optimizations may come at the cost of accuracy.
"""
# Default optimization strategy.
#
# Converter will do its best to improve size and latency based on the
# information provided.
# Enhanced optimizations can be gained by providing a representative_dataset.
# This is recommended, and is currently equivalent to the modes below.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
DEFAULT = "DEFAULT"
# Optimize for size.
#
# Optimizations that reduce the size of the model.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"
# Optimize for latency.
#
# Optimizations that reduce the latency of the model.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"
def __str__(self):
return self.value
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
"""Representative dataset to evaluate optimizations.
A representative dataset that can be used to evaluate optimizations by the
converter. E.g. the converter can use these examples to estimate (min, max)
ranges by calibrating the model on the inputs. This allows the converter to
quantize a converted floating point model.
"""
def __init__(self, input_gen):
"""Creates a representative dataset.
Args:
input_gen: an input generator that can be used to generate input samples
for the model. This must be a callable object that returns an object
that supports the `iter()` protocol (e.g. a generator function). The
elements generated must have the same type and shape as the model's inputs.
"""
self.input_gen = input_gen
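# A hedged sketch, not original code: wiring a generator into
# RepresentativeDataset, as the class docstring above describes.
# `calibration_inputs` is an assumed in-memory list of numpy arrays shaped
# like the model's inputs.
def _example_representative_dataset(calibration_inputs):
  def _gen():
    for sample in calibration_inputs:
      # Each yielded element must match the model inputs' type and shape.
      yield [sample]
  return RepresentativeDataset(_gen)
# A converter would then typically set, e.g.:
#   converter.optimizations = [Optimize.DEFAULT]
#   converter.representative_dataset = _example_representative_dataset(data)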
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
"""Specification of target device.
Details about the target device. The converter optimizes the generated model
for the specified device.
Attributes:
supported_ops: Experimental flag, subject to change. Set of OpsSet options
supported by the device. (default set([OpsSet.TFLITE_BUILTINS]))
"""
def __init__(self, supported_ops=None):
if supported_ops is None:
supported_ops = set([OpsSet.TFLITE_BUILTINS])
self.supported_ops = supported_ops
class TFLiteConverterBase(object):
"""Converter subclass to share functionality between V1 and V2 converters."""
def __init__(self):
self.representative_dataset = None
self.optimizations = []
self._target_ops = set([OpsSet.TFLITE_BUILTINS])
def _grappler_config(self):
is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == set(self._target_ops)
optimizers = ["constfold"]
if is_only_flex_enabled:
# The layout optimizer turns NHWC to NCHW. This provides performance
# optimizations when Flex mode is enabled. However, this is not compatible
# with builtin ops.
optimizers.append("layout")
return _get_grappler_config(optimizers)
def _validate_representative_dataset(self):
if self.representative_dataset:
if not isinstance(self.representative_dataset, RepresentativeDataset):
self.representative_dataset = RepresentativeDataset(
self.representative_dataset)
if self.representative_dataset.input_gen is None:
raise ValueError(
"Provide an input generator for representative_dataset")
elif self._int8_target_required():
raise ValueError("representative_dataset is required when specifying "
"TFLITE_BUILTINS_INT8 target.")
def _int8_target_required(self):
return set([OpsSet.TFLITE_BUILTINS_INT8]) == set(self._target_ops)
def _is_post_training_optimize(self):
return (self._int8_target_required() or bool(
set(self.optimizations).intersection([
Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
Optimize.DEFAULT
])))
def _is_weight_only_quantize(self):
return (self._is_post_training_optimize() and
(self.representative_dataset is None))
def _is_calibration_quantize(self):
return self._is_post_training_optimize() and self.representative_dataset
def _calibrate_quantize_model(self, result, inference_input_type,
inference_output_type):
allow_float = not self._int8_target_required()
calibrate_quantize = _calibrator.Calibrator(result)
return calibrate_quantize.calibrate_and_quantize(
self.representative_dataset.input_gen, inference_input_type,
inference_output_type, allow_float)
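# A hedged illustration, not original code, of the decision logic encoded in
# the predicates above: a post-training optimization without a representative
# dataset means weight-only quantization; adding a dataset switches to
# calibration-based quantization.
def _example_quantize_decisions():
  base = TFLiteConverterBase()
  base.optimizations = [Optimize.DEFAULT]
  assert base._is_weight_only_quantize()  # No dataset: weights only.
  base.representative_dataset = RepresentativeDataset(lambda: iter([]))
  assert base._is_calibration_quantize()  # Dataset present: calibrate.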
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(TFLiteConverterBase):
"""Converts a TensorFlow model into TensorFlow Lite model.
Attributes:
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
target_spec: Experimental flag, subject to change. Specification of target
device.
optimizations: Experimental flag, subject to change. A list of optimizations
to apply when converting the model. E.g. `[Optimize.DEFAULT]`
representative_dataset: A representative dataset that can be used to
generate input and output samples for the model. The converter can use the
dataset to evaluate different optimizations.
Example usage:
```python
# Converting a SavedModel to a TensorFlow Lite model.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Converting a tf.Keras model to a TensorFlow Lite model.
converter = lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Converting ConcreteFunctions to a TensorFlow Lite model.
converter = lite.TFLiteConverter.from_concrete_functions([func])
tflite_model = converter.convert()
```
"""
def __init__(self, funcs, trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteConverterV2, self).__init__()
self._funcs = funcs
self._trackable_obj = trackable_obj
self.allow_custom_ops = False
self.target_spec = TargetSpec()
@classmethod
def from_concrete_functions(cls, funcs):
"""Creates a TFLiteConverter object from ConcreteFunctions.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
Returns:
TFLiteConverter object.
Raises:
ValueError: Invalid input type.
"""
for func in funcs:
if not isinstance(func, _function.ConcreteFunction):
message = "This function takes in a list of ConcreteFunction."
if isinstance(func, _def_function.Function):
message += (" To get the ConcreteFunction from a Function,"
" call from_concrete_function.")
raise ValueError(message)
return cls(funcs)
@classmethod
def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
"""Creates a TFLiteConverter object from a SavedModel directory.
Args:
saved_model_dir: SavedModel directory to convert.
signature_keys: List of keys identifying SignatureDef containing inputs
and outputs. Elements should not be duplicated. By default the
`signatures` attribute of the MetaGraphDef is used. (default
saved_model.signatures)
tags: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default set(SERVING))
Returns:
TFLiteConverter object.
Raises:
ValueError: Invalid signature keys.
"""
# Ensures any graphs created in Eager mode are able to run. This is required
# in order to create a tf.estimator.Exporter that exports a TFLite model.
with context.eager_mode():
saved_model = _load(saved_model_dir, tags)
if not signature_keys:
signature_keys = saved_model.signatures
funcs = []
for key in signature_keys:
if key not in saved_model.signatures:
raise ValueError("Invalid signature key '{}' found. Valid keys are "
"'{}'.".format(key, ",".join(saved_model.signatures)))
funcs.append(saved_model.signatures[key])
return cls(funcs, saved_model)
@classmethod
def from_keras_model(cls, model):
"""Creates a TFLiteConverter object from a Keras model.
Args:
model: tf.keras.Model.
Returns:
TFLiteConverter object.
"""
func = _saving_utils.trace_model_call(model)
concrete_func = func.get_concrete_function()
return cls([concrete_func])
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
# TODO(b/130297984): Add support for converting multiple functions.
self._target_ops = self.target_spec.supported_ops
if len(self._funcs) != 1:
raise ValueError("This converter can only convert a single "
"ConcreteFunction. Converting multiple functions is "
"under development.")
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
self._funcs[0])
input_tensors = [
tensor for tensor in frozen_func.inputs
if tensor.dtype != _dtypes.resource
]
output_tensors = frozen_func.outputs
# Run a Grappler pass.
graph_def = frozen_func.graph.as_graph_def()
graph_def = _run_graph_optimizations(
graph_def,
input_tensors,
output_tensors,
config=self._grappler_config(),
graph=frozen_func.graph)
# Checks dimensions in input tensor.
for tensor in input_tensors:
# Note that shape_list might be empty for scalar shapes.
shape_list = tensor.shape.as_list()
if None in shape_list[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
elif shape_list and shape_list[0] is None:
# Set the batch size to 1 if undefined.
shape = tensor.shape.as_list()
shape[0] = 1
tensor.set_shape(shape)
self._validate_representative_dataset()
converter_kwargs = {
"input_format": constants.TENSORFLOW_GRAPHDEF,
"allow_custom_ops": self.allow_custom_ops,
"post_training_quantize": self._is_weight_only_quantize(),
"target_ops": self.target_spec.supported_ops,
}
# Converts model.
result = _toco_convert_impl(
input_data=graph_def,
input_tensors=input_tensors,
output_tensors=output_tensors,
**converter_kwargs)
if self._is_calibration_quantize():
result = self._calibrate_quantize_model(result, constants.FLOAT,
constants.FLOAT)
return result
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(TFLiteConverterBase):
"""Convert a TensorFlow model into `output_format`.
This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras
model into either a TFLite FlatBuffer or graph visualization.
Attributes:
inference_type: Target data type of real-number arrays in the output file.
Must be `{tf.float32, tf.uint8}`. If `optimizations` are provided, this
parameter is ignored. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays.
If an integer type is provided and `optimizations` are not used,
`quantized_input_stats` must be provided.
If `inference_type` is tf.uint8, signaling conversion to a fully quantized
model from a quantization-aware trained input model, then
`inference_input_type` defaults to tf.uint8.
In all other cases, `inference_input_type` defaults to tf.float32.
Must be `{tf.float32, tf.uint8, tf.int8}`
inference_output_type: Target data type of real-number output arrays. Allows
for a different type for output arrays.
If `inference_type` is tf.uint8, signaling conversion to a fully quantized
model from a quantization-aware trained output model, then
`inference_output_type` defaults to tf.uint8.
In all other cases, `inference_output_type` must be tf.float32; otherwise an
error will be thrown.
Must be `{tf.float32, tf.uint8, tf.int8}`
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: Dict of strings representing input tensor names
mapped to tuple of floats representing the mean and standard deviation
of the training data (e.g., {"foo" : (0., 1.)}). Only need if
`inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default {})
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
post_training_quantize: Deprecated. Please specify `[Optimize.DEFAULT]` for
`optimizations` instead. Boolean indicating whether to quantize the
weights of the converted float model. Model size will be reduced and
there will be latency improvements (at the cost of accuracy).
(default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Deprecated. Please specify `target_spec.supported_ops` instead.
Set of OpsSet options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
target_spec: Experimental flag, subject to change. Specification of target
device.
optimizations: Experimental flag, subject to change. A list of optimizations
to apply when converting the model. E.g. `[Optimize.DEFAULT]`
representative_dataset: A representative dataset that can be used to
generate input and output samples for the model. The converter can use
the dataset to evaluate different optimizations.
Example usage:
```python
# Converting a GraphDef from session.
converter = lite.TFLiteConverter.from_session(sess, in_tensors, out_tensors)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a GraphDef from file.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a SavedModel.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a tf.keras model.
converter = lite.TFLiteConverter.from_keras_model_file(keras_model)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
```
"""
def __init__(self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None):
"""Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: List of tuples pairing input tensor names with
        lists of integers representing input shapes
        (e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
        into TensorFlow and when `input_tensors` and `output_tensors` are
        None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteConverter, self).__init__()
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self.inference_type = constants.FLOAT
self.inference_input_type = None
self.inference_output_type = None
self.output_format = constants.TFLITE
self.quantized_input_stats = {}
self.default_ranges_stats = None
self.drop_control_dependency = True
self.reorder_across_fake_quant = False
self.change_concat_input_ranges = False
self.allow_custom_ops = False
self._post_training_quantize = False
self.dump_graphviz_dir = None
self.dump_graphviz_video = False
self.target_spec = TargetSpec()
# Attributes are used by models that cannot be loaded into TensorFlow.
if not self._has_valid_tensors():
if not input_arrays_with_shape or not output_arrays:
raise ValueError(
"If input_tensors and output_tensors are None, both "
"input_arrays_with_shape and output_arrays must be defined.")
self._input_arrays_with_shape = input_arrays_with_shape
self._output_arrays = output_arrays
@classmethod
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TFLiteConverter class from a TensorFlow Session.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TFLiteConverter class.
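    Example usage (a minimal sketch; the placeholder and identity op stand in
    for a real graph):
    ```python
    img = tf.compat.v1.placeholder(tf.float32, shape=(1, 64, 64, 3))
    out = tf.identity(img, name="out")
    with tf.compat.v1.Session() as sess:
      converter = lite.TFLiteConverter.from_session(sess, [img], [out])
      tflite_model = converter.convert()
    ```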
"""
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
@classmethod
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TFLiteConverter class from a file containing a frozen GraphDef.
Args:
graph_def_file: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict mapping input tensor names to lists of integers
        representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). A shape is
        automatically determined when its value is None (e.g., {"foo" : None}).
        (default None)
Returns:
TFLiteConverter class.
Raises:
IOError:
File not found.
Unable to parse input file.
ValueError:
The graph is not frozen.
input_arrays or output_arrays contains an invalid tensor name.
input_shapes is not correctly defined when required
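    Example usage (a sketch; the file path and array names are placeholders):
    ```python
    converter = lite.TFLiteConverter.from_frozen_graph(
        "/tmp/frozen_graph.pb",
        input_arrays=["input"],
        output_arrays=["output"],
        input_shapes={"input": [1, 224, 224, 3]})
    tflite_model = converter.convert()
    ```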
"""
with _ops.Graph().as_default():
with _session.Session() as sess:
# Read GraphDef from file.
if not _file_io.file_exists(graph_def_file):
raise IOError("File '{0}' does not exist.".format(graph_def_file))
with _file_io.FileIO(graph_def_file, "rb") as f:
file_content = f.read()
try:
graph_def = _graph_pb2.GraphDef()
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
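          # Binary parsing failed; fall back to parsing the file as a
          # text-format (pbtxt) GraphDef.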
try:
print("Ignore 'tcmalloc: large alloc' warnings.")
if not isinstance(file_content, str):
if PY3:
file_content = file_content.decode("utf-8")
else:
file_content = file_content.encode("utf-8")
graph_def = _graph_pb2.GraphDef()
_text_format.Merge(file_content, graph_def)
except (_text_format.ParseError, DecodeError):
raise IOError(
"Unable to parse input file '{}'.".format(graph_def_file))
# Handles models with custom TFLite ops that cannot be resolved in
# TensorFlow.
load_model_in_session = True
try:
_import_graph_def(graph_def, name="")
except _NotFoundError:
load_model_in_session = False
if load_model_in_session:
# Check if graph is frozen.
if not _is_frozen_graph(sess):
raise ValueError("Please freeze the graph using freeze_graph.py.")
# Get input and output tensors.
input_tensors = _get_tensors_from_tensor_names(
sess.graph, input_arrays)
output_tensors = _get_tensors_from_tensor_names(
sess.graph, output_arrays)
_set_tensor_shapes(input_tensors, input_shapes)
return cls(sess.graph_def, input_tensors, output_tensors)
else:
if not input_shapes:
raise ValueError("input_shapes must be defined for this model.")
if set(input_arrays) != set(input_shapes.keys()):
raise ValueError("input_shapes must contain a value for each item "
"in input_array.")
input_arrays_with_shape = [
(name, input_shapes[name]) for name in input_arrays
]
return cls(
graph_def,
input_tensors=None,
output_tensors=None,
input_arrays_with_shape=input_arrays_with_shape,
output_arrays=output_arrays)
@classmethod
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TFLiteConverter class from a SavedModel.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict mapping input tensor names to lists of integers
        representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). A shape is
        automatically determined when its value is None (e.g., {"foo" : None}).
        (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
      tag_set: Set of tags identifying the MetaGraphDef within the SavedModel
        to analyze. All tags in the tag set must be present.
        (default set(["serve"]))
signature_key: Key identifying SignatureDef containing inputs and outputs.
(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)
Returns:
TFLiteConverter class.
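    Example usage (a sketch; the directory is a placeholder and the tag set
    shown is the default):
    ```python
    converter = lite.TFLiteConverter.from_saved_model(
        "/tmp/saved_model", tag_set=set(["serve"]))
    tflite_model = converter.convert()
    ```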
"""
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key)
return cls(
graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
@classmethod
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None,
custom_objects=None):
"""Creates a TFLiteConverter class from a tf.keras model file.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses the Keras
        model's inputs when none are provided. (default None)
      input_shapes: Dict mapping input tensor names to lists of integers
        representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). A shape is
        automatically determined when its value is None (e.g., {"foo" : None}).
        (default None)
      output_arrays: List of output tensors to freeze graph with. Uses the
        Keras model's outputs when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Returns:
TFLiteConverter class.
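    Example usage (a sketch; the HDF5 path is a placeholder for a model saved
    via `keras_model.save()`):
    ```python
    keras_model.save("/tmp/keras_model.h5")
    converter = lite.TFLiteConverter.from_keras_model_file("/tmp/keras_model.h5")
    tflite_model = converter.convert()
    ```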
"""
# Handles Keras when Eager mode is enabled.
if context.executing_eagerly():
if input_arrays or output_arrays:
raise ValueError("`input_arrays` and `output_arrays` are unsupported "
"with Eager mode. If your model requires any of these "
"parameters, please use disable_eager_execution().")
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file, custom_objects)
function = _saving_utils.trace_model_call(keras_model)
concrete_func = function.get_concrete_function()
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
concrete_func)
_set_tensor_shapes(frozen_func.inputs, input_shapes)
return cls(frozen_func.graph.as_graph_def(), frozen_func.inputs,
frozen_func.outputs)
# Handles Keras when Eager mode is disabled.
_keras.backend.clear_session()
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file, custom_objects)
sess = _keras.backend.get_session()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
def __setattr__(self, name, value):
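    # Forward writes to deprecated properties to their replacements, warning
    # on each use.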
if name == "post_training_quantize":
warnings.warn("Property %s is deprecated, "
"please use optimizations=[Optimize.DEFAULT]"
" instead." % name)
if value:
self.optimizations = [Optimize.DEFAULT]
else:
self.optimizations = []
return
if name == "target_ops":
warnings.warn("Property %s is deprecated, please use "
"target_spec.supported_ops instead." % name)
self.target_spec.supported_ops = value
return
object.__setattr__(self, name, value)
def __getattribute__(self, name):
if name == "post_training_quantize":
warnings.warn("Property %s is deprecated, "
"please use optimizations=[Optimize.DEFAULT]"
" instead." % name)
return Optimize.DEFAULT in set(self.optimizations)
if name == "target_ops":
warnings.warn("Property %s is deprecated, please use "
"target_spec.supported_ops instead." % name)
return self.target_spec.supported_ops
return object.__getattribute__(self, name)
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
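    Example usage (a sketch of a fully quantized conversion; the input name
    and the (mean, stddev) stats are placeholders):
    ```python
    converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {"input": (127.5, 127.5)}  # (mean, stddev)
    converter.default_ranges_stats = (0., 6.)  # enables "dummy quantization"
    tflite_model = converter.convert()
    ```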
"""
self._target_ops = self.target_spec.supported_ops
# Checks dimensions in input tensor.
if self._has_valid_tensors():
for tensor in self._input_tensors:
shape = tensor.shape
if not shape:
raise ValueError("Provide an input shape for input array "
"'{0}'.".format(_get_tensor_name(tensor)))
# Note that shape_list might be empty for scalar shapes.
shape_list = shape.as_list()
if None in shape_list[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(
_get_tensor_name(tensor), shape_list))
elif shape_list and shape_list[0] is None:
self._set_batch_size(batch_size=1)
# Get quantization stats. Ensures there is one stat per name if the stats
# are specified.
if self.quantized_input_stats:
quantized_stats = []
invalid_stats = []
for name in self.get_input_arrays():
if name in self.quantized_input_stats:
quantized_stats.append(self.quantized_input_stats[name])
else:
invalid_stats.append(name)
if invalid_stats:
raise ValueError("Quantization input stats are not available for input "
"tensors '{0}'.".format(",".join(invalid_stats)))
else:
quantized_stats = None
self._validate_representative_dataset()
toco_inference_input_type = self.inference_input_type
inference_input_type = self.inference_input_type
inference_output_type = self.inference_output_type
post_training_optimize = self._is_post_training_optimize()
if post_training_optimize:
# Post training optimizations require that TOCO outputs a float model.
if self.inference_type != constants.FLOAT:
raise ValueError(
"`optimizations` require that `inference_type` is set to float.")
toco_inference_input_type = constants.FLOAT
# Set up default values.
if inference_input_type is None:
inference_input_type = constants.FLOAT
if inference_output_type is None:
inference_output_type = constants.FLOAT
weight_only_quantize = self._is_weight_only_quantize()
if weight_only_quantize:
# Currently, weight only quantization requires float inputs and outputs.
if (inference_input_type != constants.FLOAT or
inference_output_type != constants.FLOAT):
raise ValueError(
"Provide an inference_input_type and inference_output_type of type "
"tf.float32.")
if not post_training_optimize and self.inference_output_type is not None:
raise ValueError(
"inference_output_type is currently not supported if optimizations "
"are not enabled.")
converter_kwargs = {
"inference_type": self.inference_type,
"inference_input_type": toco_inference_input_type,
"input_format": constants.TENSORFLOW_GRAPHDEF,
"output_format": self.output_format,
"quantized_input_stats": quantized_stats,
"default_ranges_stats": self.default_ranges_stats,
"drop_control_dependency": self.drop_control_dependency,
"reorder_across_fake_quant": self.reorder_across_fake_quant,
"change_concat_input_ranges": self.change_concat_input_ranges,
"allow_custom_ops": self.allow_custom_ops,
"post_training_quantize": weight_only_quantize,
"target_ops": self._target_ops,
"dump_graphviz_dir": self.dump_graphviz_dir,
"dump_graphviz_video": self.dump_graphviz_video
}
optimized_graph = self._graph_def
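    # Graph optimizations are skipped for QUANTIZED_UINT8 inference; any
    # optimization failure falls back to the unmodified GraphDef.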
if self.inference_type != constants.QUANTIZED_UINT8:
try:
optimized_graph = _run_graph_optimizations(
self._graph_def,
self._input_tensors,
self._output_tensors,
config=self._grappler_config())
except Exception:
optimized_graph = self._graph_def
# Converts model.
if self._has_valid_tensors():
result = _toco_convert_impl(
input_data=optimized_graph,
input_tensors=self._input_tensors,
output_tensors=self._output_tensors,
**converter_kwargs)
else:
result = _toco_convert_graph_def(
input_data=optimized_graph,
input_arrays_with_shape=self._input_arrays_with_shape,
output_arrays=self._output_arrays,
**converter_kwargs)
if self._is_calibration_quantize():
result = self._calibrate_quantize_model(result, inference_input_type,
inference_output_type)
return result
def get_input_arrays(self):
"""Returns a list of the names of the input tensors.
Returns:
List of strings.
"""
if self._has_valid_tensors():
return [_get_tensor_name(tensor) for tensor in self._input_tensors]
else:
return [name for name, _ in self._input_arrays_with_shape]
def _has_valid_tensors(self):
"""Checks if the input and output tensors have been initialized.
Returns:
Bool.
"""
return self._input_tensors and self._output_tensors
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
Args:
batch_size: Batch size for the model. Replaces the first dimension of an
input size array if undefined. (default 1)
Raises:
ValueError: input_tensor is not defined.
"""
if not self._has_valid_tensors():
raise ValueError("The batch size cannot be set for this model. Please "
"use input_shapes parameter.")
for tensor in self._input_tensors:
shape = tensor.shape.as_list()
shape[0] = batch_size
tensor.set_shape(shape)
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
"""Convert a TensorFlow model into `output_format` using TOCO.
This class has been deprecated. Please use `lite.TFLiteConverter` instead.
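  A sketch of the migration (factory method arguments are unchanged):
  ```python
  # Before: converter = lite.TocoConverter.from_saved_model(saved_model_dir)
  converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
  ```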
"""
@classmethod
@_deprecation.deprecated(None,
"Use `lite.TFLiteConverter.from_session` instead.")
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TocoConverter class from a TensorFlow Session."""
return TFLiteConverter.from_session(sess, input_tensors, output_tensors)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TocoConverter class from a file containing a frozen graph."""
return TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays,
output_arrays, input_shapes)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TocoConverter class from a SavedModel."""
return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays,
input_shapes, output_arrays,
tag_set, signature_key)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None):
"""Creates a TocoConverter class from a tf.keras model file."""
return TFLiteConverter.from_keras_model_file(model_file, input_arrays,
input_shapes, output_arrays)
|
tensorflow-master
|
tensorflow/lite/python/lite.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
class TestModels(test_util.TensorFlowTestCase):
def _evaluateTFLiteModel(self, tflite_model, input_data):
"""Evaluates the model on the `input_data`."""
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return interpreter.get_tensor(output_details[0]['index'])
def _getSimpleVariableModel(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
return root
def _getMultiFunctionModel(self):
class BasicModel(tracking.AutoTrackable):
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
return BasicModel()
class FromConcreteFunctionTest(TestModels):
@test_util.run_v2_only
def testTypeInvalid(self):
root = self._getSimpleVariableModel()
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_concrete_functions([root.f])
self.assertIn('call from_concrete_function', str(error.exception))
@test_util.run_v2_only
def testFloat(self):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testScalarInput(self):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Convert a single model in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.add.get_concrete_function(input_data)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.add(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testConvertMultipleFunctions(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[add_func, sub_func])
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('can only convert a single ConcreteFunction',
str(error.exception))
def _getCalibrationQuantizeModel(self):
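    # Builds a small conv+relu ConcreteFunction together with a generator of
    # random calibration samples, used by the quantization tests below.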
np.random.seed(0)
root = tracking.AutoTrackable()
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 5, 5, 3], dtype=dtypes.float32)
])
def func(inp):
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, 16]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
return output
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (to_save, calibration_gen)
def testPostTrainingCalibrateAndQuantize(self):
func, calibration_gen = self._getCalibrationQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testCalibrateAndQuantizeBuiltinInt8(self):
func, calibration_gen = self._getCalibrationQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
class FromSavedModelTest(TestModels):
@test_util.run_v2_only
def testConstModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatures(self):
"""Test values for `signature_keys` argument."""
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model with invalid `signature_keys`.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['INVALID'])
self.assertIn("Invalid signature key 'INVALID'", str(error.exception))
# Convert model with empty `signature_keys`.
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=[])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultipleFunctionModel(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Ensure the converter generates.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertEqual(len(converter._funcs), 2)
# Try converting multiple functions.
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('This converter can only convert a single ConcreteFunction',
str(error.exception))
@test_util.run_v2_only
def testKerasSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = constant_op.constant(1., shape=[1, 1])
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2),
keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(model, save_dir)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
class FromKerasModelTest(TestModels):
@test_util.run_v2_only
def testSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = constant_op.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2),
keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testSequentialMultiInputOutputModel(self):
"""Test a tf.Keras model with multiple inputs and outputs."""
left_input_data = constant_op.constant(1., shape=[1, 3])
right_input_data = constant_op.constant(1., shape=[1, 3])
# Create a simple Keras model.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_c_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 2))
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_a, interm_b], name='merge')
output_c = keras.layers.Dense(
3, activation='softmax', name='dense_2')(
merged)
output_d = keras.layers.Dense(
2, activation='softmax', name='dense_3')(
merged)
model = keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
input_data = [left_input_data, right_input_data]
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, input_data)
for tf_result, tflite_result in zip(expected_value, actual_value):
np.testing.assert_almost_equal(tf_result[0], tflite_result, 5)
class GrapplerTest(TestModels):
@test_util.run_v2_only
def testConstantFolding(self):
    # Constant folding handles the tf.broadcast_to operation which was not
    # supported by TFLite at the time this test was added.
input_data = constant_op.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.],
shape=[3, 3])
@def_function.function
def func(x):
y_const = constant_op.constant([1., 2., 3.])
y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])
return math_ops.matmul(x, y_broadcast)
root = tracking.AutoTrackable()
root.f = func
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
np.testing.assert_array_equal(expected_value.numpy(), actual_value)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/lite/python/lite_v2_test.py
|