python_code (stringlengths 0 to 1.02M) | repo_name (stringlengths 9 to 48) | file_path (stringlengths 5 to 114)
---|---|---|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import os as _os
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
# pylint: disable=g-bad-import-order
# API IMPORTS PLACEHOLDER
# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorflow_estimator.python.estimator.api._v1 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
except ImportError:
pass
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
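# The 'app' module referenced on the next line is expected to come from the
# generated imports that replace the API IMPORTS PLACEHOLDER above, hence the
# pylint disable.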
app.flags = flags # pylint: disable=undefined-variable
| tensorflow-master | tensorflow/compat_template_v1.__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Top-level module of TensorFlow. By convention, we refer to this module as
`tf` instead of `tensorflow`, following the common practice of importing
TensorFlow via the command `import tensorflow as tf`.
The primary function of this module is to import all of the public TensorFlow
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
Note that the file `__init__.py` in the TensorFlow source code tree is actually
only a placeholder to enable test cases to run. The TensorFlow build replaces
this file with a file generated from [`api_template.__init__.py`](https://www.github.com/tensorflow/tensorflow/blob/master/tensorflow/api_template.__init__.py)
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import logging as _logging
import os as _os
import site as _site
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
# API IMPORTS PLACEHOLDER
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
except ImportError:
_logging.warning(
"Limited tf.summary API due to missing TensorBoard installation.")
try:
from tensorflow_estimator.python.estimator.api._v2 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
except ImportError:
pass
# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
_compat.enable_v2_behavior()
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get site-packages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
for s in _site_packages_dirs:
# TODO(gunan): Add sanity checks to loaded modules here.
plugin_dir = _os.path.join(s, 'tensorflow-plugins')
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)
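# Illustrative note (paths are hypothetical): when running from a pip install,
# the loop above probes each <site-packages dir>/tensorflow-plugins directory
# and, if it exists, loads the plugin libraries it contains via load_library.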
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
try:
del python
if '__all__' in vars():
vars()['__all__'].remove('python')
del core
if '__all__' in vars():
vars()['__all__'].remove('core')
except NameError:
# Don't fail if these modules are not available.
# E.g., this file is originally placed under tensorflow/_api/v1, which
# does not have the 'python' and 'core' directories. Then, it is copied
# to tensorflow/, which does have these two directories.
pass
# Similarly for compiler. Do it separately to make sure we do this even if the
# others don't exist.
try:
del compiler
if '__all__' in vars():
vars()['__all__'].remove('compiler')
except NameError:
pass
# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
# pylint: enable=undefined-variable
| tensorflow-master | tensorflow/api_template.__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import logging as _logging
import os as _os
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
# pylint: disable=g-bad-import-order
# API IMPORTS PLACEHOLDER
# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
except ImportError:
_logging.warning(
"Limited tf.compat.v2.summary API due to missing TensorBoard "
"installation.")
try:
from tensorflow_estimator.python.estimator.api._v2 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
except ImportError:
pass
# We would like the following to work for fully enabling 2.0 in a 1.0 install:
#
# import tensorflow.compat.v2 as tf
# tf.enable_v2_behavior()
#
# This makes this one symbol available directly.
from tensorflow.python.compat.v2_compat import enable_v2_behavior # pylint: disable=g-import-not-at-top
# Add module aliases
_current_module = _sys.modules[__name__]
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
| tensorflow-master | tensorflow/compat_template.__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Bring in all of the public TensorFlow interface into this
# module.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.util.lazy_loader import LazyLoader
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib')
del LazyLoader
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
from tensorflow.python.platform import app # pylint: disable=g-import-not-at-top
app.flags = flags
del absolute_import
del division
del print_function
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
del python
del core
# pylint: enable=undefined-variable
| tensorflow-master | tensorflow/__init__.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# LINT.IfChange
"""TensorFlow root package"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import sys as _sys
import importlib as _importlib
import types as _types
# Since TensorFlow Python code now resides in tensorflow_core but TensorFlow
# ecosystem code (e.g. estimator, but also even tensorflow itself) imports
# tensorflow, we need to do forwarding between the two. To do so, we use a lazy
# loader to load and forward the top level modules. We cannot use the LazyLoader
# defined by tensorflow at tensorflow/python/util/lazy_loader.py, because using
# that would already require importing tensorflow. Hence, we define it inline.
class _LazyLoader(_types.ModuleType):
"""Lazily import a module so that we can forward it."""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(_LazyLoader, self).__init__(name)
def _load(self):
"""Import the target module and insert it into the parent's namespace."""
module = _importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
# Forwarding a module is as simple as lazy loading the module from the new path
# and then registering it to sys.modules using the old path
def _forward_module(old_name):
parts = old_name.split(".")
parts[0] = parts[0] + "_core"
local_name = parts[-1]
existing_name = ".".join(parts)
_module = _LazyLoader(local_name, globals(), existing_name)
return _sys.modules.setdefault(old_name, _module)
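# Illustrative sketch (not part of the original template): once a module has
# been forwarded, looking it up under the old name returns the _LazyLoader,
# and the first real use triggers the import from tensorflow_core.
#
#   _forward_module("tensorflow.python")
#   mod = _sys.modules["tensorflow.python"]   # a _LazyLoader instance
#   dir(mod)                                  # first use: _load() imports
#                                             # "tensorflow_core.python" and
#                                             # copies its namespace in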
# This list should contain all modules _immediately_ under tensorflow
_top_level_modules = [
"tensorflow._api",
"tensorflow.python",
"tensorflow.tools",
"tensorflow.core",
"tensorflow.compiler",
"tensorflow.lite",
"tensorflow.keras",
"tensorflow.compat",
"tensorflow.summary", # tensorboard
"tensorflow.examples",
]
# Estimator needs to be handled separately so that both
# import tensorflow_estimator and import tensorflow.estimator still work.
# Only in the second case do we actually need to do forwarding; the first case
# already defines most of the hierarchy, and eagerly forwarding would result in
# an import loop.
if "tensorflow_estimator" not in _sys.modules:
_root_estimator = False
_top_level_modules.append("tensorflow.estimator")
else:
_root_estimator = True
# Lazy load all of the _top_level_modules; we don't need their names anymore.
for _m in _top_level_modules:
_forward_module(_m)
# We still need all the names that are toplevel on tensorflow_core
from tensorflow_core import *
# We also need to bring in keras if available in tensorflow_core
# Above import * doesn't import it as __all__ is updated before keras is hooked
try:
from tensorflow_core import keras
except ImportError as e:
pass
# Similarly for estimator, but only if this file is not read via an
# import tensorflow_estimator (same reasoning as above when forwarding estimator
# separately from the rest of the top level modules)
if not _root_estimator:
try:
from tensorflow_core import estimator
except ImportError as e:
pass
# And again for tensorboard (comes as summary)
try:
from tensorflow_core import summary
except ImportError as e:
pass
# Also import module aliases
try:
from tensorflow_core import losses, metrics, initializers, optimizers
except ImportError:
pass
# LINT.ThenChange(//tensorflow/virtual_root_template_v1.__init__.py.oss)
| tensorflow-master | tensorflow/virtual_root_template_v2.__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import os as _os
import site as _site
import sys as _sys
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.tools import module_util as _module_util
# API IMPORTS PLACEHOLDER
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
try:
from tensorflow_estimator.python.estimator.api._v1 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
except ImportError:
pass
from tensorflow.python.util.lazy_loader import LazyLoader # pylint: disable=g-import-not-at-top
_CONTRIB_WARNING = """
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
* https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
"""
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib',
_CONTRIB_WARNING)
del LazyLoader
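# Note (descriptive only): 'contrib' above is loaded lazily, so the actual
# tensorflow.contrib import is deferred until the first attribute access
# (e.g. the first use of tf.contrib.<submodule>), at which point LazyLoader
# also surfaces the _CONTRIB_WARNING message passed above.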
# The templated code that replaces the placeholder above sometimes
# sets the __all__ variable. If it does, we have to be sure to add
# "contrib".
if '__all__' in vars():
vars()['__all__'].append('contrib')
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
# The 'app' module will be imported as part of the placeholder section above.
app.flags = flags # pylint: disable=undefined-variable
if '__all__' in vars():
vars()['__all__'].append('flags')
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get site-packages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
for s in _site_packages_dirs:
# TODO(gunan): Add sanity checks to loaded modules here.
plugin_dir = _os.path.join(s, 'tensorflow-plugins')
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
try:
del python
if '__all__' in vars():
vars()['__all__'].remove('python')
del core
if '__all__' in vars():
vars()['__all__'].remove('core')
except NameError:
# Don't fail if these modules are not available.
# E.g., this file is originally placed under tensorflow/_api/v1, which
# does not have the 'python' and 'core' directories. Then, it is copied
# to tensorflow/, which does have these two directories.
pass
# Similarly for compiler. Do it separately to make sure we do this even if the
# others don't exist.
try:
del compiler
if '__all__' in vars():
vars()['__all__'].remove('compiler')
except NameError:
pass
# pylint: enable=undefined-variable
| tensorflow-master | tensorflow/api_template_v1.__init__.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# LINT.IfChange
"""TensorFlow root package"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import sys as _sys
import importlib as _importlib
import types as _types
# Since TensorFlow Python code now resides in tensorflow_core but TensorFlow
# ecosystem code (e.g. estimator, but also even tensorflow itself) imports
# tensorflow, we need to do forwarding between the two. To do so, we use a lazy
# loader to load and forward the top level modules. We cannot use the LazyLoader
# defined by tensorflow at tensorflow/python/util/lazy_loader.py, because using
# that would already require importing tensorflow. Hence, we define it inline.
class _LazyLoader(_types.ModuleType):
"""Lazily import a module so that we can forward it."""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(_LazyLoader, self).__init__(name)
def _load(self):
"""Import the target module and insert it into the parent's namespace."""
module = _importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
# Forwarding a module is as simple as lazy loading the module from the new path
# and then registering it to sys.modules using the old path
def _forward_module(old_name):
parts = old_name.split(".")
parts[0] = parts[0] + "_core"
local_name = parts[-1]
existing_name = ".".join(parts)
_module = _LazyLoader(local_name, globals(), existing_name)
return _sys.modules.setdefault(old_name, _module)
# This list should contain all modules _immediately_ under tensorflow
_top_level_modules = [
"tensorflow._api",
"tensorflow.python",
"tensorflow.tools",
"tensorflow.core",
"tensorflow.compiler",
"tensorflow.lite",
"tensorflow.keras",
"tensorflow.contrib",
"tensorflow.compat",
"tensorflow.summary", # tensorboard
"tensorflow.examples",
]
# Estimator needs to be handled separately so that both
# import tensorflow_estimator and import tensorflow.estimator still work.
# Only in the second case do we actually need to do forwarding; the first case
# already defines most of the hierarchy, and eagerly forwarding would result in
# an import loop.
if "tensorflow_estimator" not in _sys.modules:
_root_estimator = False
_top_level_modules.append("tensorflow.estimator")
else:
_root_estimator = True
# Lazy load all of the _top_level_modules; we don't need their names anymore.
for _m in _top_level_modules:
_forward_module(_m)
# We still need all the names that are toplevel on tensorflow_core
from tensorflow_core import *
# We also need to bring in keras if available in tensorflow_core
# Above import * doesn't import it as __all__ is updated before keras is hooked
try:
from tensorflow_core import keras
except ImportError as e:
pass
# Similarly for estimator, but only if this file is not read via an
# import tensorflow_estimator (same reasoning as above when forwarding estimator
# separately from the rest of the top level modules)
if not _root_estimator:
try:
from tensorflow_core import estimator
except ImportError as e:
pass
# And again for tensorboard (comes as summary)
try:
from tensorflow_core import summary
except ImportError as e:
pass
# In V1 API we need to print deprecation messages
from tensorflow.python.util import deprecation_wrapper as _deprecation
if not isinstance(_sys.modules[__name__], _deprecation.DeprecationWrapper):
_sys.modules[__name__] = _deprecation.DeprecationWrapper(
_sys.modules[__name__], "")
# LINT.ThenChange(//tensorflow/virtual_root_template_v2.__init__.py.oss)
| tensorflow-master | tensorflow/virtual_root_template_v1.__init__.py |
| tensorflow-master | tensorflow/tools/__init__.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.x TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import ipynb
from tensorflow.tools.compatibility import tf_upgrade_v2
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
# Make straightforward changes to convert to 2.0. In harder cases,
# use compat.v1.
_DEFAULT_MODE = "DEFAULT"
# Convert to use compat.v1.
_SAFETY_MODE = "SAFETY"
def process_file(in_filename, out_filename, upgrader):
"""Process a file of type `.py` or `.ipynb`."""
if in_filename.endswith(".py"):
files_processed, report_text, errors = \
upgrader.process_file(in_filename, out_filename)
elif in_filename.endswith(".ipynb"):
files_processed, report_text, errors = \
ipynb.process_file(in_filename, out_filename, upgrader)
else:
raise NotImplementedError(
"Currently converter only supports python or ipynb")
return files_processed, report_text, errors
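# Example (illustrative only, mirroring what main() does below): running the
# default 2.0 conversion on a single notebook; the file names are hypothetical.
#
#   upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
#   processed, report, errors = process_file(
#       "notebook.ipynb", "notebook_v2.ipynb", upgrader)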
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file from 1.x to 2.0
Simple usage:
tf_upgrade_v2.py --infile foo.py --outfile bar.py
tf_upgrade_v2.py --infile foo.ipynb --outfile bar.ipynb
tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=True)
parser.add_argument(
"--inplace",
dest="in_place",
help=("If converting a set of files, whether to "
"allow the conversion to be performed on the "
"input files."),
action="store_true")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
parser.add_argument(
"--mode",
dest="mode",
choices=[_DEFAULT_MODE, _SAFETY_MODE],
help=("Upgrade script mode. Supported modes:\n"
"%s: Perform only straightforward conversions to upgrade to "
"2.0. In more difficult cases, switch to use compat.v1.\n"
"%s: Keep 1.* code intact and import compat.v1 "
"module. Also disable 2.0 behavior to ensure code "
"that requires 1.X behavior continues to work." %
(_DEFAULT_MODE, _SAFETY_MODE)),
default=_DEFAULT_MODE)
parser.add_argument(
"--print_all",
dest="print_all",
help="Print full log to stdout instead of just printing errors",
action="store_true")
args = parser.parse_args()
if args.mode == _SAFETY_MODE:
change_spec = tf_upgrade_v2_safety.TFAPIChangeSpec()
else:
change_spec = tf_upgrade_v2.TFAPIChangeSpec()
upgrade = ast_edits.ASTCodeUpgrader(change_spec)
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
if not args.in_place and not args.output_file:
raise ValueError(
"--outfile=<output file> argument is required when converting a "
"single file.")
if args.in_place and args.output_file:
raise ValueError(
"--outfile argument is invalid when when converting in place")
output_file = args.input_file if args.in_place else args.output_file
files_processed, report_text, errors = process_file(
args.input_file, output_file, upgrade)
errors = {args.input_file: errors}
files_processed = 1
elif args.input_tree:
if not args.in_place and not args.output_tree:
raise ValueError(
"--outtree=<output directory> argument is required when converting a "
"file tree.")
if args.in_place and args.output_tree:
raise ValueError(
"--outtree argument is invalid when when converting in place")
output_tree = args.input_tree if args.in_place else args.output_tree
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
num_errors = 0
report = []
for f in errors:
if errors[f]:
num_errors += len(errors[f])
report.append("-" * 80 + "\n")
report.append("File: %s\n" % f)
report.append("-" * 80 + "\n")
report.append("\n".join(errors[f]) + "\n")
report = ("TensorFlow 2.0 Upgrade Script\n"
"-----------------------------\n"
"Converted %d files\n" % files_processed +
"Detected %d issues that require attention" % num_errors + "\n" +
"-" * 80 + "\n") + "".join(report)
detailed_report_header = "=" * 80 + "\n"
detailed_report_header += "Detailed log follows:\n\n"
detailed_report_header += "=" * 80 + "\n"
with open(report_filename, "w") as report_file:
report_file.write(report)
report_file.write(detailed_report_header)
report_file.write(report_text)
if args.print_all:
print(report)
print(detailed_report_header)
print(report_text)
else:
print(report)
print("\nMake sure to read the detailed log %r\n" % report_filename)
if __name__ == "__main__":
main()
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade_v2_main.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module deprecation warnings for TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import ast_edits
_CONTRIB_WARNING = (
ast_edits.ERROR,
"<function name> cannot be converted automatically. tf.contrib will not"
" be distributed with TensorFlow 2.0, please consider an alternative in"
" non-contrib TensorFlow, a community-maintained repository such as "
"tensorflow/addons, or fork the required code.")
_FLAGS_WARNING = (
ast_edits.ERROR,
"tf.flags has been removed, please use the argparse or absl"
" modules if you need command line parsing."
)
_CONTRIB_CUDNN_RNN_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.cudnn_rnn.* has been deprecated, "
"and the CuDNN kernel has been integrated with "
"tf.keras.layers.LSTM/GRU in TensorFlow 2.0. Please check the new API "
"and use that instead."
)
_CONTRIB_RNN_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.rnn.* has been deprecated, and "
"widely used cells/functions will be moved to tensorflow/addons "
"repository. Please check it there and file Github issues if necessary."
)
_CONTRIB_DIST_STRAT_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.distribute.* have been migrated to"
"tf.distribute.*. Please check out the new module for updates APIs.")
MODULE_DEPRECATIONS = {
"tf.contrib": _CONTRIB_WARNING,
"tf.contrib.cudnn_rnn": _CONTRIB_CUDNN_RNN_WARNING,
"tf.contrib.rnn": _CONTRIB_RNN_WARNING,
"tf.flags": _FLAGS_WARNING,
"tf.contrib.distribute": _CONTRIB_DIST_STRAT_WARNING
}
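# Each value above is a (severity, message) pair built from the ast_edits
# severity constants; an illustrative lookup (not part of the original file):
#   severity, message = MODULE_DEPRECATIONS["tf.flags"]  # severity == ast_edits.ERROR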
| tensorflow-master | tensorflow/tools/compatibility/module_deprecations_v2.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a list of renames between TensorFlow 1.* and 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import renames_v2
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames in tf_upgrade_v2.py,
# use the OLD function name.
# These renames happen after the arguments have been processed.
manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.compat.v1.batch_gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_patches",
"tf.image.extract_image_patches":
"tf.image.extract_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.GFile":
"tf.io.gfile.GFile",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Open":
"tf.io.gfile.GFile",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.cluster_resolver.ClusterResolver":
"tf.distribute.cluster_resolver.ClusterResolver",
"tf.contrib.cluster_resolver.GceClusterResolver":
"tf.distribute.cluster_resolver.GCEClusterResolver",
"tf.contrib.cluster_resolver.KubernetesClusterResolver":
"tf.distribute.cluster_resolver.KubernetesClusterResolver",
"tf.contrib.cluster_resolver.SimpleClusterResolver":
"tf.distribute.cluster_resolver.SimpleClusterResolver",
"tf.contrib.cluster_resolver.SlurmClusterResolver":
"tf.distribute.cluster_resolver.SlurmClusterResolver",
"tf.contrib.cluster_resolver.TFConfigClusterResolver":
"tf.distribute.cluster_resolver.TFConfigClusterResolver",
"tf.contrib.cluster_resolver.TPUClusterResolver":
"tf.distribute.cluster_resolver.TPUClusterResolver",
"tf.contrib.cluster_resolver.UnionClusterResolver":
"tf.distribute.cluster_resolver.UnionClusterResolver",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.distribute.CrossDeviceOps":
"tf.distribute.CrossDeviceOps",
"tf.contrib.distribute.ReductionToOneDeviceCrossDeviceOps":
"tf.distribute.ReductionToOneDevice",
"tf.contrib.estimator.make_early_stopping_hook":
"tf.estimator.experimental.make_early_stopping_hook",
"tf.contrib.estimator.stop_if_higher_hook":
"tf.estimator.experimental.stop_if_higher_hook",
"tf.contrib.estimator.stop_if_lower_hook":
"tf.estimator.experimental.stop_if_lower_hook",
"tf.contrib.estimator.stop_if_no_decrease_hook":
"tf.estimator.experimental.stop_if_no_decrease_hook",
"tf.contrib.estimator.stop_if_no_increase_hook":
"tf.estimator.experimental.stop_if_no_increase_hook",
"tf.contrib.framework.CriticalSection":
"tf.CriticalSection",
"tf.contrib.framework.is_tensor":
"tf.is_tensor",
"tf.contrib.framework.load_variable":
"tf.train.load_variable",
"tf.contrib.framework.nest.assert_same_structure":
"tf.nest.assert_same_structure",
"tf.contrib.framework.nest.flatten":
"tf.nest.flatten",
"tf.contrib.framework.nest.is_sequence":
"tf.nest.is_nested",
"tf.contrib.framework.nest.map_structure":
"tf.nest.map_structure",
"tf.contrib.framework.nest.pack_sequence_as":
"tf.nest.pack_sequence_as",
"tf.contrib.batching.batch_function":
"tf.nondifferentiable_batch_function",
"tf.contrib.util.constant_value":
"tf.get_static_value",
"tf.contrib.saved_model.load_keras_model":
"tf.keras.experimental.load_from_saved_model",
"tf.contrib.saved_model.save_keras_model":
"tf.keras.experimental.export_saved_model",
"tf.contrib.rnn.RNNCell":
"tf.compat.v1.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.rnn.BasicLSTMCell":
"tf.compat.v1.nn.rnn_cell.BasicLSTMCell",
"tf.contrib.rnn.BasicRNNCell":
"tf.compat.v1.nn.rnn_cell.BasicRNNCell",
"tf.contrib.rnn.GRUCell":
"tf.compat.v1.nn.rnn_cell.GRUCell",
"tf.contrib.rnn.LSTMCell":
"tf.compat.v1.nn.rnn_cell.LSTMCell",
"tf.contrib.rnn.MultiRNNCell":
"tf.compat.v1.nn.rnn_cell.MultiRNNCell",
"tf.contrib.rnn.static_rnn":
"tf.compat.v1.nn.static_rnn",
"tf.contrib.rnn.static_state_saving_rnn":
"tf.compat.v1.nn.static_state_saving_rnn",
"tf.contrib.rnn.static_bidirectional_rnn":
"tf.compat.v1.nn.static_bidirectional_rnn",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.contrib.summary.all_summary_ops":
"tf.compat.v1.summary.all_v2_summary_ops",
"tf.contrib.summary.always_record_summaries":
"tf.compat.v2.summary.record_if",
"tf.contrib.summary.audio":
"tf.compat.v2.summary.audio",
"tf.contrib.summary.create_file_writer":
"tf.compat.v2.summary.create_file_writer",
"tf.contrib.summary.flush":
"tf.compat.v2.summary.flush",
"tf.contrib.summary.generic":
"tf.compat.v2.summary.write",
"tf.contrib.summary.histogram":
"tf.compat.v2.summary.histogram",
"tf.contrib.summary.image":
"tf.compat.v2.summary.image",
"tf.contrib.summary.initialize":
"tf.compat.v1.summary.initialize",
"tf.contrib.summary.never_record_summaries":
"tf.compat.v2.summary.record_if",
"tf.contrib.summary.scalar":
"tf.compat.v2.summary.scalar",
"tf.contrib.tpu.CrossShardOptimizer":
"tf.compat.v1.tpu.CrossShardOptimizer",
"tf.contrib.tpu.InputPipelineConfig":
"tf.compat.v1.estimator.tpu.InputPipelineConfig",
"tf.contrib.tpu.RunConfig":
"tf.compat.v1.estimator.tpu.RunConfig",
"tf.contrib.tpu.TPUConfig":
"tf.compat.v1.estimator.tpu.TPUConfig",
"tf.contrib.tpu.TPUEstimator":
"tf.compat.v1.estimator.tpu.TPUEstimator",
"tf.contrib.tpu.TPUEstimatorSpec":
"tf.compat.v1.estimator.tpu.TPUEstimatorSpec",
"tf.contrib.tpu.batch_parallel":
"tf.compat.v1.tpu.batch_parallel",
"tf.contrib.tpu.bfloat16_scope":
"tf.compat.v1.tpu.bfloat16_scope",
"tf.contrib.tpu.core":
"tf.compat.v1.tpu.core",
"tf.contrib.tpu.cross_replica_sum":
"tf.compat.v1.tpu.cross_replica_sum",
"tf.contrib.tpu.initialize_system":
"tf.compat.v1.tpu.initialize_system",
"tf.contrib.tpu.outside_compilation":
"tf.compat.v1.tpu.outside_compilation",
"tf.contrib.tpu.replicate":
"tf.compat.v1.tpu.replicate",
"tf.contrib.tpu.rewrite":
"tf.compat.v1.tpu.rewrite",
"tf.contrib.tpu.shard":
"tf.compat.v1.tpu.shard",
"tf.contrib.tpu.shutdown_system":
"tf.compat.v1.tpu.shutdown_system",
"tf.contrib.training.checkpoints_iterator":
"tf.train.checkpoints_iterator",
"tf.contrib.layers.recompute_grad":
"tf.recompute_grad",
"tf.count_nonzero":
"tf.math.count_nonzero",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
# TODO(b/129398290)
"tf.string_split":
"tf.compat.v1.string_split",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.sdca_fprint":
"tf.raw_ops.SdcaFprint",
"tf.train.sdca_optimizer":
"tf.raw_ops.SdcaOptimizer",
"tf.train.sdca_shrink_l1":
"tf.raw_ops.SdcaShrinkL1",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
# tf.saved_model.load in 1.x has no equivalent in 2.x, but there is a
# symbol with the same name.
"tf.saved_model.load":
"tf.compat.v1.saved_model.load",
"tf.saved_model.load_v2":
"tf.compat.v2.saved_model.load",
"tf.image.resize_images":
"tf.image.resize",
"tf.random_poisson":
"tf.random.poisson",
"tf.debugging.assert_greater":
"tf.compat.v1.debugging.assert_greater",
"tf.debugging.assert_greater_equal":
"tf.compat.v1.debugging.assert_greater_equal",
"tf.debugging.assert_integer":
"tf.compat.v1.debugging.assert_integer",
"tf.debugging.assert_less":
"tf.compat.v1.debugging.assert_less",
"tf.debugging.assert_less_equal":
"tf.compat.v1.debugging.assert_less_equal",
"tf.debugging.assert_near":
"tf.compat.v1.debugging.assert_near",
"tf.debugging.assert_negative":
"tf.compat.v1.debugging.assert_negative",
"tf.debugging.assert_non_negative":
"tf.compat.v1.debugging.assert_non_negative",
"tf.debugging.assert_non_positive":
"tf.compat.v1.debugging.assert_non_positive",
"tf.debugging.assert_none_equal":
"tf.compat.v1.debugging.assert_none_equal",
"tf.debugging.assert_type":
"tf.compat.v1.debugging.assert_type",
"tf.debugging.assert_positive":
"tf.compat.v1.debugging.assert_positive",
"tf.debugging.assert_equal":
"tf.compat.v1.debugging.assert_equal",
"tf.debugging.assert_scalar":
"tf.compat.v1.debugging.assert_scalar",
"tf.assert_equal":
"tf.compat.v1.assert_equal",
"tf.assert_less":
"tf.compat.v1.assert_less",
"tf.assert_greater":
"tf.compat.v1.assert_greater",
"tf.debugging.assert_rank":
"tf.compat.v1.debugging.assert_rank",
"tf.debugging.assert_rank_at_least":
"tf.compat.v1.debugging.assert_rank_at_least",
"tf.debugging.assert_rank_in":
"tf.compat.v1.debugging.assert_rank_in",
"tf.errors.exception_type_from_error_code":
"tf.compat.v1.errors.exception_type_from_error_code",
"tf.errors.error_code_from_exception_type":
"tf.compat.v1.errors.error_code_from_exception_type",
"tf.errors.raise_exception_on_not_ok_status":
"tf.compat.v1.errors.raise_exception_on_not_ok_status",
"tf.assert_rank":
"tf.compat.v1.assert_rank",
"tf.nn.max_pool":
"tf.nn.max_pool2d",
"tf.nn.avg_pool":
"tf.nn.avg_pool2d",
"tf.keras.initializers.zeros":
"tf.compat.v1.keras.initializers.zeros",
"tf.keras.initializers.Zeros":
"tf.compat.v1.keras.initializers.Zeros",
"tf.keras.initializers.ones":
"tf.compat.v1.keras.initializers.ones",
"tf.keras.initializers.Ones":
"tf.compat.v1.keras.initializers.Ones",
"tf.keras.initializers.constant":
"tf.compat.v1.keras.initializers.constant",
"tf.keras.initializers.Constant":
"tf.compat.v1.keras.initializers.Constant",
"tf.keras.initializers.VarianceScaling":
"tf.compat.v1.keras.initializers.VarianceScaling",
"tf.keras.initializers.Orthogonal":
"tf.compat.v1.keras.initializers.Orthogonal",
"tf.keras.initializers.orthogonal":
"tf.compat.v1.keras.initializers.orthogonal",
"tf.keras.initializers.Identity":
"tf.compat.v1.keras.initializers.Identity",
"tf.keras.initializers.identity":
"tf.compat.v1.keras.initializers.identity",
"tf.keras.initializers.glorot_uniform":
"tf.compat.v1.keras.initializers.glorot_uniform",
"tf.keras.initializers.glorot_normal":
"tf.compat.v1.keras.initializers.glorot_normal",
"tf.keras.initializers.lecun_normal":
"tf.compat.v1.keras.initializers.lecun_normal",
"tf.keras.initializers.lecun_uniform":
"tf.compat.v1.keras.initializers.lecun_uniform",
"tf.keras.initializers.he_normal":
"tf.compat.v1.keras.initializers.he_normal",
"tf.keras.initializers.he_uniform":
"tf.compat.v1.keras.initializers.he_uniform",
"tf.keras.initializers.TruncatedNormal":
"tf.compat.v1.keras.initializers.TruncatedNormal",
"tf.keras.initializers.truncated_normal":
"tf.compat.v1.keras.initializers.truncated_normal",
"tf.keras.initializers.RandomUniform":
"tf.compat.v1.keras.initializers.RandomUniform",
"tf.keras.initializers.uniform":
"tf.compat.v1.keras.initializers.uniform",
"tf.keras.initializers.random_uniform":
"tf.compat.v1.keras.initializers.random_uniform",
"tf.keras.initializers.RandomNormal":
"tf.compat.v1.keras.initializers.RandomNormal",
"tf.keras.initializers.normal":
"tf.compat.v1.keras.initializers.normal",
"tf.keras.initializers.random_normal":
"tf.compat.v1.keras.initializers.random_normal",
"tf.zeros_initializer":
"tf.compat.v1.zeros_initializer",
"tf.initializers.zeros":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.ones_initializer",
"tf.initializers.ones":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.constant_initializer",
"tf.initializers.constant":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.random_uniform_initializer",
"tf.initializers.random_uniform":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.random_normal_initializer",
"tf.initializers.random_normal":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.truncated_normal_initializer",
"tf.initializers.truncated_normal":
"tf.compat.v1.initializers.truncated_normal",
"tf.variance_scaling_initializer":
"tf.compat.v1.variance_scaling_initializer",
"tf.initializers.variance_scaling":
"tf.compat.v1.initializers.variance_scaling",
"tf.orthogonal_initializer":
"tf.compat.v1.orthogonal_initializer",
"tf.initializers.orthogonal":
"tf.compat.v1.initializers.orthogonal",
"tf.glorot_uniform_initializer":
"tf.compat.v1.glorot_uniform_initializer",
"tf.initializers.glorot_uniform":
"tf.compat.v1.initializers.glorot_uniform",
"tf.glorot_normal_initializer":
"tf.compat.v1.glorot_normal_initializer",
"tf.initializers.glorot_normal":
"tf.compat.v1.initializers.glorot_normal",
"tf.initializers.identity":
"tf.compat.v1.initializers.identity",
"tf.initializers.lecun_normal":
"tf.compat.v1.initializers.lecun_normal",
"tf.initializers.lecun_uniform":
"tf.compat.v1.initializers.lecun_uniform",
"tf.initializers.he_normal":
"tf.compat.v1.initializers.he_normal",
"tf.initializers.he_uniform":
"tf.compat.v1.initializers.he_uniform",
"tf.data.experimental.map_and_batch_with_legacy_function":
"tf.compat.v1.data.experimental.map_and_batch_with_legacy_function",
"tf.nn.conv2d_backprop_input":
"tf.nn.conv2d_transpose",
"tf.test.compute_gradient":
"tf.compat.v1.test.compute_gradient",
"tf.floor_div":
"tf.math.floordiv",
"tf.where":
"tf.compat.v1.where",
"tf.where_v2":
"tf.compat.v2.where",
}
# pylint: enable=line-too-long
symbol_renames = renames_v2.renames
symbol_renames.update(manual_symbol_renames)
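# Illustrative lookups (not part of the original module): since dict.update
# overwrites duplicate keys, entries in manual_symbol_renames take precedence
# over the autogenerated renames_v2.renames. For example:
#   symbol_renames["tf.gfile.Exists"]       # -> "tf.io.gfile.exists"
#   symbol_renames["tf.batch_to_space_nd"]  # -> "tf.batch_to_space"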
| tensorflow-master | tensorflow/tools/compatibility/all_renames_v2.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.compat.v1.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.compat.v1.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.compat.v1.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.decode_raw':
'tf.io.decode_raw',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_control_flow_v2':
'tf.compat.v1.disable_control_flow_v2',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_control_flow_v2':
'tf.compat.v1.enable_control_flow_v2',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.estimator.tpu.InputPipelineConfig':
'tf.compat.v1.estimator.tpu.InputPipelineConfig',
'tf.estimator.tpu.RunConfig':
'tf.compat.v1.estimator.tpu.RunConfig',
'tf.estimator.tpu.TPUConfig':
'tf.compat.v1.estimator.tpu.TPUConfig',
'tf.estimator.tpu.TPUEstimator':
'tf.compat.v1.estimator.tpu.TPUEstimator',
'tf.estimator.tpu.TPUEstimatorSpec':
'tf.compat.v1.estimator.tpu.TPUEstimatorSpec',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floor_div':
'tf.compat.v1.floor_div',
'tf.floordiv':
'tf.math.floordiv',
'tf.floormod':
'tf.math.floormod',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_crop_or_pad':
'tf.image.resize_with_crop_or_pad',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.image.transpose',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.backend.set_session':
'tf.compat.v1.keras.backend.set_session',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.experimental.get_potentially_supported_ops':
'tf.compat.v1.lite.experimental.get_potentially_supported_ops',
'tf.lite.experimental.nn.TFLiteLSTMCell':
'tf.compat.v1.lite.experimental.nn.TFLiteLSTMCell',
'tf.lite.experimental.nn.TfLiteRNNCell':
'tf.compat.v1.lite.experimental.nn.TfLiteRNNCell',
'tf.lite.experimental.nn.dynamic_rnn':
'tf.compat.v1.lite.experimental.nn.dynamic_rnn',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.make_tensor_proto':
'tf.compat.v1.make_tensor_proto',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.mod':
'tf.math.mod',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.nn.ctc_loss',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.no_regularizer':
'tf.compat.v1.no_regularizer',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.ragged.placeholder':
'tf.compat.v1.ragged.placeholder',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.SparseConditionalAccumulator':
'tf.compat.v1.sparse.SparseConditionalAccumulator',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.all_v2_summary_ops':
'tf.compat.v1.summary.all_v2_summary_ops',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.tpu.CrossShardOptimizer':
'tf.compat.v1.tpu.CrossShardOptimizer',
'tf.tpu.batch_parallel':
'tf.compat.v1.tpu.batch_parallel',
'tf.tpu.bfloat16_scope':
'tf.compat.v1.tpu.bfloat16_scope',
'tf.tpu.core':
'tf.compat.v1.tpu.core',
'tf.tpu.cross_replica_sum':
'tf.compat.v1.tpu.cross_replica_sum',
'tf.tpu.initialize_system':
'tf.compat.v1.tpu.initialize_system',
'tf.tpu.outside_compilation':
'tf.compat.v1.tpu.outside_compilation',
'tf.tpu.replicate':
'tf.compat.v1.tpu.replicate',
'tf.tpu.rewrite':
'tf.compat.v1.tpu.rewrite',
'tf.tpu.shard':
'tf.compat.v1.tpu.shard',
'tf.tpu.shutdown_system':
'tf.compat.v1.tpu.shutdown_system',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.experimental.disable_mixed_precision_graph_rewrite':
'tf.compat.v1.train.experimental.disable_mixed_precision_graph_rewrite',
'tf.train.experimental.enable_mixed_precision_graph_rewrite':
'tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.where_v2':
'tf.where',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
| tensorflow-master | tensorflow/tools/compatibility/renames_v2.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.tools.compatibility import ast_edits
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.batch_matmul": {
"adj_x": "adjoint_a",
"adj_y": "adjoint_b",
},
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
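# For illustration, using the "tf.argmax" entry above: a pre-1.0 call such as
#
#   tf.argmax(x, dimension=1)
#
# is rewritten to
#
#   tf.argmax(x, axis=1)
#
# (x is a placeholder). Only the keyword spelling changes; the argument's
# position and value are left as they were.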
# Mapping from a function's old name to its new name.
self.symbol_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_fft2d": "tf.fft2d",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
"tf.op_scope": "tf.name_scope",
}
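# For illustration, using the "tf.mul" entry above: code such as
#
#   z = tf.mul(a, b)
#
# becomes
#
#   z = tf.multiply(a, b)
#
# (a, b, z are placeholders). This is a pure name substitution; arguments are
# only touched if the same function also appears in the keyword-rename table
# above or the reorder table below.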
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions whose arguments were reordered are changed to use the new keyword
# arguments for safety when positional arguments are used (see the sketch
# after this mapping). If you have already reordered the positional arguments
# yourself, this conversion could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"
],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"
],
"tf.nn.sigmoid_cross_entropy_with_logits": ["logits", "labels", "name"],
"tf.op_scope": ["values", "name", "default_name"],
}
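# For illustration: reordered functions have their positional arguments
# converted to keyword arguments using the (old-order) names listed above, so
# a pre-1.0 call such as
#
#   tf.split(0, 3, value)
#
# is rewritten to roughly
#
#   tf.split(axis=0, num_or_size_splits=3, value=value)
#
# (value is a placeholder), which stays correct under the reordered 1.0
# signature even though the positions changed.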
# Warnings that should be printed if corresponding functions are used.
self.function_warnings = {
"tf.reverse": (
ast_edits.ERROR,
"tf.reverse has had its argument semantics changed "
"significantly. The converter cannot detect this reliably, so "
"you need to inspect this usage manually.\n"),
}
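# For illustration: entries here do not rewrite the call. A line such as
#
#   tf.reverse(t, dims)
#
# (t, dims are placeholders) is left unchanged, and the message above is
# surfaced in the conversion report so the usage can be fixed by hand.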
self.module_deprecations = {}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=False)
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored. "
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
report_text = None
report_filename = args.report_filename
files_processed = 0
errors = []  # default so the summary below is well-defined with no input flag
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader in safety mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
class TfUpgradeV2SafetyTest(test_util.TensorFlowTestCase):
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2_safety.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
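# Typical use inside the tests below; new_text is the rewritten source that
# safety mode produces for the given snippet:
#   _, _, _, new_text = self._upgrade("import tensorflow as tf")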
def testContribWarning(self):
text = "tf.contrib.foo()"
_, report, _, _ = self._upgrade(text)
expected_info = "tf.contrib will not be distributed"
self.assertIn(expected_info, report)
def testTensorFlowImport(self):
text = "import tensorflow as tf"
expected_text = ("import tensorflow.compat.v1 as tf" + os.linesep +
"tf.disable_v2_behavior()" + os.linesep)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow as tf, other_import as y"
expected_text = ("import tensorflow.compat.v1 as tf, other_import as y" +
os.linesep + "tf.disable_v2_behavior()" + os.linesep)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow"
expected_text = ("import tensorflow.compat.v1 as tensorflow" + os.linesep +
"tensorflow.disable_v2_behavior()" + os.linesep)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.foo"
expected_text = "import tensorflow.compat.v1.foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.foo as bar"
expected_text = "import tensorflow.compat.v1.foo as bar"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowImportInIndent(self):
text = """
try:
import tensorflow as tf # import line
tf.ones([4, 5])
except AttributeError:
pass
"""
expected_text = """
try:
import tensorflow.compat.v1 as tf # import line
tf.disable_v2_behavior()
tf.ones([4, 5])
except AttributeError:
pass
"""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowFromImport(self):
text = "from tensorflow import foo"
expected_text = "from tensorflow.compat.v1 import foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "from tensorflow.foo import bar"
expected_text = "from tensorflow.compat.v1.foo import bar"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "from tensorflow import *"
expected_text = "from tensorflow.compat.v1 import *"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowImportAlreadyHasCompat(self):
text = "import tensorflow.compat.v1 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "import tensorflow.compat.v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow.compat import v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
def testTensorFlowDontChangeContrib(self):
text = "import tensorflow.contrib as foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow import contrib"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade_v2_safety_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_edits which is used in tf upgraders.
All of the tests assume that we want to change from an API containing
import foo as f
def f(a, b, kw1, kw2): ...
def g(a, b, kw1, c, kw1_alias): ...
def g2(a, b, kw1, c, d, kw1_alias): ...
def h(a, kw1, kw2, kw1_alias, kw2_alias): ...
and the changes to the API consist of renaming, reordering, and/or removing
arguments. Thus, we want to be able to generate changes to produce each of the
following new APIs:
import bar as f
def f(a, b, kw1, kw3): ...
def f(a, b, kw2, kw1): ...
def f(a, b, kw3, kw1): ...
def g(a, b, kw1, c): ...
def g(a, b, c, kw1): ...
def g2(a, b, kw1, c, d): ...
def g2(a, b, c, d, kw1): ...
def h(a, kw1, kw2): ...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
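# A hedged, minimal illustration (added for exposition; not part of the
# original test file) of the machinery exercised below: start from
# ast_edits.NoUpdateSpec, register a single keyword rename, and run
# ast_edits.ASTCodeUpgrader over an in-memory file. No real files are opened;
# the file name arguments are only used for reporting.
def _example_keyword_rename_upgrade():
  """Rewrites "f(a, b, kw1=c, kw2=d)" under a kw2 -> kw3 rename."""
  spec = ast_edits.NoUpdateSpec()
  spec.function_keyword_renames["f"] = {"kw2": "kw3"}
  in_file = six.StringIO("f(a, b, kw1=c, kw2=d)\n")
  out_file = six.StringIO()
  ast_edits.ASTCodeUpgrader(spec).process_opened_file(
      "example_in.py", in_file, "example_out.py", out_file)
  return out_file.getvalue()  # Expected: "f(a, b, kw1=c, kw3=d)\n"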
class ModuleDeprecationSpec(ast_edits.NoUpdateSpec):
"""A specification which deprecates 'a.b'."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.module_deprecations.update({"a.b": (ast_edits.ERROR, "a.b is evil.")})
class RenameKeywordSpec(ast_edits.NoUpdateSpec):
"""A specification where kw2 gets renamed to kw3.
The new API is
def f(a, b, kw1, kw3): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.update_renames()
def update_renames(self):
self.function_keyword_renames["f"] = {"kw2": "kw3"}
class ReorderKeywordSpec(ast_edits.NoUpdateSpec):
"""A specification where kw2 gets moved in front of kw1.
The new API is
def f(a, b, kw2, kw1): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.update_reorders()
def update_reorders(self):
# Note that these should be in the old order.
self.function_reorders["f"] = ["a", "b", "kw1", "kw2"]
class ReorderAndRenameKeywordSpec(ReorderKeywordSpec, RenameKeywordSpec):
"""A specification where kw2 gets moved in front of kw1 and is changed to kw3.
The new API is
def f(a, b, kw3, kw1): ...
"""
def __init__(self):
ReorderKeywordSpec.__init__(self)
RenameKeywordSpec.__init__(self)
self.update_renames()
self.update_reorders()
class RemoveDeprecatedAliasKeyword(ast_edits.NoUpdateSpec):
"""A specification where kw1_alias is removed in g.
The new API is
def g(a, b, kw1, c): ...
def g2(a, b, kw1, c, d): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.function_keyword_renames["g"] = {"kw1_alias": "kw1"}
self.function_keyword_renames["g2"] = {"kw1_alias": "kw1"}
class RemoveDeprecatedAliasAndReorderRest(RemoveDeprecatedAliasKeyword):
"""A specification where kw1_alias is removed in g.
The new API is
def g(a, b, c, kw1): ...
def g2(a, b, c, d, kw1): ...
"""
def __init__(self):
RemoveDeprecatedAliasKeyword.__init__(self)
# Note that these should be in the old order.
self.function_reorders["g"] = ["a", "b", "kw1", "c"]
self.function_reorders["g2"] = ["a", "b", "kw1", "c", "d"]
class RemoveMultipleKeywordArguments(ast_edits.NoUpdateSpec):
"""A specification where both keyword aliases are removed from h.
The new API is
def h(a, kw1, kw2): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.function_keyword_renames["h"] = {
"kw1_alias": "kw1",
"kw2_alias": "kw2",
}
class RenameImports(ast_edits.NoUpdateSpec):
"""Specification for renaming imports."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.import_renames = {
"foo": ast_edits.ImportRename(
"bar",
excluded_prefixes=["foo.baz"])
}
class TestAstEdits(test_util.TensorFlowTestCase):
def _upgrade(self, spec, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(spec)
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return (count, report, errors), out_file.getvalue()
def testModuleDeprecation(self):
text = "a.b.c(a.b.x)"
(_, _, errors), new_text = self._upgrade(ModuleDeprecationSpec(), text)
self.assertEqual(text, new_text)
self.assertIn("Using member a.b.c", errors[0])
self.assertIn("1:0", errors[0])
self.assertIn("Using member a.b.c", errors[0])
self.assertIn("1:6", errors[1])
def testNoTransformIfNothingIsSupplied(self):
text = "f(a, b, kw1=c, kw2=d)\n"
_, new_text = self._upgrade(ast_edits.NoUpdateSpec(), text)
self.assertEqual(new_text, text)
text = "f(a, b, c, d)\n"
_, new_text = self._upgrade(ast_edits.NoUpdateSpec(), text)
self.assertEqual(new_text, text)
def testKeywordRename(self):
"""Test that we get the expected result if renaming kw2 to kw3."""
text = "f(a, b, kw1=c, kw2=d)\n"
expected = "f(a, b, kw1=c, kw3=d)\n"
(_, report, _), new_text = self._upgrade(RenameKeywordSpec(), text)
self.assertEqual(new_text, expected)
self.assertNotIn("Manual check required", report)
# No keywords specified, no reordering, so we should get input as output
text = "f(a, b, c, d)\n"
(_, report, _), new_text = self._upgrade(RenameKeywordSpec(), text)
self.assertEqual(new_text, text)
self.assertNotIn("Manual check required", report)
    # Positional *args can't supply kw2 by name, so a plain rename needs no
    # warning here
text = "f(a, *args)\n"
(_, report, _), _ = self._upgrade(RenameKeywordSpec(), text)
self.assertNotIn("Manual check required", report)
# **kwargs passed in that we cannot inspect, should warn
text = "f(a, b, kw1=c, **kwargs)\n"
(_, report, _), _ = self._upgrade(RenameKeywordSpec(), text)
self.assertIn("Manual check required", report)
def testKeywordReorderWithParens(self):
"""Test that we get the expected result if there are parens around args."""
text = "f((a), ( ( b ) ))\n"
acceptable_outputs = [
# No change is a valid output
text,
# Also cases where all arguments are fully specified are allowed
"f(a=(a), b=( ( b ) ))\n",
# Making the parens canonical is ok
"f(a=(a), b=((b)))\n",
]
_, new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
def testKeywordReorder(self):
"""Test that we get the expected result if kw2 is now before kw1."""
text = "f(a, b, kw1=c, kw2=d)\n"
acceptable_outputs = [
# No change is a valid output
text,
# Just reordering the kw.. args is also ok
"f(a, b, kw2=d, kw1=c)\n",
# Also cases where all arguments are fully specified are allowed
"f(a=a, b=b, kw1=c, kw2=d)\n",
"f(a=a, b=b, kw2=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Keywords are reordered, so we should reorder arguments too
text = "f(a, b, c, d)\n"
acceptable_outputs = [
"f(a, b, d, c)\n",
"f(a=a, b=b, kw1=c, kw2=d)\n",
"f(a=a, b=b, kw2=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Positional *args passed in that we cannot inspect, should warn
text = "f(a, b, *args)\n"
(_, report, _), _ = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn("Manual check required", report)
    # Keyword args (including **kwargs) are unaffected by reordering, so no
    # warning is expected
text = "f(a, b, kw1=c, **kwargs)\n"
(_, report, _), _ = self._upgrade(ReorderKeywordSpec(), text)
self.assertNotIn("Manual check required", report)
def testKeywordReorderAndRename(self):
"""Test that we get the expected result if kw2 is renamed and moved."""
text = "f(a, b, kw1=c, kw2=d)\n"
acceptable_outputs = [
"f(a, b, kw3=d, kw1=c)\n",
"f(a=a, b=b, kw1=c, kw3=d)\n",
"f(a=a, b=b, kw3=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(
ReorderAndRenameKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Keywords are reordered, so we should reorder arguments too
text = "f(a, b, c, d)\n"
acceptable_outputs = [
"f(a, b, d, c)\n",
"f(a=a, b=b, kw1=c, kw3=d)\n",
"f(a=a, b=b, kw3=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(
ReorderAndRenameKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Positional *args passed in that we cannot inspect, should warn
text = "f(a, *args, kw1=c)\n"
(_, report, _), _ = self._upgrade(ReorderAndRenameKeywordSpec(), text)
self.assertIn("Manual check required", report)
# **kwargs passed in that we cannot inspect, should warn
text = "f(a, b, kw1=c, **kwargs)\n"
(_, report, _), _ = self._upgrade(ReorderAndRenameKeywordSpec(), text)
self.assertIn("Manual check required", report)
def testRemoveDeprecatedKeywordAlias(self):
"""Test that we get the expected result if a keyword alias is removed."""
text = "g(a, b, kw1=x, c=c)\n"
acceptable_outputs = [
# Not using deprecated alias, so original is ok
text,
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# No keyword used, should be no change
text = "g(a, b, x, c)\n"
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertEqual(new_text, text)
# If we used the alias, it should get renamed
text = "g(a, b, kw1_alias=x, c=c)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed even if it's last
text = "g(a, b, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAndReorder(self):
"""Test for when a keyword alias is removed and args are reordered."""
text = "g(a, b, kw1=x, c=c)\n"
acceptable_outputs = [
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "g(a, b, x, c)\n"
# Don't accept an output which doesn't reorder c and d
acceptable_outputs = [
"g(a, b, c, x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# If we used the alias, it should get renamed
text = "g(a, b, kw1_alias=x, c=c)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed and reordered even if it's last
text = "g(a, b, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAndReorder2(self):
"""Same as testRemoveDeprecatedKeywordAndReorder but on g2 (more args)."""
text = "g2(a, b, kw1=x, c=c, d=d)\n"
acceptable_outputs = [
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "g2(a, b, x, c, d)\n"
# Don't accept an output which doesn't reorder c and d
acceptable_outputs = [
"g2(a, b, c, d, x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# If we used the alias, it should get renamed
text = "g2(a, b, kw1_alias=x, c=c, d=d)\n"
acceptable_outputs = [
"g2(a, b, kw1=x, c=c, d=d)\n",
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
"g2(a=a, b=b, c=c, d=d, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed and reordered even if it's not in order
text = "g2(a, b, d=d, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g2(a, b, kw1=x, c=c, d=d)\n",
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a, b, d=d, c=c, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
"g2(a=a, b=b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, d=d, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveMultipleKeywords(self):
"""Remove multiple keywords at once."""
# Not using deprecated keywords -> no rename
text = "h(a, kw1=x, kw2=y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertEqual(new_text, text)
# Using positional arguments (in proper order) -> no change
text = "h(a, x, y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertEqual(new_text, text)
# Use only the old names, in order
text = "h(a, kw1_alias=x, kw2_alias=y)\n"
acceptable_outputs = [
"h(a, x, y)\n",
"h(a, kw1=x, kw2=y)\n",
"h(a=a, kw1=x, kw2=y)\n",
"h(a, kw2=y, kw1=x)\n",
"h(a=a, kw2=y, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
# Use only the old names, in reverse order, should give one of same outputs
text = "h(a, kw2_alias=y, kw1_alias=x)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
# Mix old and new names
text = "h(a, kw1=x, kw2_alias=y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
def testUnrestrictedFunctionWarnings(self):
class FooWarningSpec(ast_edits.NoUpdateSpec):
"""Usages of function attribute foo() prints out a warning."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.function_warnings = {"*.foo": (ast_edits.WARNING, "not good")}
texts = ["object.foo()", "get_object().foo()",
"get_object().foo()", "object.foo().bar()"]
for text in texts:
(_, report, _), _ = self._upgrade(FooWarningSpec(), text)
self.assertIn("not good", report)
# Note that foo() won't result in a warning, because in this case foo is
# not an attribute, but a name.
false_alarms = ["foo", "foo()", "foo.bar()", "obj.run_foo()", "obj.foo"]
for text in false_alarms:
(_, report, _), _ = self._upgrade(FooWarningSpec(), text)
self.assertNotIn("not good", report)
def testFullNameNode(self):
t = ast_edits.full_name_node("a.b.c")
    self.assertEqual(
ast.dump(t),
"Attribute(value=Attribute(value=Name(id='a', ctx=Load()), attr='b', "
"ctx=Load()), attr='c', ctx=Load())"
)
def testImport(self):
# foo should be renamed to bar.
text = "import foo as f"
expected_text = "import bar as f"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo"
expected_text = "import bar as foo"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo.test"
expected_text = "import bar.test"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo.test as t"
expected_text = "import bar.test as t"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo as f, a as b"
expected_text = "import bar as f, a as b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testFromImport(self):
# foo should be renamed to bar.
text = "from foo import a"
expected_text = "from bar import a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo.a import b"
expected_text = "from bar.a import b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo import *"
expected_text = "from bar import *"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo import a, b"
expected_text = "from bar import a, b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testImport_NoChangeNeeded(self):
text = "import bar as b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
def testFromImport_NoChangeNeeded(self):
text = "from bar import a as b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
def testExcludedImport(self):
# foo.baz module is excluded from changes.
text = "import foo.baz"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
text = "import foo.baz as a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
text = "from foo import baz as a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
text = "from foo.baz import a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
def testMultipleImports(self):
text = "import foo.bar as a, foo.baz as b, foo.baz.c, foo.d"
expected_text = "import bar.bar as a, foo.baz as b, foo.baz.c, bar.d"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo import baz, a, c"
expected_text = """from foo import baz
from bar import a, c"""
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testImportInsideFunction(self):
text = """
def t():
from c import d
from foo import baz, a
from e import y
"""
expected_text = """
def t():
from c import d
from foo import baz
from bar import a
from e import y
"""
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/ast_edits_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* to 2.0 TensorFlow using SAFETY mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
self.function_keyword_renames = {}
self.symbol_renames = {}
self.change_to_function = {}
self.function_reorders = {}
self.function_warnings = {}
self.function_transformers = {}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
    # List module renames. Right now, we just support renames of module names
    # that don't contain '.'.
self.import_renames = {
"tensorflow": ast_edits.ImportRename(
"tensorflow.compat.v1",
excluded_prefixes=["tensorflow.contrib",
"tensorflow.flags",
"tensorflow.compat.v1",
"tensorflow.compat.v2"])
}
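    # Illustrative effect (added comment; see tf_upgrade_v2_safety_test.py):
    #   "import tensorflow as tf"      -> "import tensorflow.compat.v1 as tf"
    #   "import tensorflow.contrib"    -> unchanged (excluded prefix)
    #   "import tensorflow.compat.v2"  -> unchanged (excluded prefix)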
self.inserts_after_imports = {
("tensorflow", None): ["tensorflow.disable_v2_behavior()"],
("tensorflow", "tf"): ["tf.disable_v2_behavior()"],
}
# TODO(kaftan,annarev): specify replacement from TensorFlow import to
# compat.v1 import.
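# A hedged usage sketch (added for illustration; not part of the original
# module). It drives this spec the way tf_upgrade_v2_safety_test.py does,
# over an in-memory file rather than a file on disk.
def _example_usage():
  import six  # Local import, only needed for this illustrative helper.
  upgrader = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
  out_file = six.StringIO()
  upgrader.process_opened_file(
      "example_in.py", six.StringIO("import tensorflow as tf\n"),
      "example_out.py", out_file)
  # Per the SAFETY-mode tests, out_file now holds:
  #   import tensorflow.compat.v1 as tf
  #   tf.disable_v2_behavior()
  return out_file.getvalue()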
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade_v2_safety.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for all_renames_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import all_renames_v2
class AllRenamesV2Test(test_util.TensorFlowTestCase):
def test_no_identity_renames(self):
identity_renames = [
old_name
for old_name, new_name in six.iteritems(all_renames_v2.symbol_renames)
if old_name == new_name
]
self.assertEmpty(identity_renames)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/all_renames_v2_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import tempfile
from absl.testing import parameterized
import six
import tensorflow as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
name_parts = name.split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
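# Illustrative note (added comment): the loop above skips the leading "tf"
# component and walks attribute access, so e.g.
# get_symbol_for_name(tf, "tf.nn.relu") resolves to tf.nn.relu.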
def get_args(symbol):
if hasattr(inspect, "signature"):
signature = inspect.signature(symbol)
# Ignore *args and **kwargs for now.
return [param.name for param in signature.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
return tf_inspect.getargspec(symbol)[0]
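# Illustrative note (added comment): for "def f(a, b=1, *args, **kwargs)" this
# returns ["a", "b"]; *args and **kwargs are intentionally skipped.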
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
"""
open_paren_index = call_str.find("(")
close_paren_index = call_str.rfind(")")
function_name = call_str[:call_str.find("(")]
args = call_str[open_paren_index+1:close_paren_index].split(",")
args = [arg.split("=")[0].strip() for arg in args]
args = [arg for arg in args if arg] # filter out empty strings
return function_name, args
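# A hedged example (added for illustration; not part of the original file):
# the parser above is purely string-based, so it can be exercised without any
# TensorFlow symbols.
def _example_get_func_and_args_from_str():
  name, args = get_func_and_args_from_str("tf.foo(arg1=1, arg2=x)")
  assert name == "tf.foo"
  assert args == ["arg1", "arg2"]
  return name, args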
class TestUpgrade(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
super(TestUpgrade, cls).setUpClass()
cls.v2_symbols = {}
cls.v1_symbols = {}
if hasattr(tf.compat, "v2"):
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + name] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
visitor.private_map["tf.compat"] = ["v1"]
traverse.traverse(tf.compat.v2, visitor)
if hasattr(tf.compat, "v1"):
def symbol_collector_v1(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
for name in api_names_v1:
cls.v1_symbols["tf." + name] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector_v1)
traverse.traverse(tf.compat.v1, visitor)
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def _upgrade_multiple(self, old_file_texts):
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
results = []
for old_file_text in old_file_texts:
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
results.append([count, report, errors, out_file.getvalue()])
return results
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
    # This is not a complete test, but it is a sanity check that the report
    # contains information about the rename.
    self.assertTrue(report.find("Renamed function `tf.angle` to "
                                "`tf.math.angle`") != -1)
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
text not in self.v2_symbols and
# Builds currently install old version of estimator that doesn't
# have some 2.0 symbols.
not text.startswith("tf.estimator")):
self.assertFalse(
True, "Symbol %s generated from %s not in v2 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + name)
else:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
not text.startswith("tf.estimator") and
text not in v1_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v1 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testV1KeywordArgNames(self):
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that verifies V1 argument names.
def arg_test_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = tf_export.get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
visitor = public_api.PublicAPIVisitor(arg_test_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testV2KeywordArgNames(self):
# This test converts a call of the form:
# tf.foo(arg1=0, arg2=1, ...)
# to 2.0. Then, checks that converted function has valid argument names.
if not hasattr(tf.compat, "v2"):
return
v2_arg_exceptions = {
"verify_shape_is_now_always_true",
# These arguments should not be used, they just specify
# that a function takes named arguments.
"keyword_required",
"_sentinel",
}
v1_name_exceptions = {
"tf.print", # requires print_function import
}
function_warnings = (
tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
function_transformers = (
tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that converts to V2 and checks V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
if not tf_inspect.isfunction(attr):
continue
names_v1 = tf_export.get_v1_names(attr)
arg_names_v1 = get_args(attr)
for name in names_v1:
tf_name = "tf.%s" % name
if tf_name in function_warnings or tf_name in function_transformers:
continue # These require manual change
if tf_name in v1_name_exceptions:
continue
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(arg_names_v1)])
text_input = "%s(%s)" % (tf_name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
if new_function_name == "tf.compat.v1.%s" % name:
if tf_name in keyword_renames:
# If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.assertFalse(
"Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
(new_function_name, text_input, text))
continue
if new_function_name.startswith("tf.compat.v2"):
self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),
self.v2_symbols)
continue
# 3. Verify V2 function and arguments.
args_v2 = get_args(self.v2_symbols[new_function_name])
args_v2.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v2,
"Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v2)))
# 4. Verify that the argument exists in v1 as well.
if new_function_name in set(["tf.nn.ctc_loss",
"tf.saved_model.save"]):
continue
args_v1 = get_args(self.v1_symbols[new_function_name])
args_v1.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v1,
"Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v1)))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testPositionsMatchArgGiven(self):
full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings
method_names = full_dict.keys()
for method_name in method_names:
args = full_dict[method_name].keys()
# special case for optimizer methods
if method_name.startswith("*."):
method = method_name.replace("*", "tf.train.Optimizer")
else:
method = method_name
method = get_symbol_for_name(tf, method)
arg_spec = tf_inspect.getfullargspec(method)
for (arg, pos) in args:
# to deal with the self argument on methods on objects
if method_name.startswith("*."):
pos += 1
self.assertEqual(arg_spec[0][pos], arg)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
manual_function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
if name in manual_function_reorders:
continue
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = tf_export.get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay",
"tf.train.piecewise_constant_decay",
]:
text = "%s(a, b)\n" % decay
_, report, unused_errors, _ = self._upgrade(text)
self.assertIn("switch to the schedules in "
"`tf.keras.optimizers.schedules`", report)
def verify_compat_v1_rename_correctness(self, values, ns_prefix=""):
if ns_prefix:
ns_prefix += "."
for v in values:
text = "tf." + ns_prefix + v + "(a, b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
def testIntializers(self):
initializers = [
"zeros",
"ones",
"constant",
"random_uniform",
"random_normal",
"truncated_normal",
"variance_scaling",
"orthogonal",
"glorot_uniform",
"glorot_normal",
"identity",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="initializers")
initializers = [
"zeros_initializer",
"ones_initializer",
"constant_initializer",
"random_uniform_initializer",
"random_normal_initializer",
"truncated_normal_initializer",
"variance_scaling_initializer",
"orthogonal_initializer",
"glorot_uniform_initializer",
"glorot_normal_initializer",
]
self.verify_compat_v1_rename_correctness(initializers)
initializers = [
"zeros",
"ones",
"Ones",
"Zeros",
"constant",
"Constant",
"VarianceScaling",
"Orthogonal",
"orthogonal",
"Identity",
"identity",
"glorot_uniform",
"glorot_normal",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
"TruncatedNormal",
"truncated_normal",
"RandomUniform",
"uniform",
"random_uniform",
"RandomNormal",
"normal",
"random_normal",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="keras.initializers")
def testContribXavierInitializer(self):
text = "tf.contrib.layers.xavier_initializer()\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=\"uniform\")\n",
)
text = "slim.xavier_initializer(True or False)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "slim.xavier_initializer(uniform=(True or False))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "tf.contrib.layers.xavier_initializer_conv2d(False, 12)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12)\n",
)
text = ("tf.contrib.layers.xavier_initializer_conv2d("
"False, 12, tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtype=tf.float32)\n",
)
text = ("tf.contrib.layers.xavier_initializer("
"False, 12, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtypes=tf.float32)\n",
)
def testVarianceScalingInitializer(self):
text = ("tf.contrib.layers.variance_scaling_initializer("
"mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = ("slim.variance_scaling_initializer("
"uniform=(True or False), mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"distribution=(\"uniform\" if True or False else \"truncated_normal\"),"
" mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = "tf.contrib.layers.variance_scaling_initializer(factor=1.0)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0)\n",
)
text = ("tf.contrib.layers.variance_scaling_initializer("
"12.0, \"FAN_AVG\", True, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(12.0, "
"(\"FAN_AVG\").lower(), "
"(\"uniform\" if True else \"truncated_normal\"), "
"dtypes=tf.float32)\n",
)
def testMetrics(self):
metrics = [
"accuracy",
"auc",
"average_precision_at_k",
"false_negatives",
"false_negatives_at_thresholds",
"false_positives",
"false_positives_at_thresholds",
"mean",
"mean_absolute_error",
"mean_cosine_distance",
"mean_iou",
"mean_per_class_accuracy",
"mean_relative_error",
"mean_squared_error",
"mean_tensor",
"percentage_below",
"precision",
"precision_at_k",
"precision_at_thresholds",
"precision_at_top_k",
"recall",
"recall_at_k",
"recall_at_thresholds",
"recall_at_top_k",
"root_mean_squared_error",
"sensitivity_at_specificity",
"sparse_average_precision_at_k",
"sparse_precision_at_k",
"specificity_at_sensitivity",
"true_negatives",
"true_negatives_at_thresholds",
"true_positives",
"true_positives_at_thresholds",
]
for m in metrics:
text = "tf.metrics." + m + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)
self.assertIn(
"tf.metrics have been replaced with object oriented versions", report)
def testLosses(self):
losses = [
"absolute_difference",
"add_loss",
"compute_weighted_loss",
"cosine_distance",
"get_losses",
"get_regularization_loss",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"huber_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy",
]
for l in losses:
text = "tf.losses." + l + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)
self.assertIn(
"tf.losses have been replaced with object oriented versions", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "()"
expected_text = ns + "(loss_reduction=tf.compat.v1.losses.Reduction.SUM)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ns + "(loss_reduction=TEST)"
expected_text = ns + "(loss_reduction=TEST)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"
expected_text = (
"tf.compat.v1.estimator.BaselineClassifier("
"model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "
"optimizer=o, config=c, loss_reduction=lr)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"
expected_text = ("tf.estimator.BaselineClassifier(" +
"model_dir=model_dir, "
"loss_reduction=tf.compat.v1.losses.Reduction.SUM)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testBaseEstimatorPartitioner(self):
classes = ["LinearEstimator", "DNNLinearCombinedEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitioner(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, "
"loss_reduction=tf.compat.v1.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorOptimizer(self):
classes = ["BaselineEstimator", "LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorOptimizer(self):
classes = [
"BaselineClassifier", "BaselineRegressor", "LinearClassifier",
"LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
suffix = ("(optimizer=TEST, "
"loss_reduction=tf.compat.v1.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
suffix = ("(dnn_optimizer=TEST, linear_optimizer=Test, "
"loss_reduction=tf.compat.v1.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorPartitionerAndOptimizer(self):
classes = ["LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorPartitionerAndOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitionerAndOptimizer(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, optimizer=TEST, "
"loss_reduction=tf.compat.v1.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedPartitionerAndOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST, "
"loss_reduction=tf.compat.v1.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractGlimpse(self):
text = ("tf.image.extract_glimpse(x, size, off, False, "
"False, False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, False, "
"False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, uniform_noise=True if uniform_noise else "
"False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, noise='uniform' if (True if uniform_noise else "
"False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" uniform_noise=False,\n"
" name=\"foo\")# Stuff after\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" noise='uniform' if (False) else 'gaussian',\n"
" name=\"foo\")# Stuff after\n")
text = "tf.image.extract_glimpse(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertEqual(errors, [])
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (keep_prob), name=\"foo\")\n",
)
text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
)
text = (
"tf.nn.dropout(x, # Stuff before\n"
" keep_prob=.4, # Stuff after\n"
" name=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, # Stuff before\n"
" rate=1 - (.4), # Stuff after\n"
" name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (1 - func(3 + 4.)), name=\"foo\")\n",
)
def testContribL1(self):
text = "tf.contrib.layers.l1_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l1_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l1_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1( # Stuff before\n"
" l=.4)\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2(self):
text = "tf.contrib.layers.l2_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l2_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l2_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2( # Stuff before\n"
" l=0.5 * (.4))\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2Expr(self):
text = "tf.contrib.layers.l2_regularizer(1 - func(3 + 4.), scope=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (1 - func(3 + 4.)))\n",
)
def testMathCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testCountNonZeroChanges(self):
text = (
"tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
text1 = "tf.random_poisson(lam, shape, dtype)"
text2 = "tf.random.poisson(lam, shape, dtype)"
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
_, unused_report, unused_errors, new_text1 = self._upgrade(text1)
self.assertEqual(new_text1, expected_text)
_, unused_report, unused_errors, new_text2 = self._upgrade(text2)
self.assertEqual(new_text2, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def test_substr(self):
text = "tf.substr(input, pos, len, name, unit)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "
"name=name, unit=unit)\n", new_text)
self.assertEqual(errors, [])
def testColocateGradientsWithOps(self):
text = "tf.gradients(yx=a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.gradients(yx=a)\n", new_text)
self.assertIn("tf.gradients no longer takes", report)
text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"
expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "
"gate_gradients=gate)\n")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def testColocateGradientsWithOpsMinimize(self):
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.minimize(a)\n", new_text)
self.assertIn("Optimizer.minimize no longer takes", report)
def testColocateGradientsWithOpsComputeGradients(self):
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
self.assertIn("Optimizer.compute_gradients no longer takes", report)
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testAutograph(self):
text = "tf.autograph.to_graph(f, True, arg_values=None, arg_types=None)"
expected_text = "tf.autograph.to_graph(f, True)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.autograph.to_code"
"(f, False, arg_values=None, arg_types=None, indentation=' ')")
expected_text = "tf.autograph.to_code(f, False)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
text = "tf.estimator.inputs.numpy_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.estimator.inputs.pandas_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testKerasSavedModel(self):
text = (
"tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"
"tf.contrib.saved_model.load_keras_model(saved_model_path)\n")
expected_text = (
"tf.keras.experimental.export_saved_model(model, './saved_models')\n"
"tf.keras.experimental.load_from_saved_model(saved_model_path)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = (
"tf.nn.softmax_cross_entropy_with_logits_v2("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, axis=2)")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertFalse(errors)
def testSoftMaxCrossEntropyWithLogits(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo(bar))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo().zz())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo().zz()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keepdims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dims=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2D(self):
text = (
"tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "
"data_format)")
expected_text = (
"tf.nn.conv2d(input=input, filters=filter, strides=strides, "
"padding=padding, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "
"use_cudnn_on_gpu=use_cudnn_on_gpu)")
expected_text = ("tf.nn.conv2d(input=input, filters=filter, "
"strides=strides, padding=padding)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropFilter(self):
text = (
"tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "
"out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropInput(self):
text = (
"tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "
"input=out_backprop, strides=strides, padding=padding, "
"data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_shape=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPrint(self):
# tf.print() cannot be parsed unless we import print_function
text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text) # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testIterators(self):
for (text, expected) in [
("(expr + yielding(data)).make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),
("dataset.make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("dataset.make_one_shot_iterator(shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("dataset.make_one_shot_iterator(x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("dataset.make_initializable_iterator()",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("ds.make_initializable_iterator(shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("dataset.make_initializable_iterator(x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testMapAndBatch(self):
suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"
text = "tf" + suffix
expected = "tf.compat.v1" + suffix
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testCast(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = [
"tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()
]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testAssertStatements(self):
for name in ["assert_greater", "assert_equal", "assert_none_equal",
"assert_less", "assert_negative", "assert_positive",
"assert_non_negative", "assert_non_positive", "assert_near",
"assert_less", "assert_less_equal", "assert_greater",
"assert_greater_equal", "assert_integer", "assert_type",
"assert_scalar"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def testAssertRankStatements(self):
for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def test_assert_equal_graph_def(self):
text = ("tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x, "
"hash_table_shared_name=y)")
expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_upgrade(self):
text = "tf.contrib.framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_CriticalSection_upgrade(self):
text = "tf.contrib.framework.CriticalSection(shared_name='blah')"
expected = "tf.CriticalSection(shared_name='blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_sample_distorted_bounding_box(self):
# pylint: disable=line-too-long
text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"
expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_initialize(self):
text = "tf.contrib.summary.initialize"
expected = "tf.compat.v1.summary.initialize"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_framework_argsort(self):
text = "tf.contrib.framework.argsort"
expected = "tf.argsort"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_flags_bare(self):
_, _, errors, _ = self._upgrade("tf.flags")
self.assertIn("tf.flags has been removed", errors[0])
def test_flags_flags(self):
_, _, errors, _ = self._upgrade("tf.flags.FLAGS")
self.assertIn("tf.flags has been removed", errors[0])
def test_contrib_estimator_head_deprecation(self):
api_symbols = ["binary_classification_head", "logistic_regression_head",
"multi_class_head", "multi_head", "multi_label_head",
"poisson_regression_head", "regression_head"]
for symbol in api_symbols:
text = "tf.contrib.estimator." + symbol
_, report, _, _ = self._upgrade(text)
self.assertIn("`tf.contrib.estimator.*_head` has been deprecated", report)
def test_contrib_layers_layer_norm_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.layers.layer_norm")
self.assertIn("`tf.contrib.layers.layer_norm` has been deprecated", report)
def test_contrib_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.rnn")
self.assertIn("tf.contrib.rnn.* has been deprecated", report)
def test_contrib_cudnn_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.cudnn_rnn")
self.assertIn("tf.contrib.cudnn_rnn.* has been deprecated", report)
def test_max_pool_2d(self):
text = "tf.nn.max_pool(value=4)"
expected_text = "tf.nn.max_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_estimator_early_stopping(self):
api_symbols = [
"make_early_stopping_hook", "stop_if_higher_hook", "stop_if_lower_hook",
"stop_if_no_decrease_hook", "stop_if_no_increase_hook"
]
for symbol in api_symbols:
text = "tf.contrib.estimator." + symbol
expected_text = "tf.estimator.experimental." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_cell(self):
api_symbols = ["RNNCell", "BasicLSTMCell", "BasicRNNCell", "GRUCell",
"LSTMCell", "MultiRNNCell"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn.rnn_cell." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_function(self):
api_symbols = ["static_rnn", "static_state_saving_rnn",
"static_bidirectional_rnn"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_summary_generic(self):
text = "tf.contrib.summary.generic('foo', myval, meta, 'fam', 42)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"metadata=meta, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# Arg errors come in alphabetical order of arguments, not appearance order.
self.assertIn("'family' argument", errors[0])
self.assertIn("'name' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio(self):
text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram(self):
text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image(self):
text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'bad_color' argument", errors[0])
self.assertIn("'family' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_scalar(self):
text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_generic_nostep(self):
text = "tf.contrib.summary.generic('foo', myval)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("'step' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio_nostep(self):
text = "tf.contrib.summary.audio('foo', myval, 44100)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram_nostep(self):
text = "tf.contrib.summary.histogram('foo', myval)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image_nostep(self):
text = "tf.contrib.summary.image('foo', myval)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_scalar_nostep(self):
text = "tf.contrib.summary.scalar('foo', myval)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_graph(self):
text = "tf.contrib.summary.graph(my_graph)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.trace"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_import_event(self):
text = "tf.contrib.summary.import_event(my_event)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.experimental.write_raw_pb"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_flush(self):
text = "tf.contrib.summary.flush(writer=foo)"
expected = "tf.compat.v2.summary.flush(writer=foo)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_create_file_writer(self):
text = ("tf.contrib.summary.create_file_writer('my_logdir', 0, 1000, "
"'.foo', 'shared-name')")
expected = ("tf.compat.v2.summary.create_file_writer(logdir='my_logdir', "
"max_queue=0, flush_millis=1000, filename_suffix='.foo')")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("no longer re-uses existing event files", errors[1])
def test_contrib_summary_always_record_summaries(self):
text = "tf.contrib.summary.always_record_summaries()"
expected = "tf.compat.v2.summary.record_if(True)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_never_record_summaries(self):
text = "tf.contrib.summary.never_record_summaries()"
expected = "tf.compat.v2.summary.record_if(False)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_record_summaries_every_n_global_steps(self):
text = "tf.contrib.summary.record_summaries_every_n_global_steps(10)"
_, _, errors, _ = self._upgrade(text)
expected_error = "replaced by a call to tf.compat.v2.summary.record_if()"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_all_summary_ops(self):
text = "tf.contrib.summary.all_summary_ops()"
expected = "tf.compat.v1.summary.all_v2_summary_ops()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_full_example(self):
deindent = lambda n, s: "\n".join(line[n:] for line in s.split("\n"))
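    # deindent strips the first n characters from every line, so the test
    # source below can be written indented inside this method.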
text = deindent(4, """
import tensorflow as tf
tf.enable_eager_execution()
writer = tf.contrib.summary.create_file_writer(
"/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", 0.42)
tf.contrib.summary.histogram("weights", [1.0, 2.0], step=7)
tf.contrib.summary.flush()
""")
expected = deindent(4, """
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
writer = tf.compat.v2.summary.create_file_writer(
logdir="/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.compat.v2.summary.record_if(True):
tf.compat.v2.summary.scalar(name="loss", data=0.42, step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.histogram(name="weights", data=[1.0, 2.0], step=7)
tf.compat.v2.summary.flush()
""")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_summary_api_warning(self):
text = "tf.summary.scalar('foo', 42)"
_, report, _, _ = self._upgrade(text)
expected_info = "TF 1.x summary API cannot be automatically migrated"
self.assertIn(expected_info, report)
def test_avg_pool_2d(self):
text = "tf.nn.avg_pool(value=4)"
expected_text = "tf.nn.avg_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_saved_model_load(self):
text = "tf.saved_model.load(sess, ['foo_graph'])"
expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_saved_model_load_v2(self):
text = "tf.saved_model.load_v2('/tmp/blah')"
expected = "tf.compat.v2.saved_model.load('/tmp/blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_uniform_unit_scaling_initializer(self):
text = "tf.uniform_unit_scaling_initializer(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.initializers.uniform_unit_scaling(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_name_scope(self):
text = "tf.name_scope(None, default_name, [some, values])"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(default_name=default_name, values=stuff)"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(name=n, default_name=d, values=s)"
expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("`name` passed to `name_scope`", report)
text = "tf.name_scope(name=None, values=stuff)"
_, _, errors, _ = self._upgrade(text)
self.assertIn("name_scope call with neither name nor default_name",
errors[0])
@parameterized.parameters(
# Rename parameter: delimiter -> sep and add .to_sparse()
["tf.string_split('test', delimiter=' ')",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Rename parameter: source -> input
["tf.strings.split(source='test1')",
"tf.strings.split(input='test1').to_sparse()"],
# Use compat.v1 for skip_empty parameter.
["tf.string_split('test', ' ', True)",
"tf.compat.v1.string_split(source='test', sep=' ', skip_empty=True)"],
["tf.string_split('test', ' ', skip_empty=False)",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Split behavior for sep=None changed. (In particular, it now splits on
# all whitespace, not just the space character)
["tf.string_split(x)",
"tf.compat.v1.string_split(source=x)"],
# Split behavior for sep='' changed:
["tf.string_split(x, '')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, sep='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, delimiter='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, '', result_type='RaggedTensor')",
"tf.strings.bytes_split(input=x)"],
# If sep is a variable, we can't tell if it's empty:
["tf.string_split(x, sep)",
"tf.compat.v1.string_split(source=x, sep=sep)"],
# If sep is a non-empty string literal, then we don't need compat.v1.
["tf.string_split(x, 'non-empty-sep')",
"tf.strings.split(input=x, sep='non-empty-sep').to_sparse()"],
# Add to_sparse unless result_type is RaggedTensor:
["tf.string_split(x, ' ')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='SparseTensor')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='RaggedTensor')",
"tf.strings.split(input=x, sep=' ')"],
["tf.string_split(x, ' ', result_type=x)",
"tf.compat.v1.string_split(source=x, sep=' ', result_type=x)"],
) # pyformat: disable
# TODO(b/129398290)
def DISABLED_test_string_split(self, text, expected_text):
"""Tests for transforming from tf.string_split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
@parameterized.parameters(
# Add to_sparse unless result_type is RaggedTensor:
["tf.strings.split(x, sep)",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='SparseTensor')",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='RaggedTensor')",
"tf.strings.split(x, sep)"],
["tf.strings.split(x, sep, result_type=x)",
"tf.compat.v1.strings.split(x, sep, result_type=x)"],
) # pyformat: disable
def test_strings_split(self, text, expected_text):
"""Tests for transforming from tf.strings.split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_sdca_to_raw_ops(self):
text = "tf.train.sdca_fprint(input_tensor)"
expected_text = "tf.raw_ops.SdcaFprint(input=input_tensor)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_fprint(input, name=n)"
expected_text = "tf.raw_ops.SdcaFprint(input=input, name=n)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_shrink_l1(w, l, ll)"
expected_text = "tf.raw_ops.SdcaShrinkL1(weights=w, l1=l, l2=ll)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = (
"tf.train.sdca_optimizer(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)")
expected_text = (
"tf.raw_ops.SdcaOptimizer(sparse_example_indices=a, "
"sparse_feature_indices=b, sparse_feature_values=c, dense_features=d, "
"example_weights=e, example_labels=f, sparse_indices=g, "
"sparse_weights=h, dense_weights=i, example_state_data=j, loss_type=k, "
"l1=l, l2=m, num_loss_partitions=n, num_inner_iterations=o)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testXlaExperimental(self):
text = "tf.xla.experimental.jit_scope(0)"
expected_text = "tf.xla.experimental.jit_scope(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.xla.experimental.compile(0)"
expected_text = "tf.xla.experimental.compile(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnErosion2d(self):
text = "tf.nn.erosion2d(v, k, s, r, p)"
expected_text = "tf.nn.erosion2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPywrapTensorflowWarning(self):
text = "tf.pywrap_tensorflow.foo()"
expected = "tf.pywrap_tensorflow.foo()"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("`tf.pywrap_tensorflow` will not be distributed", errors[0])
def testKerasSaveModelFormat(self):
text = "tf.keras.models.save_model(model, path)"
expected_text = "tf.keras.models.save_model(model, path, save_format='h5')"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertNotIn(
"saves to the Tensorflow SavedModel format by default", report)
_, report, _, _ = self._upgrade("model.save(path)")
self.assertIn(
"saves to the Tensorflow SavedModel format by default", report)
def test_distribute_strategy(self):
text = "tf.contrib.distribute.CrossDeviceOps()"
expected = "tf.distribute.CrossDeviceOps()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
text = "tf.contrib.distribute.MirroredStrategy"
expected = "tf.contrib.distribute.MirroredStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.MirroredStrategy", errors[0])
text = "tf.distribute.MirroredStrategy"
expected = "tf.distribute.MirroredStrategy"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.distribute.MirroredStrategy API has changed", report)
self.assertIn("make_dataset_iterator->experimental_distribute_dataset",
report)
text = "tf.contrib.distribute.TPUStrategy"
expected = "tf.contrib.distribute.TPUStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.experimental.TPUStrategy",
errors[0])
text = "tf.contrib.distribute.foo"
expected = "tf.contrib.distribute.foo"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.contrib.distribute.* have been migrated", report)
def test_decode_raw(self):
text = "tf.io.decode_raw(bytes=[1,2,3], output_dtype=tf.int32)"
expected_text = (
"tf.io.decode_raw(input_bytes=[1,2,3], output_dtype=tf.int32)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testRecomputeGrad(self):
text = "tf.contrib.layers.recompute_grad()"
expected = "tf.recompute_grad()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_load_variable(self):
text = "tf.contrib.framework.load_variable('a')"
expected_text = (
"tf.train.load_variable('a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.contrib.framework.load_variable(checkpoint_dir='a')"
expected_text = (
"tf.train.load_variable(ckpt_dir_or_file='a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_import_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
# We upgrade the base un-versioned tensorflow aliased as tf
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
    # We don't handle unaliased tensorflow imports currently,
    # so the upgrade script should log errors.
import_header = "import tensorflow\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("unaliased `import tensorflow`", "\n".join(errors))
# Upgrading explicitly-versioned tf code is unsafe, but we don't
# need to throw errors when we detect explicitly-versioned tf.
import_header = "import tensorflow.compat.v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf, v2 as tf2\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "import tensorflow.compat.v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf1, v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
def test_api_spec_reset_between_files(self):
for old_symbol, new_symbol in [
("tf.conj(a)", "tf.math.conj(a)"),
("tf.to_int32(x)", "tf.cast(x, dtype=tf.int32)")]:
      ## Test that the API spec is reset between files:
import_header = "import tensorflow.compat.v2 as tf\n"
text_a = import_header + old_symbol
expected_text_a = import_header + old_symbol
text_b = old_symbol
expected_text_b = new_symbol
results = self._upgrade_multiple([text_a, text_b])
result_a, result_b = results[0], results[1]
self.assertEqual(result_a[3], expected_text_a)
self.assertEqual(result_b[3], expected_text_b)
def test_model_to_estimator_checkpoint_warning(self):
text = "tf.keras.estimator.model_to_estimator(model)"
_, report, _, _ = self._upgrade(text)
expected_info = "will save object-based checkpoints"
self.assertIn(expected_info, report)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade_v2_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
import six
# Some regular expressions we will need for parsing
FIND_OPEN = re.compile(r"^\s*(\[).*$")
FIND_STRING_CHARS = re.compile(r"['\"]")
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
ImportRename = collections.namedtuple(
"ImportRename", ["new_name", "excluded_prefixes"])
def full_name_node(name, ctx=ast.Load()):
"""Make an Attribute or Name node for name.
Translate a qualified name into nested Attribute nodes (and a Name node).
Args:
name: The name to translate to a node.
ctx: What context this name is used in. Defaults to Load()
Returns:
A Name or Attribute node.
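  Example (illustrative):
    full_name_node("a.b.c") builds the same AST as the expression `a.b.c`,
    i.e. Attribute(value=Attribute(value=Name(id="a"), attr="b"), attr="c").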
"""
names = name.split(".")
names.reverse()
node = ast.Name(id=names.pop(), ctx=ast.Load())
while names:
node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())
# Change outermost ctx to the one given to us (inner ones should be Load).
node.ctx = ctx
return node
def get_arg_value(node, arg_name, arg_pos=None):
"""Get the value of an argument from a ast.Call node.
This function goes through the positional and keyword arguments to check
whether a given argument was used, and if so, returns its value (the node
representing its value).
  This cannot introspect *args or **kwargs, but it safely handles *args in
Python3.5+.
Args:
node: The ast.Call node to extract arg values from.
arg_name: The name of the argument to extract.
arg_pos: The position of the argument (in case it's passed as a positional
argument).
Returns:
A tuple (arg_present, arg_value) containing a boolean indicating whether
the argument is present, and its value in case it is.
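  Example (illustrative):
    For the call `f(x, name="foo")`, get_arg_value(call_node, "name") returns
    (True, <node for "foo">), get_arg_value(call_node, None, 0) returns
    (True, <node for x>), and get_arg_value(call_node, "other") returns
    (False, None).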
"""
# Check keyword args
if arg_name is not None:
for kw in node.keywords:
if kw.arg == arg_name:
return (True, kw.value)
# Check positional args
if arg_pos is not None:
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't parse Starred
if idx == arg_pos:
return (True, arg)
idx += 1
return (False, None)
def uses_star_args_in_call(node):
"""Check if an ast.Call node uses arbitrary-length positional *args.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
    True if the node uses starred variadic positional *args.
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for an *args usage in python 3.5+
for arg in node.args:
if isinstance(arg, ast.Starred):
return True
else:
if node.starargs:
return True
return False
def uses_star_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
    True if the node uses starred variadic keyword **kwargs.
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for a **kwarg usage in python 3.5+
for keyword in node.keywords:
if keyword.arg is None:
return True
else:
if node.kwargs:
return True
return False
def uses_star_args_or_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length *args or **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)
def excluded_from_module_rename(module, import_rename_spec):
"""Check if this module import should not be renamed.
Args:
module: (string) module name.
import_rename_spec: ImportRename instance.
Returns:
True if this import should not be renamed according to the
import_rename_spec.
"""
for excluded_prefix in import_rename_spec.excluded_prefixes:
if module.startswith(excluded_prefix):
return True
return False
class APIChangeSpec(object):
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `symbol_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_warnings`: maps full names of functions to warnings that will be
printed out if the function is used. (e.g. tf.nn.convolution())
* `function_transformers`: maps function names to custom handlers
* `module_deprecations`: maps module names to warnings that will be printed
if the module is still used after all other transformations have run
* `import_renames`: maps import name (must be a short name without '.')
to ImportRename instance.
For an example, see `TFAPIChangeSpec`.
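  A minimal illustrative sketch (hypothetical spec, not one of the real change
  specs) could subclass `NoUpdateSpec` below and override only what it needs:
    class MyChangeSpec(NoUpdateSpec):
      def __init__(self):
        NoUpdateSpec.__init__(self)
        self.symbol_renames = {"tf.old_name": "tf.new_name"}
        self.function_keyword_renames = {"tf.old_name": {"dim": "axis"}}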
"""
def preprocess(self, root_node): # pylint: disable=unused-argument
"""Preprocess a parse tree. Return any produced logs and errors."""
return [], []
def clear_preprocessing(self):
"""Restore this APIChangeSpec to before it preprocessed a file.
This is needed if preprocessing a file changed any rewriting rules.
"""
pass
class NoUpdateSpec(APIChangeSpec):
"""A specification of an API change which doesn't change anything."""
def __init__(self):
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = {}
self.function_transformers = {}
self.import_renames = {}
class _PastaEditVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, api_change_spec):
self._api_change_spec = api_change_spec
self._log = [] # Holds 4-tuples: severity, line, col, msg.
self._stack = [] # Allow easy access to parents.
# Overridden to maintain a stack of nodes to allow for parent access
def visit(self, node):
self._stack.append(node)
super(_PastaEditVisitor, self).visit(node)
self._stack.pop()
@property
def errors(self):
return [log for log in self._log if log[0] == ERROR]
@property
def warnings(self):
return [log for log in self._log if log[0] == WARNING]
@property
def warnings_and_errors(self):
return [log for log in self._log if log[0] in (WARNING, ERROR)]
@property
def info(self):
return [log for log in self._log if log[0] == INFO]
@property
def log(self):
return self._log
def add_log(self, severity, lineno, col, msg):
self._log.append((severity, lineno, col, msg))
print("%s line %d:%d: %s" % (severity, lineno, col, msg))
def add_logs(self, logs):
"""Record a log and print it.
The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
be printed and recorded. It is part of the log available in the `self.log`
property.
Args:
logs: The logs to add. Must be a list of tuples
`(severity, lineno, col_offset, msg)`.
"""
self._log.extend(logs)
for log in logs:
print("%s line %d:%d: %s" % log)
def _get_applicable_entries(self, transformer_field, full_name, name):
"""Get all list entries indexed by name that apply to full_name or name."""
    # Transformers are keyed by full name, by "*.<name>", or by "*",
    # as a performance optimization.
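    # For example (illustrative): with full_name "tf.foo.bar" and name "bar",
    # entries keyed by "tf.foo.bar", "*.bar", and "*" would all apply.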
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = []
if full_name in function_transformers:
transformers.append(function_transformers[full_name])
if glob_name in function_transformers:
transformers.append(function_transformers[glob_name])
if "*" in function_transformers:
transformers.append(function_transformers["*"])
return transformers
def _get_applicable_dict(self, transformer_field, full_name, name):
"""Get all dict entries indexed by name that apply to full_name or name."""
    # Transformers are keyed by full name, by "*.<name>", or by "*",
    # as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = function_transformers.get("*", {}).copy()
transformers.update(function_transformers.get(glob_name, {}))
transformers.update(function_transformers.get(full_name, {}))
return transformers
def _get_full_name(self, node):
"""Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".
This is the inverse of `full_name_node`.
Args:
node: A Node of type Attribute.
Returns:
      A '.'-delimited full name, or None if the node was not an Attribute or a
      Name, e.g. `(foo()+b).bar` returns None, while `a.b.c` returns "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _maybe_add_warning(self, node, full_name):
"""Adds an error to be printed about full_name at node."""
function_warnings = self._api_change_spec.function_warnings
if full_name in function_warnings:
level, message = function_warnings[full_name]
message = message.replace("<function name>", full_name)
self.add_log(level, node.lineno, node.col_offset,
"%s requires manual check. %s" % (full_name, message))
return True
else:
return False
def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):
"""Adds a warning if full_name is a deprecated module."""
warnings = self._api_change_spec.module_deprecations
if full_name in warnings:
level, message = warnings[full_name]
message = message.replace("<function name>", whole_name)
self.add_log(level, node.lineno, node.col_offset,
"Using member %s in deprecated module %s. %s" % (whole_name,
full_name,
message))
return True
else:
return False
def _maybe_add_call_warning(self, node, full_name, name):
"""Print a warning when specific functions are called with selected args.
Warnings keyed on the full name of the called function (e.g., tf.foo.bar())
are handled by the Attribute visitor. This method matches on the bare
function name, as long as the call target is an attribute. For example,
`tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.
Args:
node: ast.Call object
full_name: The precomputed full name of the callable, if one exists, None
otherwise.
name: The precomputed name of the callable, if one exists, None otherwise.
Returns:
Whether an error was recorded.
"""
# Only look for *.-warnings here; full-name warnings are handled by the
# Attribute visitor. Also, do not warn for bare functions, only if the call
# func is an attribute.
warned = False
if isinstance(node.func, ast.Attribute):
warned = self._maybe_add_warning(node, "*." + name)
# All arg warnings are handled here, since only here do we have the args.
arg_warnings = self._get_applicable_dict("function_arg_warnings",
full_name, name)
variadic_args = uses_star_args_or_kwargs_in_call(node)
for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):
present, _ = get_arg_value(node, kwarg, arg) or variadic_args
if present:
warned = True
warning_message = warning.replace("<function name>", full_name or name)
template = "%s called with %s argument, requires manual check: %s"
if variadic_args:
template = ("%s called with *args or **kwargs that may include %s, "
"requires manual check: %s")
self.add_log(level, node.lineno, node.col_offset,
template % (full_name or name, kwarg, warning_message))
return warned
def _maybe_rename(self, parent, node, full_name):
"""Replace node (Attribute or Name) with a node representing full_name."""
new_name = self._api_change_spec.symbol_renames.get(full_name, None)
if new_name:
self.add_log(INFO, node.lineno, node.col_offset,
"Renamed %r to %r" % (full_name, new_name))
new_node = full_name_node(new_name, node.ctx)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
return True
else:
return False
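# Illustrative sketch (added commentary, not executed): if the spec's
# symbol_renames contains an entry such as {"tf.log": "tf.math.log"}, then
# visiting the line
#
#   x = tf.log(y)
#
# replaces the tf.log Attribute with a node built for "tf.math.log",
# producing `x = tf.math.log(y)` plus an INFO log entry.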
def _maybe_change_to_function_call(self, parent, node, full_name):
"""Wraps node (typically, an Attribute or Expr) in a Call."""
if full_name in self._api_change_spec.change_to_function:
if not isinstance(parent, ast.Call):
# ast.Call's constructor is really picky about how many arguments it
# wants, and also, it changed between Py2 and Py3.
if six.PY2:
new_node = ast.Call(node, [], [], None, None)
else:
new_node = ast.Call(node, [], [])
pasta.ast_utils.replace_child(parent, node, new_node)
ast.copy_location(new_node, node)
self.add_log(INFO, node.lineno, node.col_offset,
"Changed %r to a function call" % full_name)
return True
return False
def _maybe_add_arg_names(self, node, full_name):
"""Make args into keyword args if function called full_name requires it."""
function_reorders = self._api_change_spec.function_reorders
if full_name in function_reorders:
if uses_star_args_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"re-ordering the call arguments, but it was passed "
"variable-length positional *args. The upgrade "
"script cannot handle these automatically." % full_name)
reordered = function_reorders[full_name]
new_keywords = []
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't move Starred to keywords
keyword_arg = reordered[idx]
keyword = ast.keyword(arg=keyword_arg, value=arg)
new_keywords.append(keyword)
idx += 1
if new_keywords:
self.add_log(INFO, node.lineno, node.col_offset,
"Added keywords to args of function %r" % full_name)
node.args = []
node.keywords = new_keywords + (node.keywords or [])
return True
return False
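# Illustrative sketch (added commentary, not executed): assuming
# function_reorders maps "tf.argmax" to its v1 positional-argument names
# ["input", "axis", "name", "dimension", "output_type"], the call
#
#   tf.argmax(logits, 1)
#
# is rewritten to `tf.argmax(input=logits, axis=1)`, so later renames and
# removals cannot change which parameter each value binds to.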
def _maybe_modify_args(self, node, full_name, name):
"""Rename keyword args if the function called full_name requires it."""
renamed_keywords = self._get_applicable_dict("function_keyword_renames",
full_name, name)
if not renamed_keywords:
return False
if uses_star_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"renaming or removing call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
modified = False
new_keywords = []
for keyword in node.keywords:
argkey = keyword.arg
if argkey in renamed_keywords:
modified = True
if renamed_keywords[argkey] is None:
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
argkey, full_name or name))
else:
keyword.arg = renamed_keywords[argkey]
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Renamed keyword argument for %s from %s to %s" % (
full_name, argkey, renamed_keywords[argkey]))
new_keywords.append(keyword)
else:
new_keywords.append(keyword)
if modified:
node.keywords = new_keywords
return modified
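# Illustrative sketch (added commentary, not executed): with a rename entry
# such as {"tf.argmax": {"dimension": "axis"}}, the call
#
#   tf.argmax(input=logits, dimension=1)
#
# becomes `tf.argmax(input=logits, axis=1)`; a rename target of None instead
# drops the keyword from the call entirely.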
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
assert self._stack[-1] is node
# Get the name for this call, so we can index stuff with it.
full_name = self._get_full_name(node.func)
if full_name:
name = full_name.split(".")[-1]
elif isinstance(node.func, ast.Name):
name = node.func.id
elif isinstance(node.func, ast.Attribute):
name = node.func.attr
else:
name = None
# Call standard transformers for this node.
# Make sure warnings come first, since args or names triggering warnings
# may be removed by the other transformations.
self._maybe_add_call_warning(node, full_name, name)
# Make all args into kwargs
self._maybe_add_arg_names(node, full_name)
# Argument name changes or deletions
self._maybe_modify_args(node, full_name, name)
# Call transformers. These have the ability to modify the node, and if they
# do, will return the new node they created (or the same node if they just
# changed it). They are given the parent, but we will take care of
# integrating their changes into the parent if they return a new node.
#
# These are matched on the old name, since renaming is performed by the
# Attribute visitor, which happens later.
transformers = self._get_applicable_entries("function_transformers",
full_name, name)
parent = self._stack[-2]
if transformers:
if uses_star_args_or_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"modifying call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
for transformer in transformers:
logs = []
new_node = transformer(parent, node, full_name, name, logs)
self.add_logs(logs)
if new_node and new_node is not node:
pasta.ast_utils.replace_child(parent, node, new_node)
node = new_node
self._stack[-1] = node
self.generic_visit(node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
assert self._stack[-1] is node
full_name = self._get_full_name(node)
if full_name:
parent = self._stack[-2]
# Make sure the warning comes first, otherwise the name may have changed
self._maybe_add_warning(node, full_name)
# Once we have made a modification, the node is invalid and not worth
# inspecting further. Also, we only perform modifications for simple nodes,
# so there would be no point in descending further.
if self._maybe_rename(parent, node, full_name):
return
if self._maybe_change_to_function_call(parent, node, full_name):
return
# The isinstance check is enough -- a bare Attribute is never root.
i = 2
while isinstance(self._stack[-i], ast.Attribute):
i += 1
whole_name = pasta.dump(self._stack[-(i-1)])
self._maybe_add_module_deprecation_warning(node, full_name, whole_name)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
new_aliases = []
import_updated = False
import_renames = getattr(self._api_change_spec, "import_renames", {})
inserts_after_imports = getattr(self._api_change_spec,
"inserts_after_imports", {})
# This loop processes imports in the format
# import foo as f, bar as b
for import_alias in node.names:
# Look for rename based on first component of from-import.
# i.e. based on foo in foo.bar.
import_first_component = import_alias.name.split(".")[0]
import_rename_spec = import_renames.get(import_first_component, None)
if not import_rename_spec or excluded_from_module_rename(
import_alias.name, import_rename_spec):
new_aliases.append(import_alias) # no change needed
continue
new_name = (
import_rename_spec.new_name +
import_alias.name[len(import_first_component):])
# If current import is
# import foo
# then new import should preserve imported name:
# import new_foo as foo
# This happens when module has just one component.
new_asname = import_alias.asname
if not new_asname and "." not in import_alias.name:
new_asname = import_alias.name
new_alias = ast.alias(name=new_name, asname=new_asname)
new_aliases.append(new_alias)
import_updated = True
# Insert any followup lines that should happen after this import.
full_import = (import_alias.name, import_alias.asname)
insert_offset = 1
for line_to_insert in inserts_after_imports.get(full_import, []):
assert self._stack[-1] is node
parent = self._stack[-2]
new_line_node = pasta.parse(line_to_insert)
ast.copy_location(new_line_node, node)
parent.body.insert(
parent.body.index(node) + insert_offset, new_line_node)
insert_offset += 1
# Insert a newline after the import if necessary
old_suffix = pasta.base.formatting.get(node, "suffix")
if old_suffix is None:
old_suffix = os.linesep
if os.linesep not in old_suffix:
pasta.base.formatting.set(node, "suffix", old_suffix + os.linesep)
# Apply indentation to new node.
pasta.base.formatting.set(new_line_node, "prefix",
pasta.base.formatting.get(node, "prefix"))
pasta.base.formatting.set(new_line_node, "suffix", os.linesep)
self.add_log(
INFO, node.lineno, node.col_offset,
"Adding `%s` after import of %s" %
(new_line_node, import_alias.name))
# Replace the node if at least one import needs to be updated.
if import_updated:
assert self._stack[-1] is node
parent = self._stack[-2]
new_node = ast.Import(new_aliases)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r." %
(pasta.dump(node), pasta.dump(new_node)))
self.generic_visit(node)
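# Illustrative sketch (added commentary, not executed): with an import_renames
# entry that maps "tensorflow" to a rename spec whose new_name is
# "tensorflow.compat.v1" (the exact spec type is defined elsewhere), the line
#
#   import tensorflow as tf
#
# becomes `import tensorflow.compat.v1 as tf`, and a bare `import tensorflow`
# becomes `import tensorflow.compat.v1 as tensorflow` so existing references
# to the module name keep working.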
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
# Look for rename based on first component of from-import.
# i.e. based on foo in foo.bar.
from_import_first_component = from_import.split(".")[0]
import_renames = getattr(self._api_change_spec, "import_renames", {})
import_rename_spec = import_renames.get(from_import_first_component, None)
if not import_rename_spec:
self.generic_visit(node)
return
# Split module aliases into the ones that require import update
# and those that don't. For e.g. if we want to rename "a" to "b"
# unless we import "a.c" in the following:
# from a import c, d
# we want to update import for "d" but not for "c".
updated_aliases = []
same_aliases = []
for import_alias in node.names:
full_module_name = "%s.%s" % (from_import, import_alias.name)
if excluded_from_module_rename(full_module_name, import_rename_spec):
same_aliases.append(import_alias)
else:
updated_aliases.append(import_alias)
if not updated_aliases:
self.generic_visit(node)
return
assert self._stack[-1] is node
parent = self._stack[-2]
# Replace first component of from-import with new name.
new_from_import = (
import_rename_spec.new_name +
from_import[len(from_import_first_component):])
updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
ast.copy_location(updated_node, node)
pasta.ast_utils.replace_child(parent, node, updated_node)
# If some imports had to stay the same, add another import for them.
additional_import_log = ""
if same_aliases:
same_node = ast.ImportFrom(from_import, same_aliases, node.level,
col_offset=node.col_offset, lineno=node.lineno)
ast.copy_location(same_node, node)
parent.body.insert(parent.body.index(updated_node), same_node)
# Apply indentation to new node.
pasta.base.formatting.set(
same_node, "prefix",
pasta.base.formatting.get(updated_node, "prefix"))
additional_import_log = " and %r" % pasta.dump(same_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r%s." %
(pasta.dump(node),
pasta.dump(updated_node),
additional_import_log))
self.generic_visit(node)
class AnalysisResult(object):
"""This class represents an analysis result and how it should be logged.
This class must provide the following fields:
* `log_level`: The log level to which this detection should be logged
* `log_message`: The message that should be logged for this detection
For an example, see `VersionedTFImport`.
"""
class APIAnalysisSpec(object):
"""This class defines how `AnalysisResult`s should be generated.
It specifies how to map imports and symbols to `AnalysisResult`s.
This class must provide the following fields:
* `symbols_to_detect`: maps function names to `AnalysisResult`s
* `imports_to_detect`: maps imports represented as (full module name, alias)
tuples to `AnalysisResult`s
For an example, see `TFAPIImportAnalysisSpec`.
"""
class PastaAnalyzeVisitor(_PastaEditVisitor):
"""AST Visitor that looks for specific API usage without editing anything.
This is used before any rewriting is done to detect if any symbols are used
that require changing imports or disabling rewriting altogether.
"""
def __init__(self, api_analysis_spec):
super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())
self._api_analysis_spec = api_analysis_spec
self._results = [] # Holds AnalysisResult objects
@property
def results(self):
return self._results
def add_result(self, analysis_result):
self._results.append(analysis_result)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
full_name = self._get_full_name(node)
if full_name:
detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None)
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
for import_alias in node.names:
# Detect based on full import name and alias.
full_import = (import_alias.name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
for import_alias in node.names:
# Detect based on full import name (module and alias).
full_module_name = "%s.%s" % (from_import, import_alias.name)
full_import = (full_module_name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
class ASTCodeUpgrader(object):
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
# Write to a temporary file, just in case we are doing an in-place modify.
# pylint: disable=g-backslash-continuation
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(in_filename, in_file, out_filename,
temp_file)
# pylint: enable=g-backslash-continuation
shutil.move(temp_file.name, out_filename)
return ret
def format_log(self, log, in_filename):
log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
if in_filename:
return in_filename + ":" + log_string
else:
return log_string
def update_string_pasta(self, text, in_filename):
"""Updates a file using pasta."""
try:
t = pasta.parse(text)
except (SyntaxError, ValueError, TypeError):
log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
return 0, "", log, []
preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t)
visitor = _PastaEditVisitor(self._api_change_spec)
visitor.visit(t)
self._api_change_spec.clear_preprocessing()
logs = [self.format_log(log, None) for log in (preprocess_logs +
visitor.log)]
errors = [self.format_log(error, in_filename)
for error in (preprocess_errors +
visitor.warnings_and_errors)]
return 1, pasta.dump(t), logs, errors
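# Illustrative sketch (added commentary, not executed): update_string_pasta is
# the string-level entry point used by the file helpers below, e.g.
#
#   upgrader = ASTCodeUpgrader(SomeAPIChangeSpec())   # hypothetical spec
#   count, new_text, logs, errors = upgrader.update_string_pasta(
#       "import tensorflow as tf\nx = tf.log(1.0)\n", "<string>")
#
# count is 1 on success (0 if parsing failed), new_text is the converted
# source, and logs/errors are pre-formatted report lines.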
def _format_log(self, log, in_filename, out_filename):
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
text += "\n".join(log) + "\n"
text += "-" * 80 + "\n\n"
return text
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
lines = in_file.readlines()
processed_file, new_file_content, log, process_errors = (
self.update_string_pasta("".join(lines), in_filename))
if out_file and processed_file:
out_file.write(new_file_content)
return (processed_file,
self._format_log(log, in_filename, out_filename),
process_errors)
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
Note that only Python files. If you have custom code in other languages,
you will need to manually upgrade those.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base.
copy_other_files: Copy files that are not touched by this converter.
Returns:
A tuple of files processed, the report string for all files, and a dict
mapping filenames to errors encountered in that file.
"""
if output_root_directory == root_directory:
return self.process_tree_inplace(root_directory)
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." %
(output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" %
(root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(fullpath,
root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(
fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = {}
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors[input_path] = l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
def process_tree_inplace(self, root_directory):
"""Process a directory of python files in place."""
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [os.path.join(dir_name,
f) for f in file_list if f.endswith(".py")]
files_to_process += py_files
file_count = 0
tree_errors = {}
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for path in files_to_process:
file_count += 1
_, l_report, l_errors = self.process_file(path, path)
tree_errors[path] = l_errors
report += l_report
return file_count, report, tree_errors
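# Illustrative sketch (added commentary, not executed): a minimal driver,
# assuming a concrete APIChangeSpec subclass named MyChangeSpec exists.
#
#   upgrader = ASTCodeUpgrader(MyChangeSpec())
#   # Single file, writing the converted output to a new path:
#   count, report, errors = upgrader.process_file("model.py", "model_v2.py")
#   # Whole tree, mirrored under a fresh output root:
#   files, report, tree_errors = upgrader.process_tree(
#       "src/", "src_v2/", copy_other_files=True)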
| tensorflow-master | tensorflow/tools/compatibility/ast_edits.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import copy
import functools
import sys
import pasta
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class UnaliasedTFImport(ast_edits.AnalysisResult):
def __init__(self):
self.log_level = ast_edits.ERROR
self.log_message = ("The tf_upgrade_v2 script detected an unaliased "
"`import tensorflow`. The script can only run when "
"importing with `import tensorflow as tf`.")
class VersionedTFImport(ast_edits.AnalysisResult):
def __init__(self, version):
self.log_level = ast_edits.INFO
self.log_message = ("Not upgrading symbols because `tensorflow." + version
+ "` was directly imported as `tf`.")
class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec):
def __init__(self):
self.symbols_to_detect = {}
self.imports_to_detect = {
("tensorflow", None): UnaliasedTFImport(),
("tensorflow.compat.v1", "tf"): VersionedTFImport("compat.v1"),
("tensorflow.compat.v2", "tf"): VersionedTFImport("compat.v2"),
}
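# Illustrative sketch (added commentary, not executed): the analysis pass runs
# before any rewriting, e.g.
#
#   analyzer = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
#   analyzer.visit(pasta.parse("import tensorflow.compat.v1 as tf\n"))
#   # analyzer.results now contains a VersionedTFImport("compat.v1") entry,
#   # which the surrounding upgrade logic can use to skip symbol renames for
#   # this file.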
class TFAPIChangeSpec(ast_edits.NoUpdateSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
# TODO(b/129398290)
# "tf.string_split": {
# "delimiter": "sep",
# },
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
"hash_table_shared_name": None,
},
"tf.autograph.to_code": {
"arg_types": None,
"arg_values": None,
"indentation": None,
},
"tf.autograph.to_graph": {
"arg_types": None,
"arg_values": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.create_file_writer": {
"name": None,
},
"tf.contrib.summary.generic": {
"name": "tag",
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
"tf.nn.weighted_cross_entropy_with_logits": {
"targets": "labels",
},
"tf.decode_raw": {
"bytes": "input_bytes",
},
"tf.io.decode_raw": {
"bytes": "input_bytes",
},
"tf.contrib.framework.load_variable": {
"checkpoint_dir": "ckpt_dir_or_file",
}
}
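# Illustrative sketch (added commentary, not executed): each entry above feeds
# the keyword-rename pass in ast_edits. For example, under the
# "tf.count_nonzero" entry,
#
#   tf.count_nonzero(x, reduction_indices=0, keep_dims=True)
#
# becomes `tf.count_nonzero(x, axis=0, keepdims=True)`, while keywords mapped
# to None (such as colocate_gradients_with_ops for "*.minimize") are removed
# from the call.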
# Mapping from function to the new name of the function
# Add additional renames not in renames_v2.py to all_renames_v2.py.
self.symbol_renames = all_renames_v2.symbol_renames
self.import_renames = {}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
# TODO(b/129398290)
# "tf.string_split",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
"tf.train.sdca_fprint",
"tf.train.sdca_optimizer",
"tf.train.sdca_shrink_l1",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
self.manual_function_reorders = {
"tf.contrib.summary.audio": [
"name", "tensor", "sample_rate", "max_outputs", "family", "step"],
"tf.contrib.summary.create_file_writer": [
"logdir", "max_queue", "flush_millis", "filename_suffix", "name"],
"tf.contrib.summary.generic": [
"name", "tensor", "metadata", "family", "step"],
"tf.contrib.summary.histogram": [
"name", "tensor", "family", "step"],
"tf.contrib.summary.image": [
"name", "tensor", "bad_color", "max_images", "family", "step"],
"tf.contrib.summary.scalar": [
"name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
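# Illustrative sketch (added commentary, not executed): function_reorders maps
# each listed function to its v1 positional-argument names, so positional
# calls are converted to keyword form before any renames. For example,
#
#   tf.transpose(x, [1, 0])
#
# becomes `tf.transpose(a=x, perm=[1, 0])`, which is then safe against the
# keyword renames and removals applied afterwards.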
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
contrib_layers_layer_norm_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.layers.layer_norm` has been "
"deprecated, and its implementation has been integrated with "
"`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. "
"Note that, the default value of `epsilon` is changed to `1e-3` in the "
"new API from `1e-12`, and this may introduce numerical differences. "
"Please check the new API and use that instead."
)
contrib_estimator_head_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.estimator.*_head` has been "
"deprecated, and its implementation has been integrated with "
"`tf.estimator.*Head` in TensorFlow 2.0. "
"Please check the new API and use that instead."
)
initializers_no_dtype_comment = (
ast_edits.INFO, "Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for "
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
" The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
summary_api_comment = (
ast_edits.INFO,
"The TF 1.x summary API cannot be automatically migrated to TF 2.0, so "
"symbols have been converted to tf.compat.v1.summary.* and must be "
"migrated manually. Typical usage will only require changes to the "
"summary writing logic, not to individual calls like scalar(). "
"For examples of the new summary API, see the Effective TF 2.0 "
"migration document or check the TF 2.0 TensorBoard tutorials.")
contrib_summary_comment = (
ast_edits.WARNING,
"tf.contrib.summary.* functions have been migrated best-effort to "
"tf.compat.v2.summary.* equivalents where possible, but the resulting "
"code is not guaranteed to work, so please check carefully. For more "
"information about the new summary API, see the Effective TF 2.0 "
"migration document or check the updated TensorBoard tutorials.")
contrib_summary_family_arg_comment = (
ast_edits.WARNING,
"<function name> replacement does not accept a 'family' argument; "
"instead regular name scoping should be used. This call site specifies "
"a family argument that has been removed on conversion, so the emitted "
"tag names may be incorrect without manual editing.")
contrib_create_file_writer_comment = (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() has been ported to the new "
"tf.compat.v2.summary.create_file_writer(), which no longer re-uses "
"existing event files for the same logdir; instead it always opens a "
"new writer/file. The python writer objects must be re-used explicitly "
"if the reusing behavior is desired.")
contrib_summary_record_every_n_comment = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.summary.record_summaries_every_n_global_steps(n, step) "
"should be replaced by a call to tf.compat.v2.summary.record_if() with "
"the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph "
"mode, the lambda body can be used directly). If no global step was "
"passed, instead use tf.compat.v1.train.get_or_create_global_step().")
contrib_summary_graph_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.graph() has no direct "
"equivalent in TF 2.0 because manual graph construction has been "
"superseded by use of tf.function. To log tf.function execution graphs "
"to the summary writer, use the new tf.compat.v2.summary.trace_* "
"functions instead.")
contrib_summary_import_event_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.import_event() has no "
"direct equivalent in TF 2.0. For a similar experimental feature, try "
"tf.compat.v2.summary.experimental.write_raw_pb() which also accepts "
"serialized summary protocol buffer input, but for tf.Summary "
"protobufs rather than tf.Events.")
keras_default_save_format_comment = (
ast_edits.WARNING,
"(This warning is only applicable if the code saves a tf.Keras model) "
"Keras model.save now saves to the Tensorflow SavedModel format by "
"default, instead of HDF5. To continue saving to HDF5, add the "
"argument save_format='h5' to the save() function.")
distribute_strategy_api_changes = (
"If you're using the strategy with a "
"custom training loop, note the following changes in methods: "
"make_dataset_iterator->experimental_distribute_dataset, "
"experimental_make_numpy_iterator->experimental_make_numpy_dataset, "
"extended.call_for_each_replica->experimental_run_v2, "
"reduce requires an axis argument, "
"unwrap->experimental_local_results "
"experimental_initialize and experimental_finalize no longer needed ")
contrib_mirrored_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.MirroredStrategy has "
"been migrated to tf.distribute.MirroredStrategy. Things to note: "
"Constructor arguments have changed. If you are using "
"MirroredStrategy with Keras training framework, the input provided to "
"`model.fit` will be assumed to have global batch size and split "
"across the replicas. " + distribute_strategy_api_changes)
core_mirrored_strategy_warning = (
ast_edits.WARNING,
"(Manual edit may be required) tf.distribute.MirroredStrategy API has "
"changed. " + distribute_strategy_api_changes)
contrib_one_device_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has "
"been migrated to tf.distribute.OneDeviceStrategy. " +
distribute_strategy_api_changes)
contrib_tpu_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.TPUStrategy has "
"been migrated to tf.distribute.experimental.TPUStrategy. Note the "
"slight changes in constructor. " + distribute_strategy_api_changes)
contrib_collective_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.CollectiveAllReduceStrategy has "
"been migrated to "
"tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the "
"changes in constructor. " + distribute_strategy_api_changes)
contrib_ps_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.ParameterServerStrategy has "
"been migrated to "
"tf.distribute.experimental.ParameterServerStrategy (multi machine) "
" and tf.distribute.experimental.CentralStorageStrategy (one machine). "
"Note the changes in constructors. " + distribute_strategy_api_changes)
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.save":
keras_default_save_format_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.layers.layer_norm":
contrib_layers_layer_norm_comment,
"tf.contrib.estimator.binary_classification_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.logistic_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_class_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_label_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.poisson_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.regression_head":
contrib_estimator_head_comment,
"tf.contrib.summary.all_summary_ops":
contrib_summary_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.create_file_writer":
contrib_create_file_writer_comment,
"tf.contrib.summary.generic":
contrib_summary_comment,
"tf.contrib.summary.graph":
contrib_summary_graph_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.import_event":
contrib_summary_import_event_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.record_summaries_every_n_global_steps":
contrib_summary_record_every_n_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.estimator.model_to_estimator":
(ast_edits.WARNING,
"Estimators from <function name> will save object-based "
"checkpoints (format used by `keras_model.save_weights` and "
"`keras_model.load_weights`) by default in 2.0. To continue "
"saving name-based checkpoints, set `checkpoint_format='saver'`."),
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables()."),
"tf.pywrap_tensorflow":
(ast_edits.ERROR,
"<function name> cannot be converted automatically. "
"`tf.pywrap_tensorflow` will not be distributed with "
"TensorFlow 2.0, please consider an alternative in public "
"TensorFlow APIs."),
"tf.contrib.distribute.MirroredStrategy":
contrib_mirrored_strategy_warning,
"tf.distribute.MirroredStrategy":
core_mirrored_strategy_warning,
"tf.contrib.distribute.OneDeviceStrategy":
contrib_one_device_strategy_warning,
"tf.contrib.distribute.TPUStrategy":
contrib_tpu_strategy_warning,
"tf.contrib.distribute.CollectiveAllReduceStrategy":
contrib_collective_strategy_warning,
"tf.contrib.distribute.ParameterServerStrategy":
contrib_ps_strategy_warning,
"tf.summary.FileWriter": summary_api_comment,
"tf.summary.FileWriterCache": summary_api_comment,
"tf.summary.Summary": summary_api_comment,
"tf.summary.audio": summary_api_comment,
"tf.summary.histogram": summary_api_comment,
"tf.summary.image": summary_api_comment,
"tf.summary.merge": summary_api_comment,
"tf.summary.merge_all": summary_api_comment,
"tf.summary.scalar": summary_api_comment,
"tf.summary.tensor_summary": summary_api_comment,
"tf.summary.text": summary_api_comment,
}
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5): (
ast_edits.INFO,
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3): (
ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
"tf.contrib.summary.audio": {
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.create_file_writer": {
("name", 4): (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() no longer supports "
"implicit writer re-use based on shared logdirs or resource "
"names; this call site passed a 'name' argument that has been "
"removed. The new tf.compat.v2.summary.create_file_writer() "
"replacement has a 'name' parameter but the semantics are "
"the usual ones to name the op itself and do not control "
"writer re-use; writers must be manually re-used if desired.")
},
"tf.contrib.summary.generic": {
("name", 0): (
ast_edits.WARNING,
"tf.contrib.summary.generic() takes a 'name' argument for the "
"op name that also determines the emitted tag (prefixed by any "
"active name scopes), but tf.compat.v2.summary.write(), which "
"replaces it, separates these into 'tag' and 'name' arguments. "
"The 'name' argument here has been converted to 'tag' to "
"preserve a meaningful tag, but any name scopes will not be "
"reflected in the tag without manual editing."),
("family", 3): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.histogram": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.image": {
("bad_color", 2): (
ast_edits.WARNING,
"tf.contrib.summary.image no longer takes the 'bad_color' "
"argument; caller must now preprocess if needed. This call "
"site specifies a bad_color argument so it cannot be converted "
"safely."),
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.scalar": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.image.resize": {
("align_corners",
3): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize."),
},
"tf.image.resize_bilinear": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bilinear."),
},
"tf.image.resize_area": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_area."),
},
"tf.image.resize_bicubic": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bicubic."),
},
"tf.image.resize_nearest_neighbor": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_nearest_neighbor."),
},
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
    # unknown), name is the name of the function called (or None if that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
    # - None, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
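    # Illustrative sketch (hypothetical, not registered in
    # function_transformers below): a transformer that obeys the contract
    # above but changes nothing could look like
    #   def _noop_transformer(parent, node, full_name, name, logs):
    #     logs.append((ast_edits.INFO, node.lineno, node.col_offset,
    #                  "No change needed for %s" % (full_name or name)))
    #     return None  # nothing was modified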
canned_estimator_msg_optimizer = (
"tf.keras.optimizers.* only, so the call was converted to compat.v1. "
"Please note that tf.train.Optimizers have one-to-one correspondents "
"in tf.keras.optimizers, so you may be able to convert to the new "
"optimizers directly (See https://www.tensorflow.org/api_docs/python"
"/tf/keras/optimizers). Checkpoint compatibility is not guaranteed, "
"but there is a checkpoint converter tool that you can use.")
canned_estimator_msg = (
"no longer takes `input_layer_partitioner` arg, and it supports "
+ canned_estimator_msg_optimizer)
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
# TODO(b/129398290)
# "tf.string_split": _string_split_transformer,
"tf.strings.split": _string_split_rtype_transformer,
"tf.estimator.BaselineEstimator":
functools.partial(
_rename_if_arg_found_transformer,
arg_name="optimizer",
message=("tf.estimator.BaselineEstimator supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["optimizer"],
message=("tf.estimator.BaselineClassifier supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message=("tf.estimator.BaselineRegressor supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.DNNEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNEstimator no longer takes "
"input_layer_partitioner, so the call was converted to "
"compat.v1."
),
"tf.estimator.DNNClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNClassifier " + canned_estimator_msg,
),
"tf.estimator.DNNRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNRegressor " + canned_estimator_msg,
),
"tf.estimator.LinearEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearEstimator " + canned_estimator_msg,
),
"tf.estimator.LinearClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearClassifier " + canned_estimator_msg,
),
"tf.estimator.LinearRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearRegressor " + canned_estimator_msg,
),
"tf.estimator.DNNLinearCombinedEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedEstimator "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedClassifier "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedRegressor "
+ canned_estimator_msg),
),
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.always_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="True"),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.generic": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.never_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="False"),
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.contrib.layers.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"tf.contrib.layers.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"tf.contrib.layers.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
"slim.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"slim.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"slim.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"slim.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"slim.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.keras.models.save_model": functools.partial(
_add_argument_transformer,
arg_name="save_format",
arg_value_ast=ast.Str("h5")),
}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
def preprocess(self, root_node):
visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
visitor.visit(root_node)
detections = set(visitor.results)
# If we have detected the presence of imports of specific TF versions,
# We want to modify the update spec to check only module deprecations
# and skip all other conversions.
if detections:
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
self.function_transformers = {}
self.import_renames = {}
return visitor.log, visitor.warnings_and_errors
def clear_preprocessing(self):
self.__init__()
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
  return isinstance(node, tuple(allowed_types))
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
arg_name=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if the given arg is found.
This requires the function to be called with all named args, so for using
this transformer, the function should also be added to renames.
If the arg is not found, the call site is left alone.
If the arg is found, and if arg_ok_predicate is given, it is called with
the ast Expression representing the argument value found. If it returns
True, the function is left alone.
If the arg is found, arg_ok_predicate is not None and returns ok, and
remove_if_ok is True, the argument is removed from the call.
Otherwise, `compat.v1` is inserted between tf and the function name.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_name: name of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
# Check whether arg is there.
arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
if not arg_present:
return
# Check whether arg is problematic (and if not, maybe remove it).
if arg_ok_predicate and arg_ok_predicate(arg_value):
if remove_if_ok:
for i, kw in enumerate(node.keywords):
if kw.arg == arg_name:
node.keywords.pop(i)
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument %s for function %s" % (
arg_name, full_name or name)))
break
return node
else:
return
# All conditions met, insert v1 and log what we did.
# We must have a full name, so the func is an attribute.
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
node.func = ast_edits.full_name_node(new_name)
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Renaming %s to %s because argument %s is present. %s" %
(full_name, new_name, arg_name, message if message is not None else "")
))
return node
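# Illustrative sketch of this transformer's effect, using the tf.zeros_like
# registration above (the input lines are hypothetical user code):
#   tf.zeros_like(x, optimize=True)   ->  tf.zeros_like(x)
#   tf.zeros_like(x, optimize=False)  ->  tf.compat.v1.zeros_like(x, optimize=False)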
def _add_argument_transformer(parent, node, full_name, name, logs,
arg_name, arg_value_ast):
"""Adds an argument (as a final kwarg arg_name=arg_value_ast)."""
node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1]),
full_name or name)
))
return node
def _iterator_transformer(parent, node, full_name, name, logs):
"""Transform iterator methods to compat function calls."""
# First, check that node.func.value is not already something we like
# (tf.compat.v1.data), or something which is handled in the rename
# (tf.data). This transformer only handles the method call to function call
# conversion.
if full_name and (full_name.startswith("tf.compat.v1.data") or
full_name.startswith("tf.data")):
return
# This should never happen, since we're only called for Attribute nodes.
if not isinstance(node.func, ast.Attribute):
return
# Transform from x.f(y) to tf.compat.v1.data.f(x, y)
# Fortunately, node.func.value should already have valid position info
node.args = [node.func.value] + node.args
node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
"Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
"Please check this transformation.\n" % (name, name)))
return node
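# Illustrative sketch (hypothetical user code) of the method-call to
# function-call conversion performed by _iterator_transformer:
#   dataset.make_one_shot_iterator()
#     ->  tf.compat.v1.data.make_one_shot_iterator(dataset)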
def _dropout_transformer(parent, node, full_name, name, logs):
"""Replace keep_prob with 1-rate."""
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
_replace_keep_prob_node(node, node.args[1])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
return node
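# Illustrative sketch (hypothetical user code) of the keep_prob -> rate
# rewrite performed by _dropout_transformer:
#   tf.nn.dropout(x, keep_prob=0.4)  ->  tf.nn.dropout(x, rate=1 - (0.4))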
def _cast_transformer(parent, node, full_name, name, logs):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
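# Illustrative sketch (hypothetical user code) of the cast rewrite performed
# by _cast_transformer:
#   tf.to_float(x)       ->  tf.cast(x, dtype=tf.float32)
#   tf.to_int32(x, "n")  ->  tf.cast(x, name="n", dtype=tf.int32)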
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs):
"""Wrap labels argument with stop_gradients."""
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
already_stop_grad = (isinstance(old_value, ast.Call) and
isinstance(old_value.func, ast.Attribute) and
old_value.func.attr == "stop_gradient" and
isinstance(old_value.func.value, ast.Name) and
old_value.func.value.id == "tf")
if already_stop_grad:
return False
try:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
except TypeError:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
return True
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
if _wrap_label(karg, karg.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
return node
return node
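# Illustrative sketch (hypothetical user code) of the labels wrapping
# performed by _softmax_cross_entropy_with_logits_transformer:
#   tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=z)
#     ->  tf.nn.softmax_cross_entropy_with_logits(
#             labels=tf.stop_gradient(y), logits=z)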
def _image_resize_transformer(parent, node, full_name, name, logs):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
new_keywords = []
for kw in node.keywords:
if kw.arg != "align_corners":
new_keywords.append(kw)
node.keywords = new_keywords
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
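# Illustrative sketch (hypothetical user code) of the rewrite performed by
# _image_resize_transformer:
#   tf.image.resize_bilinear(images, size)
#     ->  tf.image.resize(images, size,
#                         method=tf.image.ResizeMethod.BILINEAR)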
def _pool_seed_transformer(parent, node, full_name, name, logs):
"""Removes seed2 and deterministic, and adds non-zero seed if needed."""
# This requires that this function uses all kwargs (add to renames!).
seed_arg = None
deterministic = False
modified = False
new_keywords = []
for kw in node.keywords:
if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
pass
elif kw.arg == "seed":
seed_arg = kw
elif kw.arg == "seed2" or kw.arg == "deterministic":
lineno = getattr(kw, "lineno", node.lineno)
col_offset = getattr(kw, "col_offset", node.col_offset)
logs.append((ast_edits.INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
kw.arg, full_name or name)))
if kw.arg == "deterministic":
if not _is_ast_false(kw.value):
deterministic = True
modified = True
continue
new_keywords.append(kw)
if deterministic:
if seed_arg is None:
new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding seed=42 to call to %s since determinism was requested" % (
full_name or name)
))
else:
      logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"The deterministic argument is deprecated for %s, pass a "
"non-zero seed for determinism. The deterministic argument is "
"present, possibly not False, and the seed is already set. The "
"converter cannot determine whether it is nonzero, please check."
))
if modified:
node.keywords = new_keywords
return node
else:
return
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
def _replace_uniform_noise_node(parent, old_value):
"""Replaces old_value with 'uniform' or 'guassian'."""
uniform = ast.Str(s="uniform")
gaussian = ast.Str(s="gaussian")
new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around noise.value.test (and remove the old prefix/
# suffix, they should only be around new_value.test), so that:
# "uniform" if (a if b else c) else "gaussian" is valid.
pasta.base.formatting.set(new_value.test, "prefix", "(")
pasta.base.formatting.set(new_value.test, "suffix", ")")
# Check if we have a uniform_noise keyword arg
for uniform_noise in node.keywords:
if uniform_noise.arg == "uniform_noise":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse "
"to noise, and recomputing value. Please check this "
"transformation.\n"))
uniform_noise.arg = "noise"
value = "uniform" if uniform_noise.value else "gaussian"
_replace_uniform_noise_node(uniform_noise, uniform_noise.value)
return node
# Since `noise`/`uniform_noise` is optional arg, nothing needs to be
  # done if len(node.args) < 6.
  if len(node.args) >= 6:
_replace_uniform_noise_node(node, node.args[5])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse to "
"noise, and recomputing value.\n"))
return node
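# Illustrative sketch (hypothetical user code) of the uniform_noise -> noise
# rewrite performed by _extract_glimpse_transformer:
#   tf.image.extract_glimpse(t, size, offsets, uniform_noise=u)
#     ->  tf.image.extract_glimpse(t, size, offsets,
#                                  noise="uniform" if (u) else "gaussian")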
def _add_summary_step_transformer(parent, node, full_name, name, logs):
"""Adds a step argument to the summary API call if not specified.
The inserted argument value is tf.compat.v1.train.get_or_create_global_step().
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "step":
return node
default_value = "tf.compat.v1.train.get_or_create_global_step()"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="step", value=ast_value))
logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"Summary API writing function %s now requires a 'step' argument; "
"inserting default of %s." % (full_name or name, default_value)))
return node
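# Illustrative sketch (hypothetical user code) of the step insertion performed
# by _add_summary_step_transformer; other renames registered in this file may
# also apply to the same call:
#   tf.contrib.summary.scalar("loss", loss)
#     ->  tf.contrib.summary.scalar(
#             "loss", loss, step=tf.compat.v1.train.get_or_create_global_step())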
def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs,
cond):
"""Adds cond argument to tf.contrib.summary.xxx_record_summaries().
This is in anticipation of them being renamed to tf.summary.record_if(), which
requires the cond argument.
"""
node.args.append(pasta.parse(cond))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding `%s` argument to %s in anticipation of it being renamed to "
"tf.compat.v2.summary.record_if()" % (cond, full_name or name)))
return node
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
"""Adds a loss_reduction argument if not specified.
Default value for tf.estimator.*Classifier and tf.estimator.*Regressor
loss_reduction argument changed to SUM_OVER_BATCH_SIZE. So, we update
existing calls to use the old default value `tf.losses.Reduction.SUM`.
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "loss_reduction":
return node
# TODO(annarev): this should be updated to tf.keras.losses.Reduction.SUM
# once b/125525822 is fixed.
default_value = "tf.compat.v1.losses.Reduction.SUM"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="loss_reduction", value=ast_value))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"%s: Default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
% (full_name or name, default_value)))
return node
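# Illustrative sketch (hypothetical user code) of the default inserted by
# _add_loss_reduction_transformer; the estimator transformers registered in
# function_transformers may additionally rename the call:
#   tf.estimator.LinearClassifier(feature_columns=cols)
#     ->  tf.estimator.LinearClassifier(
#             feature_columns=cols,
#             loss_reduction=tf.compat.v1.losses.Reduction.SUM)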
def _rename_if_any_arg_found_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if any of the arg_names is found.
Args:
parent: Parent of node.
node: ast.Call node to modify.
full_name: full name of function to modify.
name: name of function to modify.
logs: list of logs to append to.
arg_names: list of names of the argument to look for.
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node,
full_name, name, logs,
arg_name, arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _rename_if_arg_found_and_add_loss_reduction_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Combination of _rename_if_arg_found and _add_loss_reduction transformers.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_names: list of names of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
node = _add_loss_reduction_transformer(parent, node, full_name, name, logs)
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node, full_name,
name, logs, arg_name,
arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _add_uniform_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to uniform_unit_scaling_initializer.
Transforms:
tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, distribution="uniform", seed=seed)
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
distribution_value = "\"uniform\""
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(distribution_value)
node.keywords.append(ast.keyword(arg="distribution", value=ast_value))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
return node
def _contrib_layers_xavier_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.xavier_initializer.
Transforms:
tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=1.0, mode="fan_avg",
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
Returns: The new node
"""
def _get_distribution(old_value):
"""Returns an AST matching the following:
("uniform" if (old_value) else "truncated_normal")
"""
dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = dist.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.base.formatting.set(dist, "prefix", "(")
pasta.base.formatting.set(dist, "suffix", ")")
return dist
found_distribution = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "uniform":
found_distribution = True
keyword_arg.arg = "distribution"
old_value = keyword_arg.value
new_value = _get_distribution(keyword_arg.value)
pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)
pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
pasta.base.formatting.set(keyword_arg.value, "suffix", ")")
new_keywords = []
scale = pasta.parse("1.0")
new_keywords.append(ast.keyword(arg="scale", value=scale))
mode = pasta.parse("\"fan_avg\"")
new_keywords.append(ast.keyword(arg="mode", value=mode))
if len(node.args) >= 1:
found_distribution = True
dist = _get_distribution(node.args[0])
new_keywords.append(ast.keyword(arg="distribution", value=dist))
if not found_distribution:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
uniform_dist = pasta.parse("\"uniform\"")
new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
if len(node.args) >= 2:
new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
if len(node.args) >= 3:
new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
node.args = []
node.keywords = new_keywords + node.keywords
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers xavier initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_variance_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.variance_scaling_initializer.
Transforms:
tf.contrib.layers.variance_scaling_initializer(
factor, mode, uniform, seed, dtype
) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, mode=mode.lower(),
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
And handles the case where no factor is provided and scale needs to be
set to 2.0 to match contrib's default instead of tf.keras.initializer's
default of 1.0
"""
def _replace_distribution(parent, old_value):
"""Replaces old_value: ("uniform" if (old_value) else "truncated_normal")"""
new_value = pasta.parse(
"\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = new_value.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.ast_utils.replace_child(parent, old_value, new_value)
pasta.base.formatting.set(new_value, "prefix", "(")
pasta.base.formatting.set(new_value, "suffix", ")")
def _replace_mode(parent, old_value):
"""Replaces old_value with (old_value).lower()."""
new_value = pasta.parse("mode.lower()")
mode = new_value.body[0].value.func
pasta.ast_utils.replace_child(mode, mode.value, old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Need to keep track of scale because slim & keras
# have different defaults
found_scale = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
found_scale = True
if keyword_arg.arg == "mode":
_replace_mode(keyword_arg, keyword_arg.value)
if keyword_arg.arg == "uniform":
keyword_arg.arg = "distribution"
_replace_distribution(keyword_arg, keyword_arg.value)
# Handle any detected positional arguments
if len(node.args) >= 1:
found_scale = True
if len(node.args) >= 2:
_replace_mode(node, node.args[1])
if len(node.args) >= 3:
_replace_distribution(node, node.args[2])
# If no scale was provided, make tf 2.0 use slim's default factor
if not found_scale:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
scale_value = pasta.parse("2.0")
node.keywords = ([ast.keyword(arg="scale", value=scale_value)]
+ node.keywords)
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers.variance_scaling_initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_l1_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l1 regularizer with Keras one.
This entails renaming the 'scale' arg to 'l' and dropping any
provided scope arg.
"""
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renaming scale arg of regularizer\n"))
keyword.arg = "l"
if keyword.arg == "scope":
scope_keyword = keyword
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l1"
return node
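# Illustrative sketch (hypothetical user code) of the rewrite performed by
# _contrib_layers_l1_regularizer_transformer:
#   tf.contrib.layers.l1_regularizer(scale=0.1, scope="s")
#     ->  tf.keras.regularizers.l1(l=0.1)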
def _contrib_layers_l2_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l2 regularizer with Keras one, with l=0.5*scale.
Also drops the scope argument.
"""
def _replace_scale_node(parent, old_value):
"""Replaces old_value with 0.5*(old_value)."""
half = ast.Num(n=0.5)
half.lineno = 0
half.col_offset = 0
new_value = ast.BinOp(left=half, op=ast.Mult(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around scale.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
keyword.arg = "l"
_replace_scale_node(keyword, keyword.value)
if keyword.arg == "scope":
scope_keyword = keyword
# Maybe it was a positional arg
if len(node.args) >= 1:
_replace_scale_node(node, node.args[0])
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Multiplying scale arg of tf.contrib.layers.l2_regularizer"
" by half to what tf.keras.regularizers.l2 expects.\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l2"
return node
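# Illustrative sketch (hypothetical user code) of the rewrite performed by
# _contrib_layers_l2_regularizer_transformer:
#   tf.contrib.layers.l2_regularizer(scale=0.1)
#     ->  tf.keras.regularizers.l2(l=0.5 * (0.1))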
def _name_scope_transformer(parent, node, full_name, name, logs):
"""Fix name scope invocation to use 'default_name' and omit 'values' args."""
name_found, name = ast_edits.get_arg_value(node, "name", 0)
default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)
# If an actual name was given...
if name_found and pasta.dump(name) != "None":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"`name` passed to `name_scope`. Because you may be re-entering"
" an existing scope, it is not safe to convert automatically, "
" the v2 name_scope does not support re-entering scopes by"
" name.\n"))
# Rename to compat.v1
new_name = "tf.compat.v1.name_scope"
logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
"Renamed %r to %r" % (full_name, new_name)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
if default_found:
# New name scope doesn't have name, but it has a default name. We use
# name=default_name, and values can be dropped (it's only for
# error reporting and useless outside of graph mode).
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Using default_name as name in call to name_scope.\n"))
# Remove all args other than name
node.args = []
node.keywords = [ast.keyword(arg="name", value=default_name)]
return node
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"name_scope call with neither name nor default_name cannot be "
"converted properly."))
def _rename_to_compat_v1(node, full_name, logs, reason):
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
return _rename_func(node, full_name, new_name, logs, reason)
def _rename_func(node, full_name, new_name, logs, reason):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renamed %r to %r: %s" % (full_name, new_name, reason)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
def _string_split_transformer(parent, node, full_name, name, logs):
"""Update tf.string_split arguments: skip_empty, sep, result_type, source."""
# Check the skip_empty parameter: if not false, then use compat.v1.
for i, kw in enumerate(node.keywords):
if kw.arg == "skip_empty":
if _is_ast_false(kw.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"removed argument skip_empty for tf.string_split."))
node.keywords.pop(i)
break
else:
return _rename_to_compat_v1(
node, full_name, logs, "tf.string_split's replacement no longer "
"takes the skip_empty argument.")
# Check the sep parameter: if it's definitely an empty string, use
# tf.strings.bytes_split(). If we can't tell, then use compat.v1.
found_sep = False
for i, kw in enumerate(node.keywords):
if kw.arg == "sep":
found_sep = True
if isinstance(kw.value, ast.Str):
if kw.value.s == "":
node = _rename_func(
node, full_name, "tf.strings.bytes_split", logs,
"Splitting bytes is not handled by tf.strings.bytes_split().")
node.keywords.pop(i)
else:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep is the empty string; but sep is not a string literal, "
"so we can't tell if it's an empty string.")
if not found_sep:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep unspecified: it now splits on all whitespace, not just "
"the space character.")
# Check the result_type parameter
return _string_split_rtype_transformer(parent, node, full_name, name, logs)
def _string_split_rtype_transformer(parent, node, full_name, name, logs):
"""Update tf.strings.split arguments: result_type, source."""
# Remove the "result_type" argument.
need_to_sparse = True
for i, kw in enumerate(node.keywords):
if kw.arg == "result_type":
if (isinstance(kw.value, ast.Str) and
kw.value.s in ("RaggedTensor", "SparseTensor")):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument result_type=%r for function %s" %
(kw.value.s, full_name or name)))
node.keywords.pop(i)
if kw.value.s == "RaggedTensor":
need_to_sparse = False
else:
return _rename_to_compat_v1(
node, full_name, logs,
"%s no longer takes the result_type parameter." % full_name)
break
for i, kw in enumerate(node.keywords):
if kw.arg == "source":
kw.arg = "input"
# If necessary, add a call to .to_sparse() to convert the output of
# strings.split from a RaggedTensor to a SparseTensor.
if need_to_sparse:
if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"):
return # Prevent infinite recursion (since child nodes are transformed)
logs.append(
(ast_edits.INFO, node.lineno, node.col_offset,
"Adding call to RaggedTensor.to_sparse() to result of strings.split, "
"since it now returns a RaggedTensor."))
node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse")
try:
node = ast.Call(node, [], [])
except TypeError:
node = ast.Call(node, [], [], None, None)
return node
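# Illustrative sketch (hypothetical user code) of the result_type/source
# handling performed by _string_split_rtype_transformer:
#   tf.strings.split(source=text, result_type="RaggedTensor")
#     ->  tf.strings.split(input=text)
#   tf.strings.split(source=text)
#     ->  tf.strings.split(input=text).to_sparse()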
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade_v2.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 1.0.
We also test whether a converted file is executable. test_file_v0_11.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.mul(a, b)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(report.find("Renamed function `tf.mul` to `tf.multiply`"))
def testRename(self):
text = "tf.mul(a, tf.sub(b, c))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.multiply(a, tf.subtract(b, c))\n")
def testRenamePack(self):
text = "tf.pack(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.stack(a)\n")
text = "tf.unpack(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.unstack(a)\n")
def testReorder(self):
text = "tf.concat(a, b)\ntf.split(a, b, c)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n"
"tf.split(axis=a, num_or_size_splits=b, value=c)\n")
def testConcatReorderWithKeywordArgs(self):
text = "tf.concat(concat_dim=a, values=b)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n")
text = "tf.concat(values=b, concat_dim=a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(values=b, axis=a)\n")
text = "tf.concat(a, values=b)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n")
def testConcatReorderNested(self):
text = "tf.concat(a, tf.concat(c, d))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.concat(axis=a, values=tf.concat(axis=c, values=d))\n")
def testInitializers(self):
text = ("tf.zeros_initializer;tf.zeros_initializer ()\n"
"tf.ones_initializer;tf.ones_initializer ()\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.zeros_initializer();tf.zeros_initializer ()\n"
"tf.ones_initializer();tf.ones_initializer ()\n")
def testKeyword(self):
text = "tf.reduce_any(a, reduction_indices=[1, 2])\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.reduce_any(a, axis=[1, 2])\n")
def testComplexExpression(self):
text = "(foo + bar)[a].word()"
_ = self._upgrade(text)
def testReverse(self):
text = "tf.reverse(a, b)\n"
_, unused_report, errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, text)
self.assertIn("tf.reverse requires manual check", errors[0])
def testListComprehension(self):
def _test(input, output): # pylint: disable=redefined-builtin
_, unused_report, errors, new_text = self._upgrade(input)
self.assertEqual(new_text, output)
_test("tf.concat(0, \t[x for x in y])\n",
"tf.concat(axis=0, \tvalues=[x for x in y])\n")
_test("tf.concat(0,[x for x in y])\n",
"tf.concat(axis=0,values=[x for x in y])\n")
_test("tf.concat(0,[\nx for x in y])\n",
"tf.concat(axis=0,values=[\nx for x in y])\n")
_test("tf.concat(0,[\n \tx for x in y])\n",
"tf.concat(axis=0,values=[\n \tx for x in y])\n")
# TODO(aselle): Explicitly not testing command line interface and process_tree
# for now, since this is a one off utility.
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.mul(a, b)\n"
upgraded = "tf.multiply(a, b)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/tf_upgrade_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
This file should be updated whenever a function is added to
self.reordered_function_names in tf_upgrade_v2.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
reorders = {
'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.batch_to_space': ['input', 'crops', 'block_size', 'name', 'block_shape'],
'tf.boolean_mask': ['tensor', 'mask', 'name', 'axis'],
'tf.cond': ['pred', 'true_fn', 'false_fn', 'strict', 'name', 'fn1', 'fn2'],
'tf.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype', 'dtype_hint'],
'tf.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.estimator.BaselineClassifier': ['model_dir', 'n_classes', 'weight_column', 'label_vocabulary', 'optimizer', 'config', 'loss_reduction'],
'tf.estimator.BaselineRegressor': ['model_dir', 'label_dimension', 'weight_column', 'optimizer', 'config', 'loss_reduction'],
'tf.estimator.DNNClassifier': ['hidden_units', 'feature_columns', 'model_dir', 'n_classes', 'weight_column', 'label_vocabulary', 'optimizer', 'activation_fn', 'dropout', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm'],
'tf.estimator.DNNLinearCombinedClassifier': ['model_dir', 'linear_feature_columns', 'linear_optimizer', 'dnn_feature_columns', 'dnn_optimizer', 'dnn_hidden_units', 'dnn_activation_fn', 'dnn_dropout', 'n_classes', 'weight_column', 'label_vocabulary', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm', 'linear_sparse_combiner'],
'tf.estimator.DNNLinearCombinedRegressor': ['model_dir', 'linear_feature_columns', 'linear_optimizer', 'dnn_feature_columns', 'dnn_optimizer', 'dnn_hidden_units', 'dnn_activation_fn', 'dnn_dropout', 'label_dimension', 'weight_column', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm', 'linear_sparse_combiner'],
'tf.estimator.DNNRegressor': ['hidden_units', 'feature_columns', 'model_dir', 'label_dimension', 'weight_column', 'optimizer', 'activation_fn', 'dropout', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm'],
'tf.estimator.LinearClassifier': ['feature_columns', 'model_dir', 'n_classes', 'weight_column', 'label_vocabulary', 'optimizer', 'config', 'partitioner', 'warm_start_from', 'loss_reduction', 'sparse_combiner'],
'tf.estimator.LinearRegressor': ['feature_columns', 'model_dir', 'label_dimension', 'weight_column', 'optimizer', 'config', 'partitioner', 'warm_start_from', 'loss_reduction', 'sparse_combiner'],
'tf.feature_column.categorical_column_with_vocabulary_file': ['key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'default_value', 'dtype'],
'tf.gradients': ['ys', 'xs', 'grad_ys', 'name', 'colocate_gradients_with_ops', 'gate_gradients', 'aggregation_method', 'stop_gradients', 'unconnected_gradients'],
'tf.hessians': ['ys', 'xs', 'name', 'colocate_gradients_with_ops', 'gate_gradients', 'aggregation_method'],
'tf.image.sample_distorted_bounding_box': ['image_size', 'bounding_boxes', 'seed', 'seed2', 'min_object_covered', 'aspect_ratio_range', 'area_range', 'max_attempts', 'use_image_if_no_bounding_boxes', 'name'],
'tf.initializers.uniform_unit_scaling': ['factor', 'seed', 'dtype'],
'tf.io.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.io.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.io.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.linalg.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.math.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.math.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.math.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.nn.avg_pool': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'],
'tf.nn.avg_pool2d': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'],
'tf.nn.conv1d': ['value', 'filters', 'stride', 'padding', 'use_cudnn_on_gpu', 'data_format', 'name', 'input', 'dilations'],
'tf.nn.conv2d': ['input', 'filter', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'],
'tf.nn.conv2d_backprop_input': ['input_sizes', 'filter', 'out_backprop', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'],
'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format', 'filters', 'dilations'],
'tf.nn.crelu': ['features', 'name', 'axis'],
'tf.nn.ctc_beam_search_decoder': ['inputs', 'sequence_length', 'beam_width', 'top_paths', 'merge_repeated'],
'tf.nn.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format', 'dilations'],
'tf.nn.embedding_lookup': ['params', 'ids', 'partition_strategy', 'name', 'validate_indices', 'max_norm'],
'tf.nn.embedding_lookup_sparse': ['params', 'sp_ids', 'sp_weights', 'partition_strategy', 'name', 'combiner', 'max_norm'],
'tf.nn.fractional_avg_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'],
'tf.nn.fractional_max_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'],
'tf.nn.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.nn.max_pool': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'],
'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims', 'keepdims'],
'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format', 'dilations'],
'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format', 'dilations'],
'tf.nn.softmax_cross_entropy_with_logits': ['_sentinel', 'labels', 'logits', 'dim', 'name', 'axis'],
'tf.nn.space_to_batch': ['input', 'paddings', 'block_size', 'name', 'block_shape'],
'tf.nn.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims', 'keepdims'],
'tf.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.pad': ['tensor', 'paddings', 'mode', 'name', 'constant_values'],
'tf.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.quantize_v2': ['input', 'min_range', 'max_range', 'T', 'mode', 'name', 'round_mode'],
'tf.random.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.random.poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'],
'tf.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reverse_sequence': ['input', 'seq_lengths', 'seq_axis', 'batch_axis', 'name', 'seq_dim', 'batch_dim'],
'tf.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.shape': ['input', 'name', 'out_type'],
'tf.size': ['input', 'name', 'out_type'],
'tf.space_to_batch': ['input', 'paddings', 'block_size', 'name', 'block_shape'],
'tf.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.sparse.add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'],
'tf.sparse.reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse.segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.sparse_add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'],
'tf.sparse_matmul': ['a', 'b', 'transpose_a', 'transpose_b', 'a_is_sparse', 'b_is_sparse', 'name'],
'tf.sparse_reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse_segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.strings.length': ['input', 'name', 'unit'],
'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'],
'tf.strings.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.test.assert_equal_graph_def': ['actual', 'expected', 'checkpoint_v2'],
'tf.train.sdca_fprint': ['input', 'name'],
'tf.train.sdca_optimizer': ['sparse_example_indices', 'sparse_feature_indices', 'sparse_feature_values', 'dense_features', 'example_weights', 'example_labels', 'sparse_indices', 'sparse_weights', 'dense_weights', 'example_state_data', 'loss_type', 'l1', 'l2', 'num_loss_partitions', 'num_inner_iterations', 'adaptative', 'name'],
'tf.train.sdca_shrink_l1': ['weights', 'l1', 'l2', 'name'],
'tf.transpose': ['a', 'perm', 'name', 'conjugate'],
'tf.tuple': ['tensors', 'name', 'control_inputs'],
'tf.uniform_unit_scaling_initializer': ['factor', 'seed', 'dtype'],
'tf.while_loop': ['cond', 'body', 'loop_vars', 'shape_invariants', 'parallel_iterations', 'back_prop', 'swap_memory', 'name', 'maximum_iterations', 'return_same_structure']
}
| tensorflow-master | tensorflow/tools/compatibility/reorders_v2.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A module to support operations on ipynb files"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
import shutil
import tempfile
CodeLine = collections.namedtuple("CodeLine", ["cell_number", "code"])
def is_python(cell):
"""Checks if the cell consists of Python code."""
return (cell["cell_type"] == "code" # code cells only
and cell["source"] # non-empty cells
and not cell["source"][0].startswith("%%")) # multiline eg: %%bash
def process_file(in_filename, out_filename, upgrader):
"""The function where we inject the support for ipynb upgrade."""
print("Extracting code lines from original notebook")
raw_code, notebook = _get_code(in_filename)
raw_lines = [cl.code for cl in raw_code]
  # The function follows the original flow from `upgrader.process_file`.
with tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
processed_file, new_file_content, log, process_errors = (
upgrader.update_string_pasta("\n".join(raw_lines), in_filename))
if temp_file and processed_file:
new_notebook = _update_notebook(notebook, raw_code,
new_file_content.split("\n"))
json.dump(new_notebook, temp_file)
else:
raise SyntaxError(
"Was not able to process the file: \n%s\n" % "".join(log))
files_processed = processed_file
report_text = upgrader._format_log(log, in_filename, out_filename)
errors = process_errors
shutil.move(temp_file.name, out_filename)
return files_processed, report_text, errors
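# Illustrative sketch (not part of the original module): wiring process_file()
# to the TF 2.0 upgrade spec. The ast_edits/tf_upgrade_v2 modules live next to
# this one in tensorflow/tools/compatibility; the notebook paths are made up.
def _example_upgrade_notebook(in_path="old.ipynb", out_path="upgraded.ipynb"):
  from tensorflow.tools.compatibility import ast_edits
  from tensorflow.tools.compatibility import tf_upgrade_v2
  upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
  return process_file(in_path, out_path, upgrader)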
def skip_magic(code_line, magic_list):
"""Checks if the cell has magic, that is not Python-based.
Args:
code_line: A line of Python code
magic_list: A list of jupyter "magic" exceptions
Returns:
    True if the line is a Jupyter "magic" line rather than a Python line
>>> skip_magic('!ls -laF', ['%', '!', '?'])
True
"""
for magic in magic_list:
if code_line.startswith(magic):
return True
return False
def check_line_split(code_line):
r"""Checks if a line was split with `\`.
Args:
code_line: A line of Python code
Returns:
    A regex match (truthy) if the line was split with `\`, otherwise None
  >>> bool(check_line_split("!gcloud ml-engine models create ${MODEL} \\\n"))
True
"""
return re.search(r"\\\s*\n$", code_line)
def _get_code(input_file):
"""Loads the ipynb file and returns a list of CodeLines."""
raw_code = []
with open(input_file) as in_file:
notebook = json.load(in_file)
cell_index = 0
for cell in notebook["cells"]:
if is_python(cell):
cell_lines = cell["source"]
is_line_split = False
for line_idx, code_line in enumerate(cell_lines):
        # Jupyter cells sometimes contain more than Python code (shell and
        # line magics); prefix those lines so they are treated as comments
        # during the upgrade and restored afterwards.
if skip_magic(code_line, ["%", "!", "?"]) or is_line_split:
# Found a special character, need to "encode"
code_line = "###!!!" + code_line
          # if this line ends with `\` -> also encode the next line
is_line_split = check_line_split(code_line)
if is_line_split:
is_line_split = check_line_split(code_line)
        # Sometimes people leave a trailing \n at the end of a cell; encode it
        # so it survives the conversion and the resulting diff stays as small
        # as possible.
if (line_idx == len(cell_lines) - 1) and code_line.endswith("\n"):
code_line = code_line.replace("\n", "###===")
        # A line may also start with `\n` and have content after it;
        # encode that newline with the same marker.
raw_code.append(
CodeLine(cell_index,
code_line.rstrip().replace("\n", "###===")))
cell_index += 1
return raw_code, notebook
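# Worked example of the markers above (illustrative, not part of the original
# module): for a final cell line "!pip install foo\n", _get_code() yields the
# string "###!!!!pip install foo###===" -- "###!!!" hides the shell magic from
# the upgrader and "###===" preserves the trailing newline; _update_notebook()
# strips both markers when the converted code is written back into the cell.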
def _update_notebook(original_notebook, original_raw_lines, updated_code_lines):
"""Updates notebook, once migration is done."""
new_notebook = copy.deepcopy(original_notebook)
# validate that the number of lines is the same
assert len(original_raw_lines) == len(updated_code_lines), \
("The lengths of input and converted files are not the same: "
"{} vs {}".format(len(original_raw_lines), len(updated_code_lines)))
code_cell_idx = 0
for cell in new_notebook["cells"]:
if not is_python(cell):
continue
applicable_lines = [
idx for idx, code_line in enumerate(original_raw_lines)
if code_line.cell_number == code_cell_idx
]
new_code = [updated_code_lines[idx] for idx in applicable_lines]
cell["source"] = "\n".join(new_code).replace("###!!!", "").replace(
"###===", "\n")
code_cell_idx += 1
return new_notebook
| tensorflow-master | tensorflow/tools/compatibility/ipynb.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0."""
def setUp(self):
tf.compat.v1.enable_v2_behavior()
def testRenames(self):
self.assertAllClose(1.04719755, tf.acos(0.5))
self.assertAllClose(0.5, tf.rsqrt(4.0))
def testSerializeSparseTensor(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[2])
with self.cached_session():
serialized_sp = tf.serialize_sparse(sp_input, 'serialize_name', tf.string)
self.assertEqual((3,), serialized_sp.shape)
self.assertTrue(serialized_sp[0].numpy()) # check non-empty
def testSerializeManySparse(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[0, 1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[1, 2])
with self.cached_session():
serialized_sp = tf.serialize_many_sparse(
sp_input, 'serialize_name', tf.string)
self.assertEqual((1, 3), serialized_sp.shape)
def testArgMaxMin(self):
self.assertAllClose(
[1],
tf.argmax([[1, 3, 2]], name='abc', dimension=1))
self.assertAllClose(
[0, 0, 0],
tf.argmax([[1, 3, 2]], dimension=0))
self.assertAllClose(
[0],
tf.argmin([[1, 3, 2]], name='abc', dimension=1))
def testSoftmaxCrossEntropyWithLogits(self):
out = tf.nn.softmax_cross_entropy_with_logits(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
out = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
def testLinearClassifier(self):
feature_column = tf.feature_column.numeric_column(
'feature', shape=(1,))
classifier = tf.estimator.LinearClassifier(
n_classes=2, feature_columns=[feature_column])
data = {'feature': [1, 20, 3]}
target = [0, 1, 0]
classifier.train(
input_fn=lambda: (data, target),
steps=100)
scores = classifier.evaluate(
input_fn=lambda: (data, target),
steps=100)
self.assertGreater(scores['accuracy'], 0.99)
def testUniformUnitScalingInitializer(self):
init = tf.initializers.uniform_unit_scaling(0.5, seed=1)
self.assertArrayNear(
[-0.45200047, 0.72815341],
init((2,)).numpy(),
err=1e-6)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/testdata/test_file_v1_12.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 1.0.
This test will not run in current TensorFlow, but did run in 0.11.
This file is intended to be converted by a genrule() that uses the converter
so that a 1.0 compatible version of this file is generated. That is run as
a unit test if the converter is successful.
"""
@test_util.run_v1_only("b/120545219")
def testArgRenames(self):
with self.cached_session():
a = [[1., 2., 3.], [4., 5., 6.]]
b = [[True, False, False], [False, True, True]]
dim0 = [1]
dim1 = [1]
self.assertAllEqual(
tf.reduce_any(
b, reduction_indices=dim0).eval(), [True, True])
self.assertAllEqual(
tf.reduce_all(
b, reduction_indices=[0]).eval(), [False, False, False])
self.assertAllEqual(
tf.reduce_all(
b, reduction_indices=dim1).eval(), [False, False])
self.assertAllEqual(
tf.reduce_sum(
a, reduction_indices=[1]).eval(), [6., 15.])
self.assertAllEqual(
tf.reduce_sum(
a, reduction_indices=[0, 1]).eval(), 21.0)
self.assertAllEqual(tf.reduce_sum(a, [0, 1]).eval(), 21.0)
self.assertAllEqual(
tf.reduce_prod(
a, reduction_indices=[1]).eval(), [6., 120.])
self.assertAllEqual(
tf.reduce_prod(
a, reduction_indices=[0, 1]).eval(), 720.0)
self.assertAllEqual(tf.reduce_prod(a, [0, 1]).eval(), 720.0)
self.assertAllEqual(
tf.reduce_mean(
a, reduction_indices=[1]).eval(), [2., 5.])
self.assertAllEqual(
tf.reduce_mean(
a, reduction_indices=[0, 1]).eval(), 3.5)
self.assertAllEqual(tf.reduce_mean(a, [0, 1]).eval(), 3.5)
self.assertAllEqual(
tf.reduce_min(
a, reduction_indices=[1]).eval(), [1., 4.])
self.assertAllEqual(
tf.reduce_min(
a, reduction_indices=[0, 1]).eval(), 1.0)
self.assertAllEqual(tf.reduce_min(a, [0, 1]).eval(), 1.0)
self.assertAllEqual(
tf.reduce_max(
a, reduction_indices=[1]).eval(), [3., 6.])
self.assertAllEqual(
tf.reduce_max(
a, reduction_indices=[0, 1]).eval(), 6.0)
self.assertAllEqual(tf.reduce_max(a, [0, 1]).eval(), 6.0)
self.assertAllClose(tf.reduce_logsumexp(a, reduction_indices=[1]).eval(),
[3.40760589, 6.40760612])
self.assertAllClose(
tf.reduce_logsumexp(a, reduction_indices=[0, 1]).eval(),
6.45619344711)
self.assertAllClose(
tf.reduce_logsumexp(a, [0, 1]).eval(), 6.45619344711)
self.assertAllEqual(
tf.expand_dims([[1, 2], [3, 4]], axis=1).eval(),
[[[1, 2]], [[3, 4]]])
@test_util.run_v1_only("b/120545219")
def testArgMinMax(self):
with self.cached_session():
self.assertAllEqual(
tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
[0, 2])
self.assertAllEqual(
tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=0).eval(),
[0, 1, 1])
self.assertAllEqual(
tf.argmax([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
[2, 0])
self.assertAllEqual(
tf.argmax([[1, 2, 3], [4, 1, 0]], dimension=0).eval(),
[1, 0, 0])
@test_util.run_v1_only("b/120545219")
def testExpandAndSqueeze(self):
with self.cached_session():
# TODO(aselle): sparse_split, sparse_reduce_sum,
# sparse_reduce_sum_sparse, reduce_join
a = [[1, 2, 3]]
self.assertAllEqual(tf.expand_dims(tf.squeeze(a, [0]), 0).eval(),
a)
self.assertAllEqual(tf.squeeze(tf.expand_dims(a, 1), [1]).eval(),
a)
self.assertAllEqual(
tf.expand_dims(tf.squeeze([[1, 2, 3]], axis=[0]), dim=0).eval(), a)
self.assertAllEqual(
tf.squeeze(tf.expand_dims([[1, 2, 3]], dim=1), axis=[1]).eval(), a)
self.assertAllEqual(
tf.squeeze(tf.expand_dims([[1, 2, 3]], dim=1), axis=[1]).eval(), a)
@test_util.run_v1_only("b/120545219")
def testArithmeticRenames(self):
with self.cached_session() as s:
stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
vals = s.run(stuff)
self.assertAllEqual(vals,
[[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
self.assertAllEqual(
tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(),
-6)
self.assertAllEqual(
s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
self.assertAllEqual(
tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
a = [[1., 2., 3.], [4., 5., 6.]]
foo = np.where(np.less(a, 2), np.negative(a), a)
self.assertAllEqual(
tf.select(tf.less(a, 2), tf.neg(a), a).eval(),
foo)
self.assertAllEqual(
tf.complex_abs(tf.constant(3 + 4.j)).eval(),
5)
# # TODO(aselle): (tf.batch_*)
# ]
@test_util.run_v1_only("b/120545219")
def testBatchAndSvd(self):
with self.cached_session():
mat = [[1., 2.], [2., 3.]]
batched_mat = tf.expand_dims(mat, [0])
result = tf.matmul(mat, mat).eval()
result_batched = tf.batch_matmul(batched_mat, batched_mat).eval()
self.assertAllEqual(result_batched, np.expand_dims(result, 0))
self.assertAllEqual(
tf.svd(mat, False, True).eval(),
tf.svd(mat, compute_uv=False, full_matrices=True).eval())
@test_util.run_v1_only("b/120545219")
def testCrossEntropy(self):
# TODO(aselle): Test sparse_softmax_...
with self.cached_session():
labels = [.8, .5, .2, .1]
logits = [.9, .1, .3, .1]
self.assertAllEqual(
tf.nn.softmax_cross_entropy_with_logits(
logits, labels).eval(),
tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits).eval())
self.assertAllEqual(
tf.nn.sigmoid_cross_entropy_with_logits(
logits, labels).eval(),
tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits).eval())
@test_util.run_v1_only("b/120545219")
def testVariables(self):
with self.cached_session() as s:
# make some variables
_ = [tf.Variable([1, 2, 3], dtype=tf.float32),
tf.Variable([1, 2, 3], dtype=tf.int32)]
s.run(tf.global_variables_initializer())
_ = [v.name for v in tf.all_variables()]
_ = [v.name for v in tf.local_variables()]
@test_util.run_v1_only("b/120545219")
def testSummaries(self):
with self.cached_session() as s:
var = tf.Variable([1, 2, 3], dtype=tf.float32)
s.run(tf.global_variables_initializer())
x, y = np.meshgrid(np.linspace(-10, 10, 256), np.linspace(-10, 10, 256))
image = np.sin(x**2 + y**2) / np.sqrt(x**2 + y**2) * .5 + .5
image = image[None, :, :, None]
# make a dummy sound
freq = 440 # A = 440Hz
sampling_frequency = 11000
audio = np.sin(2 * np.pi * np.linspace(0, 1, sampling_frequency) * freq)
audio = audio[None, :, None]
test_dir = tempfile.mkdtemp()
# test summaries
writer = tf.train.SummaryWriter(test_dir)
summaries = [
tf.scalar_summary("scalar_var", var[0]),
tf.scalar_summary("scalar_reduce_var", tf.reduce_sum(var)),
tf.histogram_summary("var_histogram", var),
tf.image_summary("sin_image", image),
tf.audio_summary("sin_wave", audio, sampling_frequency),
]
run_summaries = s.run(summaries)
writer.add_summary(s.run(tf.merge_summary(inputs=run_summaries)))
# This is redundant, but we want to be able to rewrite the command
writer.add_summary(s.run(tf.merge_all_summaries()))
writer.close()
shutil.rmtree(test_dir)
if __name__ == "__main__":
test_lib.main()
| tensorflow-master | tensorflow/tools/compatibility/testdata/test_file_v0_11.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Script for updating tensorflow/tools/compatibility/reorders_v2.py.
To update reorders_v2.py, run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
# pylint: enable=line-too-long
import tensorflow as tf
# This import is needed so that TensorFlow python modules are in sys.modules.
from tensorflow import python as tf_python # pylint: disable=unused-import
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import tf_upgrade_v2
_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/reorders_v2.py'
_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
This file should be updated whenever a function is added to
self.reordered_function_names in tf_upgrade_v2.py.
\"\"\"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
def collect_function_arg_names(function_names):
"""Determines argument names for reordered function signatures.
Args:
function_names: Functions to collect arguments for.
Returns:
Dictionary mapping function name to its arguments.
"""
# Map from reordered function name to its arguments.
function_to_args = {}
def visit(unused_path, unused_parent, children):
"""Visitor that collects arguments for reordered functions."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
api_names_v1 = ['tf.%s' % name for name in api_names_v1]
matches_function_names = any(
name in function_names for name in api_names_v1)
if matches_function_names:
if tf_inspect.isclass(attr):
# Get constructor arguments if attr is a class
arg_list = tf_inspect.getargspec(
getattr(attr, '__init__'))[0]
arg_list = arg_list[1:] # skip 'self' argument
else:
# Get function arguments.
# getargspec returns a tuple of (args, varargs, keywords, defaults)
# we just look at args.
arg_list = tf_inspect.getargspec(attr)[0]
for name in api_names_v1:
function_to_args[name] = arg_list
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
return function_to_args
def get_reorder_line(name, arg_list):
return ' \'%s\': %s' % (name, str(arg_list))
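# For example (illustrative), get_reorder_line('tf.math.argmax',
# ['input', 'axis', 'name', 'dimension', 'output_type']) renders the
#   'tf.math.argmax': ['input', 'axis', 'name', 'dimension', 'output_type']
# entry that ends up in reorders_v2.py.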
def update_reorders_v2(output_file_path):
"""Writes a Python dictionary mapping function name to argument order.
Args:
output_file_path: File path to write output to. Any existing contents
would be replaced.
"""
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
all_reorders = collect_function_arg_names(reordered_function_names)
# List of reorder lines to write to output file in the form:
# 'tf.function_name': ['arg1', 'arg2', ...]
rename_lines = [
get_reorder_line(name, arg_names)
for name, arg_names in all_reorders.items()]
renames_file_text = '%sreorders = {\n%s\n}\n' % (
_FILE_HEADER, ',\n'.join(sorted(rename_lines)))
file_io.write_string_to_file(output_file_path, renames_file_text)
def main(unused_argv):
update_reorders_v2(_OUTPUT_FILE_PATH)
if __name__ == '__main__':
app.run(main=main)
| tensorflow-master | tensorflow/tools/compatibility/update/generate_v2_reorders_map.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Script for updating tensorflow/tools/compatibility/renames_v2.py.
To update renames_v2.py, run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
"""
# pylint: enable=line-too-long
import sys
import tensorflow as tf
# This import is needed so that TensorFlow python modules are in sys.modules.
from tensorflow import python as tf_python # pylint: disable=unused-import
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import all_renames_v2
_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/renames_v2.py'
_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
This file should be updated whenever endpoints are deprecated.
\"\"\"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
def get_canonical_name(v2_names, v1_name):
if v2_names:
return v2_names[0]
return 'compat.v1.%s' % v1_name
def get_all_v2_names():
"""Get a set of function/class names available in TensorFlow 2.0."""
v2_names = set() # All op names in TensorFlow 2.0
def visit(unused_path, unused_parent, children):
"""Visitor that collects TF 2.0 names."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
v2_names.add(name)
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1']
traverse.traverse(tf.compat.v2, visitor)
return v2_names
def collect_constant_renames():
"""Looks for constants that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name).
"""
renames = set()
for module in sys.modules.values():
constants_v1_list = tf_export.get_v1_constants(module)
constants_v2_list = tf_export.get_v2_constants(module)
# _tf_api_constants attribute contains a list of tuples:
# (api_names_list, constant_name)
# We want to find API names that are in V1 but not in V2 for the same
# constant_names.
# First, we convert constants_v1_list and constants_v2_list to
# dictionaries for easier lookup.
constants_v1 = {constant_name: api_names
for api_names, constant_name in constants_v1_list}
constants_v2 = {constant_name: api_names
for api_names, constant_name in constants_v2_list}
# Second, we look for names that are in V1 but not in V2.
for constant_name, api_names_v1 in constants_v1.items():
api_names_v2 = constants_v2[constant_name]
for name in api_names_v1:
if name not in api_names_v2:
renames.add((name, get_canonical_name(api_names_v2, name)))
return renames
def collect_function_renames():
"""Looks for functions/classes that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name).
"""
# Set of rename lines to write to output file in the form:
# 'tf.deprecated_name': 'tf.canonical_name'
renames = set()
def visit(unused_path, unused_parent, children):
"""Visitor that collects rename strings to add to rename_line_set."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
api_names_v2 = tf_export.get_v2_names(attr)
deprecated_api_names = set(api_names_v1) - set(api_names_v2)
for name in deprecated_api_names:
renames.add((name, get_canonical_name(api_names_v2, name)))
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
# It is possible that a different function is exported with the
# same name. For e.g. when creating a different function to
# rename arguments. Exclude it from renames in this case.
v2_names = get_all_v2_names()
renames = set((name, new_name) for name, new_name in renames
if name not in v2_names)
return renames
def get_rename_line(name, canonical_name):
return ' \'tf.%s\': \'tf.%s\'' % (name, canonical_name)
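# For example (illustrative), a V1-only symbol with no V2 name is mapped to its
# compat.v1 path, so get_rename_line('AUTO_REUSE', 'compat.v1.AUTO_REUSE')
# renders the
#   'tf.AUTO_REUSE': 'tf.compat.v1.AUTO_REUSE'
# entry that ends up in renames_v2.py.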
def update_renames_v2(output_file_path):
"""Writes a Python dictionary mapping deprecated to canonical API names.
Args:
output_file_path: File path to write output to. Any existing contents
would be replaced.
"""
function_renames = collect_function_renames()
constant_renames = collect_constant_renames()
all_renames = function_renames.union(constant_renames)
manual_renames = set(
all_renames_v2.manual_symbol_renames.keys())
# List of rename lines to write to output file in the form:
# 'tf.deprecated_name': 'tf.canonical_name'
rename_lines = [
get_rename_line(name, canonical_name)
for name, canonical_name in all_renames
if 'tf.' + name not in manual_renames]
renames_file_text = '%srenames = {\n%s\n}\n' % (
_FILE_HEADER, ',\n'.join(sorted(rename_lines)))
file_io.write_string_to_file(output_file_path, renames_file_text)
def main(unused_argv):
update_renames_v2(_OUTPUT_FILE_PATH)
if __name__ == '__main__':
app.run(main=main)
| tensorflow-master | tensorflow/tools/compatibility/update/generate_v2_renames_map.py |
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically update TensorFlow version in source files
#
# Usage:
# ./tensorflow/tools/ci_build/update_version.py --version 1.4.0-rc1
# ./tensorflow/tools/ci_build/update_version.py --nightly
#
"""Update version of TensorFlow script."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import subprocess
import time
# File parameters.
TF_SRC_DIR = "tensorflow"
VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR
SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
README_MD = "./README.md"
TENSORFLOW_BZL = "%s/tensorflow.bzl" % TF_SRC_DIR
RELEVANT_FILES = [TF_SRC_DIR, VERSION_H, SETUP_PY, README_MD]
# Version type parameters.
NIGHTLY_VERSION = 1
REGULAR_VERSION = 0
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
raise RuntimeError("%s not found. Are you under the TensorFlow source root"
" directory?")
def check_all_files():
"""Check all relevant files necessary for upgrade."""
for file_name in RELEVANT_FILES:
check_existence(file_name)
def replace_string_in_line(search, replace, filename):
"""Replace with sed when regex is required."""
with open(filename, "r") as source:
content = source.read()
with open(filename, "w") as source:
source.write(re.sub(search, replace, content))
class Version(object):
"""Version class object that stores SemVer version information."""
def __init__(self, major, minor, patch, identifier_string, version_type):
"""Constructor.
Args:
major: major string eg. (1)
minor: minor string eg. (3)
patch: patch string eg. (1)
identifier_string: extension string eg. (-rc0)
version_type: version parameter ((REGULAR|NIGHTLY)_VERSION)
"""
self.major = major
self.minor = minor
self.patch = patch
self.identifier_string = identifier_string
self.version_type = version_type
self._update_string()
def _update_string(self):
self.string = "%s.%s.%s%s" % (self.major,
self.minor,
self.patch,
self.identifier_string)
def __str__(self):
return self.string
def set_identifier_string(self, identifier_string):
self.identifier_string = identifier_string
self._update_string()
@property
def pep_440_str(self):
if self.version_type == REGULAR_VERSION:
return_string = "%s.%s.%s%s" % (self.major,
self.minor,
self.patch,
self.identifier_string)
return return_string.replace("-", "")
else:
return_string = "%s.%s.%s" % (self.major,
self.minor,
self.identifier_string)
return return_string.replace("-", "")
@staticmethod
def parse_from_string(string, version_type):
"""Returns version object from Semver string.
Args:
string: version string
version_type: version parameter
Raises:
RuntimeError: If the version string is not valid.
"""
# Check validity of new version string.
if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string):
raise RuntimeError("Invalid version string: %s" % string)
major, minor, extension = string.split(".", 2)
# Isolate patch and identifier string if identifier string exists.
extension_split = extension.split("-", 1)
patch = extension_split[0]
if len(extension_split) == 2:
identifier_string = "-" + extension_split[1]
else:
identifier_string = ""
return Version(major,
minor,
patch,
identifier_string,
version_type)
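# Illustrative sketch (not part of the original script): how the two version
# flavours round-trip through the class above.
def _example_versions():
  release = Version.parse_from_string("1.4.0-rc1", REGULAR_VERSION)
  nightly = Version.parse_from_string("1.5.0-dev20170914", NIGHTLY_VERSION)
  # str() keeps the SemVer form; pep_440_str drops the dash for pip/PyPI.
  # -> ("1.4.0-rc1", "1.4.0rc1", "1.5.dev20170914")
  return str(release), release.pep_440_str, nightly.pep_440_str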
def get_current_semver_version():
"""Returns a Version object of current version.
Returns:
version: Version object of current SemVer string based on information from
core/public/version.h
"""
# Get current version information.
version_file = open(VERSION_H, "r")
for line in version_file:
major_match = re.search("^#define TF_MAJOR_VERSION ([0-9]+)", line)
minor_match = re.search("^#define TF_MINOR_VERSION ([0-9]+)", line)
patch_match = re.search("^#define TF_PATCH_VERSION ([0-9]+)", line)
extension_match = re.search("^#define TF_VERSION_SUFFIX \"(.*)\"", line)
if major_match:
old_major = major_match.group(1)
if minor_match:
old_minor = minor_match.group(1)
if patch_match:
old_patch_num = patch_match.group(1)
if extension_match:
old_extension = extension_match.group(1)
break
if "dev" in old_extension:
version_type = NIGHTLY_VERSION
else:
version_type = REGULAR_VERSION
return Version(old_major,
old_minor,
old_patch_num,
old_extension,
version_type)
def update_version_h(old_version, new_version):
"""Update tensorflow/core/public/version.h."""
replace_string_in_line("#define TF_MAJOR_VERSION %s" % old_version.major,
"#define TF_MAJOR_VERSION %s" % new_version.major,
VERSION_H)
replace_string_in_line("#define TF_MINOR_VERSION %s" % old_version.minor,
"#define TF_MINOR_VERSION %s" % new_version.minor,
VERSION_H)
replace_string_in_line("#define TF_PATCH_VERSION %s" % old_version.patch,
"#define TF_PATCH_VERSION %s" % new_version.patch,
VERSION_H)
replace_string_in_line(
"#define TF_VERSION_SUFFIX \"%s\"" % old_version.identifier_string,
"#define TF_VERSION_SUFFIX \"%s\"" % new_version.identifier_string,
VERSION_H)
def update_setup_dot_py(old_version, new_version):
"""Update setup.py."""
replace_string_in_line("_VERSION = '%s'" % old_version.string,
"_VERSION = '%s'" % new_version.string, SETUP_PY)
def update_readme(old_version, new_version):
"""Update README."""
pep_440_str = new_version.pep_440_str
replace_string_in_line(r"%s\.%s\.([[:alnum:]]+)-" % (old_version.major,
old_version.minor),
"%s-" % pep_440_str, README_MD)
def update_tensorflow_bzl(old_version, new_version):
"""Update tensorflow.bzl."""
old_mmp = "%s.%s.%s" % (old_version.major, old_version.minor,
old_version.patch)
new_mmp = "%s.%s.%s" % (new_version.major, new_version.minor,
new_version.patch)
replace_string_in_line('VERSION = "%s"' % old_mmp,
'VERSION = "%s"' % new_mmp, TENSORFLOW_BZL)
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
if major_mismatch or minor_mismatch:
return True
return False
def check_for_lingering_string(lingering_string):
"""Check for given lingering strings."""
formatted_string = lingering_string.replace(".", r"\.")
try:
linger_str_output = subprocess.check_output(
["grep", "-rnoH", formatted_string, TF_SRC_DIR])
linger_strs = linger_str_output.decode("utf8").split("\n")
except subprocess.CalledProcessError:
linger_strs = []
if linger_strs:
print("WARNING: Below are potentially instances of lingering old version "
"string \"%s\" in source directory \"%s/\" that are not "
"updated by this script. Please check them manually!"
% (lingering_string, TF_SRC_DIR))
for linger_str in linger_strs:
print(linger_str)
else:
print("No lingering old version strings \"%s\" found in source directory"
" \"%s/\". Good." % (lingering_string, TF_SRC_DIR))
def check_for_old_version(old_version, new_version):
"""Check for old version references."""
for old_ver in [old_version.string, old_version.pep_440_str]:
check_for_lingering_string(old_ver)
if major_minor_change(old_version, new_version):
old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor)
check_for_lingering_string(old_r_major_minor)
def main():
"""This script updates all instances of version in the tensorflow directory.
Requirements:
version: The version tag
OR
nightly: Create a nightly tag with current date
Raises:
RuntimeError: If the script is not being run from tf source dir
"""
parser = argparse.ArgumentParser(description="Cherry picking automation.")
# Arg information
parser.add_argument("--version",
help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>",
default="")
parser.add_argument("--nightly",
help="disable the service provisioning step",
action="store_true")
args = parser.parse_args()
check_all_files()
old_version = get_current_semver_version()
if args.nightly:
if args.version:
new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION)
new_version.set_identifier_string("-dev" + time.strftime("%Y%m%d"))
else:
# Dev minor version is one ahead of official.
nightly_minor_ver = int(old_version.minor) + 1
new_version = Version(old_version.major,
str(nightly_minor_ver),
old_version.patch,
"-dev" + time.strftime("%Y%m%d"),
NIGHTLY_VERSION)
else:
new_version = Version.parse_from_string(args.version, REGULAR_VERSION)
update_version_h(old_version, new_version)
update_setup_dot_py(old_version, new_version)
update_readme(old_version, new_version)
update_tensorflow_bzl(old_version, new_version)
# Print transition details.
print("Major: %s -> %s" % (old_version.major, new_version.major))
print("Minor: %s -> %s" % (old_version.minor, new_version.minor))
print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch))
check_for_old_version(old_version, new_version)
if __name__ == "__main__":
main()
| tensorflow-master | tensorflow/tools/ci_build/update_version.py |
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically copy TensorFlow binaries
#
# Usage:
# ./tensorflow/tools/ci_build/copy_binary.py --filename
# tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-cp35-cp35m-manylinux1_x86_64.whl
# --new_py_ver 36
#
"""Copy binaries of TensorFlow for different python versions."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import shutil
import tempfile
import zipfile
TF_NIGHTLY_REGEX = (r"(.+)(tf_nightly.*)-(\d\.[\d]{1,2}"
r"\.\d.dev[\d]{0,8})-(.+)\.whl")
BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"
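# Illustrative sketch (not part of the original script): how TF_NIGHTLY_REGEX
# splits a nightly wheel path; the example path mirrors the usage note in the
# header comment.
def _example_parse_wheel_path():
  match = re.search(
      TF_NIGHTLY_REGEX,
      "tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-cp35-cp35m-manylinux1_x86_64.whl")
  # groups: (directory, package, version, python/platform tag) ->
  # ("tf_nightly/", "tf_nightly_gpu", "1.4.0.dev20170914",
  #  "cp35-cp35m-manylinux1_x86_64")
  return match.groups()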
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
raise RuntimeError("%s not found." % filename)
def copy_binary(directory, origin_tag, new_tag, version, package):
"""Rename and copy binaries for different python versions.
  Args:
directory: string of directory
origin_tag: str of the old python version tag
new_tag: str of the new tag
version: the version of the package
package: str, name of the package
"""
print("Rename and copy binaries with %s to %s." % (origin_tag, new_tag))
origin_binary = BINARY_STRING_TEMPLATE % (package, version, origin_tag)
new_binary = BINARY_STRING_TEMPLATE % (package, version, new_tag)
zip_ref = zipfile.ZipFile(os.path.join(directory, origin_binary), "r")
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
zip_ref.extractall()
zip_ref.close()
old_py_ver = re.search(r"(cp\d\d-cp\d\d)", origin_tag).group(1)
new_py_ver = re.search(r"(cp\d\d-cp\d\d)", new_tag).group(1)
wheel_file = os.path.join(
tmpdir, "%s-%s.dist-info" % (package, version), "WHEEL")
with open(wheel_file, "r") as f:
content = f.read()
with open(wheel_file, "w") as f:
f.write(content.replace(old_py_ver, new_py_ver))
zout = zipfile.ZipFile(directory + new_binary, "w", zipfile.ZIP_DEFLATED)
zip_these_files = [
"%s-%s.dist-info" % (package, version),
"%s-%s.data" % (package, version),
"tensorflow",
"tensorflow_core",
]
for dirname in zip_these_files:
for root, _, files in os.walk(dirname):
for filename in files:
zout.write(os.path.join(root, filename))
zout.close()
finally:
shutil.rmtree(tmpdir)
def main():
"""This script copies binaries.
Requirements:
filename: The path to the whl file
AND
    new_py_ver: The new two-digit python version, e.g. 27 or 33
Raises:
RuntimeError: If the whl file was not found
"""
parser = argparse.ArgumentParser(description="Cherry picking automation.")
# Arg information
parser.add_argument(
"--filename", help="path to whl file we are copying", required=True)
parser.add_argument(
"--new_py_ver", help="two digit py version eg. 27 or 33", required=True)
args = parser.parse_args()
# Argument checking
args.filename = os.path.abspath(args.filename)
check_existence(args.filename)
regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)
directory = regex_groups.group(1)
package = regex_groups.group(2)
version = regex_groups.group(3)
origin_tag = regex_groups.group(4)
old_py_ver = re.search(r"(cp\d\d)", origin_tag).group(1)
# Create new tags
new_tag = origin_tag.replace(old_py_ver, "cp" + args.new_py_ver)
# Copy the binary with the info we have
copy_binary(directory, origin_tag, new_tag, version, package)
if __name__ == "__main__":
main()
| tensorflow-master | tensorflow/tools/ci_build/copy_binary.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configure build environment for certain Intel platforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import subprocess
NEHALEM_CPU_INSTRUCTIONS = [
"MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE4.1", "SSE4.2", "POPCNT"
]
SANDYBRIDGE_CPU_INSTRUCTIONS = NEHALEM_CPU_INSTRUCTIONS[:]
SANDYBRIDGE_CPU_INSTRUCTIONS.extend(["AVX", "AES", "PCLMUL"])
HASWELL_CPU_INSTRUCTIONS = SANDYBRIDGE_CPU_INSTRUCTIONS[:]
HASWELL_CPU_INSTRUCTIONS.extend(
["FSGSBASE", "RDRND", "FMA", "BMI", "BMI2", "F16C", "MOVBE", "AVX2"])
SKYLAKE_CPU_INSTRUCTIONS = HASWELL_CPU_INSTRUCTIONS[:]
SKYLAKE_CPU_INSTRUCTIONS.extend([
"PKU", "RDSEED", "ADCX", "PREFETCHW", "CLFLUSHOPT", "XSAVEC", "XSAVES",
"AVX512F", "CLWB", "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512CD"
])
ICELAKE_CPU_INSTRUCTIONS = SKYLAKE_CPU_INSTRUCTIONS[:]
ICELAKE_CPU_INSTRUCTIONS.extend([
"AVX512VBMI", "AVX512IFMA", "SHA", "CLWB", "UMIP", "RDPID", "GFNI",
"AVX512VBMI2", "AVX512VPOPCNTDQ", "AVX512BITALG", "AVX512VNNI",
"VPCLMULQDQ", "VAES"
])
BASIC_BUILD_OPTS = ["--cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0", "--copt=-O3"]
SECURE_BUILD_OPTS = [
"--copt=-Wformat", "--copt=-Wformat-security", "--copt=-fstack-protector",
"--copt=-fPIC", "--copt=-fpic", "--linkopt=-znoexecstack",
"--linkopt=-zrelro", "--linkopt=-znow", "--linkopt=-fstack-protector"
]
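# Example invocation (illustrative; the flags are defined in parse_args below):
#   python set-build-env.py -p skylake --secure-build -f /root/.mkl.bazelrc
# writes a single "build ..." line combining the basic and secure options above
# with the per-platform --copt=-m<instruction> and --config flags into the
# given bazelrc file.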
class BuildEnvSetter(object):
"""Prepares the proper environment settings for various Intel platforms."""
default_platform_ = "haswell"
PLATFORMS = {
"nehalem": {
"min_gcc_major_version": "4",
"min_gcc_minor_version": "8",
"flags": NEHALEM_CPU_INSTRUCTIONS
},
"sandybridge": {
"min_gcc_major_version": "4",
"min_gcc_minor_version": "8",
"flags": SANDYBRIDGE_CPU_INSTRUCTIONS
},
"haswell": {
"min_gcc_major_version": "4",
"min_gcc_minor_version": "8",
"flags": HASWELL_CPU_INSTRUCTIONS
},
"skylake": {
"min_gcc_major_version": "6",
"min_gcc_minor_version": "0",
"flags": SKYLAKE_CPU_INSTRUCTIONS
},
"icelake": {
"min_gcc_major_version": "8",
"min_gcc_minor_version": "0",
"flags": ICELAKE_CPU_INSTRUCTIONS
}
}
def __init__(self):
self.args = None
self.bazel_flags_ = "build "
self.go()
def gcc_version_ok(self, min_gcc_major_version, min_gcc_minor_version):
"""Make sure the GCC version installed on the machine is acceptable."""
# check to see if gcc is present
gcc_path = ""
gcc_path_cmd = "command -v gcc"
try:
print("gcc_path_cmd = {}".format(gcc_path_cmd))
gcc_path = subprocess.check_output(gcc_path_cmd, shell=True,
stderr=subprocess.STDOUT).\
strip()
print("gcc located here: {}".format(gcc_path))
if not os.access(gcc_path, os.F_OK | os.X_OK):
raise ValueError(
"{} does not exist or is not executable.".format(gcc_path))
gcc_output = subprocess.check_output(
[gcc_path, "-dumpfullversion", "-dumpversion"],
stderr=subprocess.STDOUT).strip()
# handle python2 vs 3 (bytes vs str type)
if isinstance(gcc_output, bytes):
gcc_output = gcc_output.decode("utf-8")
print("gcc version: {}".format(gcc_output))
gcc_info = gcc_output.split(".")
      if int(gcc_info[0]) < int(min_gcc_major_version):
print("Your MAJOR version of GCC is too old: {}; "
"it must be at least {}.{}".format(gcc_info[0],
min_gcc_major_version,
min_gcc_minor_version))
return False
      elif int(gcc_info[0]) == int(min_gcc_major_version):
        if int(gcc_info[1]) < int(min_gcc_minor_version):
print("Your MINOR version of GCC is too old: {}; "
"it must be at least {}.{}".format(gcc_info[1],
min_gcc_major_version,
min_gcc_minor_version))
return False
return True
else:
self._debug("gcc version OK: {}.{}".format(gcc_info[0], gcc_info[1]))
return True
    except subprocess.CalledProcessError as e:
print("Problem getting gcc info: {}".format(e))
return False
def parse_args(self):
"""Set up argument parser, and parse CLI args."""
arg_parser = argparse.ArgumentParser(
description="Parse the arguments for the "
"TensorFlow build environment "
" setter")
arg_parser.add_argument(
"--disable-mkl",
dest="disable_mkl",
help="Turn off MKL. By default the compiler flag "
"--config=mkl is enabled.",
action="store_true")
arg_parser.add_argument(
"--disable-v2",
dest="disable_v2",
help="Don't build TensorFlow v2. By default the "
" compiler flag --config=v2 is enabled.",
action="store_true")
arg_parser.add_argument(
"-s",
"--secure-build",
dest="secure_build",
help="Enable secure build flags.",
action="store_true")
arg_parser.add_argument(
"-p",
"--platform",
choices=self.PLATFORMS.keys(),
help="The target platform.",
dest="target_platform",
default=self.default_platform_)
arg_parser.add_argument(
"-f",
"--bazelrc-file",
dest="bazelrc_file",
help="The full path to the bazelrc file into which "
"the build command will be written. The path "
"will be relative to the container "
" environment.",
required=True)
self.args = arg_parser.parse_args()
def validate_args(self):
if os.path.exists(self.args.bazelrc_file):
if os.path.isfile(self.args.bazelrc_file):
self._debug("The file {} exists and will be deleted.".format(
self.args.bazelrc_file))
elif os.path.isdir(self.args.bazelrc_file):
raise ValueError("{} is not a valid file name".format(
self.args.bazelrc_file))
return True
def set_build_args(self):
"""Generate Bazel build flags."""
for flag in BASIC_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
if self.args.secure_build:
for flag in SECURE_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
for flag in self.PLATFORMS.get(self.args.target_platform)["flags"]:
self.bazel_flags_ += "--copt=-m{} ".format(flag.lower())
if not self.args.disable_mkl:
self.bazel_flags_ += "--config=mkl "
if not self.args.disable_v2:
self.bazel_flags_ += "--config=v2 "
def write_build_args(self):
self._debug("Writing build flags: {}".format(self.bazel_flags_))
with open(self.args.bazelrc_file, "w") as f:
f.write(self.bazel_flags_)
def _debug(self, msg):
print(msg)
def go(self):
self.parse_args()
target_platform = self.PLATFORMS.get(self.args.target_platform)
if self.validate_args() and \
self.gcc_version_ok(target_platform["min_gcc_major_version"],
target_platform["min_gcc_minor_version"]):
self.set_build_args()
self.write_build_args()
else:
print("Error.")
env_setter = BuildEnvSetter()
| tensorflow-master | tensorflow/tools/ci_build/linux/mkl/set-build-env.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check that TensorFlow python files have certain __future__ imports.
This makes it easier to find Python 2.7 / Python 3.x incompatibility bugs.
In particular, this test makes it illegal to write a Python file that
doesn't import division from __future__, which can catch subtle division
bugs in Python 3.
Note: We can't use tf.test in this file because it needs to run in an
environment that doesn't include license-free gen_blah_ops.py files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import six
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
FUTURES_PATTERN = re.compile(r'^from __future__ import (\w+)\s*$')
FUTURES_PATTERN_2 = re.compile(
r'^from __future__ import (\w+), (\w+), (\w+)\s*$')
FUTURES_PATTERN_3 = re.compile(r'^from __future__ import (\w+) as \w+\s*$')
REQUIRED_FUTURES = frozenset(['absolute_import', 'division', 'print_function'])
WHITELIST = [
'python/platform/control_imports.py',
'tools/docker/jupyter_notebook_config.py',
'tools/ci_build/update_version.py',
'tools/ci_build/copy_binary.py',
]
# Tests that must *not* import division
OLD_DIVISION = [
'python/framework/tensor_shape_div_test.py',
'python/kernel_tests/division_past_test.py',
]
def check_file(path, old_division):
futures = set()
count = 0
for line in open(path, encoding='utf-8') if six.PY3 else open(path):
count += 1
m = FUTURES_PATTERN.match(line)
if not m:
m = FUTURES_PATTERN_3.match(line)
if m:
futures.add(m.group(1))
else:
m = FUTURES_PATTERN_2.match(line)
if m:
for entry in m.groups():
futures.add(entry)
if not count:
return # Skip empty files
if old_division:
# This file checks correct behavior without importing division
# from __future__, so make sure it's doing that.
expected = set(['absolute_import', 'print_function'])
if futures != expected:
raise AssertionError(('Incorrect futures for old_division file:\n'
' expected = %s\n got = %s') %
(' '.join(expected), ' '.join(futures)))
else:
missing = REQUIRED_FUTURES - futures
if missing:
raise AssertionError('Missing futures: %s' % ' '.join(missing))
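# A minimal illustrative sketch (not exercised by main): shows which of the
# three regexes above match common __future__ import spellings. The sample
# lines are assumed examples, not taken from the repository.
def _demo_future_patterns():
  samples = [
      'from __future__ import division\n',
      'from __future__ import absolute_import, division, print_function\n',
      'from __future__ import print_function as _print_function\n',
  ]
  for sample in samples:
    m = (FUTURES_PATTERN.match(sample) or FUTURES_PATTERN_2.match(sample) or
         FUTURES_PATTERN_3.match(sample))
    print(sample.strip(), '->', m.groups() if m else None)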
def main():
# Make sure BASE_DIR ends with tensorflow. If it doesn't, we probably
# computed the wrong directory.
if os.path.split(BASE_DIR)[-1] != 'tensorflow':
raise AssertionError("BASE_DIR = '%s' doesn't end with tensorflow" %
BASE_DIR)
# Verify that all files have futures
whitelist = frozenset(os.path.join(BASE_DIR, w) for w in WHITELIST)
old_division = frozenset(os.path.join(BASE_DIR, w) for w in OLD_DIVISION)
for root, _, filenames in os.walk(BASE_DIR):
for f in fnmatch.filter(filenames, '*.py'):
path = os.path.join(root, f)
if path not in whitelist:
try:
check_file(path, old_division=path in old_division)
except AssertionError as e:
short_path = path[len(BASE_DIR) + 1:]
raise AssertionError('Error in %s: %s' % (short_path, str(e)))
if __name__ == '__main__':
main()
| tensorflow-master | tensorflow/tools/test/check_futures_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import multiprocessing
import platform
import re
import socket
# pylint: disable=g-bad-import-order
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
import cpuinfo
import psutil
# pylint: enable=g-bad-import-order
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
def gather_machine_configuration():
  """Gather machine configuration; the top-level function of this library."""
config = test_log_pb2.MachineConfiguration()
config.cpu_info.CopyFrom(gather_cpu_info())
config.platform_info.CopyFrom(gather_platform_info())
# gather_available_device_info must come before gather_gpu_devices
# because the latter may access libcudart directly, which confuses
# TensorFlow StreamExecutor.
for d in gather_available_device_info():
config.available_device_info.add().CopyFrom(d)
for gpu in gpu_info_lib.gather_gpu_devices():
config.device_info.add().Pack(gpu)
config.memory_info.CopyFrom(gather_memory_info())
config.hostname = gather_hostname()
return config
def gather_hostname():
return socket.gethostname()
def gather_memory_info():
"""Gather memory info."""
mem_info = test_log_pb2.MemoryInfo()
vmem = psutil.virtual_memory()
mem_info.total = vmem.total
mem_info.available = vmem.available
return mem_info
def gather_cpu_info():
"""Gather CPU Information. Assumes all CPUs are the same."""
cpu_info = test_log_pb2.CPUInfo()
cpu_info.num_cores = multiprocessing.cpu_count()
# Gather num_cores_allowed
try:
with gfile.GFile('/proc/self/status', 'rb') as fh:
nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', fh.read())
if nc: # e.g. 'ff' => 8, 'fff' => 12
cpu_info.num_cores_allowed = (
bin(int(nc.group(1).replace(',', ''), 16)).count('1'))
except errors.OpError:
pass
finally:
if cpu_info.num_cores_allowed == 0:
cpu_info.num_cores_allowed = cpu_info.num_cores
# Gather the rest
info = cpuinfo.get_cpu_info()
cpu_info.cpu_info = info['brand']
cpu_info.num_cores = info['count']
cpu_info.mhz_per_cpu = info['hz_advertised_raw'][0] / 1.0e6
l2_cache_size = re.match(r'(\d+)', str(info.get('l2_cache_size', '')))
if l2_cache_size:
# If a value is returned, it's in KB
cpu_info.cache_size['L2'] = int(l2_cache_size.group(0)) * 1024
# Try to get the CPU governor
try:
cpu_governors = set([
gfile.GFile(f, 'r').readline().rstrip()
for f in glob.glob(
'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')
])
if cpu_governors:
if len(cpu_governors) > 1:
cpu_info.cpu_governor = 'mixed'
else:
cpu_info.cpu_governor = list(cpu_governors)[0]
except errors.OpError:
pass
return cpu_info
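# Illustrative sketch of the Cpus_allowed bit counting done above; the mask
# value is an assumed example, e.g. _demo_cpus_allowed_mask('fff') returns 12.
def _demo_cpus_allowed_mask(mask='ff,00000fff'):
  """Count allowed cores in a /proc-style Cpus_allowed hex mask."""
  return bin(int(mask.replace(',', ''), 16)).count('1')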
def gather_available_device_info():
"""Gather list of devices available to TensorFlow.
Returns:
A list of test_log_pb2.AvailableDeviceInfo messages.
"""
device_info_list = []
devices = device_lib.list_local_devices()
for d in devices:
device_info = test_log_pb2.AvailableDeviceInfo()
device_info.name = d.name
device_info.type = d.device_type
device_info.memory_limit = d.memory_limit
device_info.physical_description = d.physical_device_desc
device_info_list.append(device_info)
return device_info_list
def gather_platform_info():
"""Gather platform info."""
platform_info = test_log_pb2.PlatformInfo()
(platform_info.bits, platform_info.linkage) = platform.architecture()
platform_info.machine = platform.machine()
platform_info.release = platform.release()
platform_info.system = platform.system()
platform_info.version = platform.version()
return platform_info
| tensorflow-master | tensorflow/tools/test/system_info_lib.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prints the machine configuration gathered for TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import app
from tensorflow.tools.test import system_info_lib
def main(unused_args):
config = system_info_lib.gather_machine_configuration()
print(config)
if __name__ == "__main__":
app.run()
| tensorflow-master | tensorflow/tools/test/system_info.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command to upload benchmark test results to a cloud datastore.
This uploader script is typically run periodically as a cron job. It locates,
in a specified data directory, files that contain benchmark test results. The
results are written by the "run_and_gather_logs.py" script using the JSON-format
serialization of the "TestResults" protobuf message (core/util/test_log.proto).
For each file, the uploader reads the "TestResults" data, transforms it into
the schema used in the datastore (see below), and uploads it to the datastore.
After processing a file, the uploader moves it to a specified archive directory
for safe-keeping.
The uploader uses file-level exclusive locking (non-blocking flock) which allows
multiple instances of this script to run concurrently if desired, splitting the
task among them, each one processing and archiving different files.
The "TestResults" object contains test metadata and multiple benchmark entries.
The datastore schema splits this information into two Kinds (like tables), one
holding the test metadata in a single "Test" Entity (like rows), and one holding
each related benchmark entry in a separate "Entry" Entity. Datastore creates a
unique ID (retrieval key) for each Entity, and this ID is always returned along
with the data when an Entity is fetched.
* Test:
- test: unique name of this test (string)
- start: start time of this test run (datetime)
- info: JSON-encoded test metadata (string, not indexed)
* Entry:
- test: unique name of this test (string)
- entry: unique name of this benchmark entry within this test (string)
- start: start time of this test run (datetime)
- timing: average time (usec) per iteration of this test/entry run (float)
- info: JSON-encoded entry metadata (string, not indexed)
A few composite indexes are created (upload_test_benchmarks_index.yaml) for fast
retrieval of benchmark data and reduced I/O to the client without adding a lot
of indexing and storage burden:
* Test: (test, start) is indexed to fetch recent start times for a given test.
* Entry: (test, entry, start, timing) is indexed to use projection and only
fetch the recent (start, timing) data for a given test/entry benchmark.
Example retrieval GQL statements:
* Get the recent start times for a given test:
SELECT start FROM Test WHERE test = <test-name> AND
start >= <recent-datetime> LIMIT <count>
* Get the recent timings for a given benchmark:
SELECT start, timing FROM Entry WHERE test = <test-name> AND
entry = <entry-name> AND start >= <recent-datetime> LIMIT <count>
* Get all test names uniquified (e.g. display a list of available tests):
SELECT DISTINCT ON (test) test FROM Test
* For a given test (from the list above), get all its entry names. The list of
entry names can be extracted from the test "info" metadata for a given test
name and start time (e.g. pick the latest start time for that test).
SELECT * FROM Test WHERE test = <test-name> AND start = <latest-datetime>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import fcntl
import json
import os
import shutil
from six import text_type
from google.cloud import datastore
def is_real_file(dirpath, fname):
fpath = os.path.join(dirpath, fname)
return os.path.isfile(fpath) and not os.path.islink(fpath)
def get_mtime(dirpath, fname):
fpath = os.path.join(dirpath, fname)
return os.stat(fpath).st_mtime
def list_files_by_mtime(dirpath):
"""Return a list of files in the directory, sorted in increasing "mtime".
  Return a list of files in the given directory, sorted from oldest to newest
according to their modification times. Only return actual files, skipping
directories, symbolic links, pipes, etc.
Args:
dirpath: directory pathname
Returns:
A list of file names relative to the given directory path.
"""
files = [f for f in os.listdir(dirpath) if is_real_file(dirpath, f)]
return sorted(files, key=lambda f: get_mtime(dirpath, f))
# Note: The file locking code uses flock() instead of lockf() because benchmark
# files are only opened for reading (not writing) and we still want exclusive
# locks on them. This imposes the limitation that the data directory must be
# local, not NFS-mounted.
def lock(fd):
fcntl.flock(fd, fcntl.LOCK_EX)
def unlock(fd):
fcntl.flock(fd, fcntl.LOCK_UN)
def trylock(fd):
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
except Exception: # pylint: disable=broad-except
return False
def upload_benchmark_data(client, data):
"""Parse benchmark data and use the client to upload it to the datastore.
Parse the given benchmark data from the serialized JSON-format used to write
the test results file. Create the different datastore Entities from that data
and upload them to the datastore in a batch using the client connection.
Args:
client: datastore client connection
data: JSON-encoded benchmark data
"""
test_result = json.loads(data)
test_name = text_type(test_result["name"])
start_time = datetime.datetime.utcfromtimestamp(
float(test_result["startTime"]))
batch = []
# Create the Test Entity containing all the test information as a
# non-indexed JSON blob.
t_key = client.key("Test")
t_val = datastore.Entity(t_key, exclude_from_indexes=["info"])
t_val.update({
"test": test_name,
"start": start_time,
"info": text_type(data)
})
batch.append(t_val)
# Create one Entry Entity for each benchmark entry. The wall-clock timing is
# the attribute to be fetched and displayed. The full entry information is
# also stored as a non-indexed JSON blob.
for ent in test_result["entries"].get("entry", []):
ent_name = text_type(ent["name"])
e_key = client.key("Entry")
e_val = datastore.Entity(e_key, exclude_from_indexes=["info"])
e_val.update({
"test": test_name,
"start": start_time,
"entry": ent_name,
"timing": ent["wallTime"],
"info": text_type(json.dumps(ent))
})
batch.append(e_val)
# Put the whole batch of Entities in the datastore.
client.put_multi(batch)
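# Illustrative sketch of the "recent timings" retrieval described in the module
# docstring, assuming the google.cloud.datastore query API; the function name
# and arguments are examples, not part of the uploader's actual workflow.
def fetch_recent_timings(client, test_name, entry_name, since, limit=100):
  """Return (start, timing) pairs for one benchmark entry since `since`."""
  query = client.query(kind="Entry")
  query.add_filter("test", "=", test_name)
  query.add_filter("entry", "=", entry_name)
  query.add_filter("start", ">=", since)
  query.projection = ["start", "timing"]
  return [(e["start"], e["timing"]) for e in query.fetch(limit=limit)]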
def upload_benchmark_files(opts):
"""Find benchmark files, process them, and upload their data to the datastore.
Locate benchmark files in the data directory, process them, and upload their
data to the datastore. After processing each file, move it to the archive
directory for safe-keeping. Each file is locked for processing, which allows
multiple uploader instances to run concurrently if needed, each one handling
different benchmark files, skipping those already locked by another.
Args:
opts: command line options object
Note: To use locking, the file is first opened, then its descriptor is used to
lock and read it. The lock is released when the file is closed. Do not open
that same file a 2nd time while the lock is already held, because when that
2nd file descriptor is closed, the lock will be released prematurely.
"""
client = datastore.Client()
for fname in list_files_by_mtime(opts.datadir):
fpath = os.path.join(opts.datadir, fname)
try:
with open(fpath, "r") as fd:
if trylock(fd):
upload_benchmark_data(client, fd.read())
shutil.move(fpath, os.path.join(opts.archivedir, fname))
# unlock(fd) -- When "with open()" closes fd, the lock is released.
except Exception as e: # pylint: disable=broad-except
print("Cannot process '%s', skipping. Error: %s" % (fpath, e))
def parse_cmd_line():
"""Parse command line options.
Returns:
The parsed arguments object.
"""
desc = "Upload benchmark results to datastore."
opts = [
("-a", "--archivedir", str, None, True,
"Directory where benchmark files are archived."),
("-d", "--datadir", str, None, True,
"Directory of benchmark files to upload."),
]
parser = argparse.ArgumentParser(description=desc)
for opt in opts:
parser.add_argument(opt[0], opt[1], type=opt[2], default=opt[3],
required=opt[4], help=opt[5])
return parser.parse_args()
def main():
options = parse_cmd_line()
# Check that credentials are specified to access the datastore.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"):
raise ValueError("GOOGLE_APPLICATION_CREDENTIALS env. var. is not set.")
upload_benchmark_files(options)
if __name__ == "__main__":
main()
| tensorflow-master | tensorflow/tools/test/upload_test_benchmarks.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tensorflow-master | tensorflow/tools/test/__init__.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test runner for TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shlex
try:
  from string import maketrans  # Python 2
except ImportError:  # Python 3: string.maketrans was removed
  maketrans = str.maketrans
import sys
import time
from google.protobuf import json_format
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.test import run_and_gather_logs_lib
# pylint: disable=g-import-not-at-top
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
try:
import cpuinfo
import psutil
except ImportError as e:
tf_logging.error("\n\n\nERROR: Unable to import necessary library: {}. "
"Issuing a soft exit.\n\n\n".format(e))
sys.exit(0)
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
FLAGS = None
def gather_build_configuration():
build_config = test_log_pb2.BuildConfiguration()
build_config.mode = FLAGS.compilation_mode
# Include all flags except includes
cc_flags = [
flag for flag in shlex.split(FLAGS.cc_flags) if not flag.startswith("-i")
]
build_config.cc_flags.extend(cc_flags)
return build_config
def main(unused_args):
name = FLAGS.name
test_name = FLAGS.test_name
test_args = FLAGS.test_args
benchmark_type = FLAGS.benchmark_type
test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(
name, test_name=test_name, test_args=test_args,
benchmark_type=benchmark_type)
# Additional bits we receive from bazel
test_results.build_configuration.CopyFrom(gather_build_configuration())
if not FLAGS.test_log_output_dir:
print(text_format.MessageToString(test_results))
return
if FLAGS.test_log_output_filename:
file_name = FLAGS.test_log_output_filename
else:
file_name = (name.strip("/").translate(maketrans("/:", "__")) +
time.strftime("%Y%m%d%H%M%S", time.gmtime()))
if FLAGS.test_log_output_use_tmpdir:
tmpdir = test.get_temp_dir()
output_path = os.path.join(tmpdir, FLAGS.test_log_output_dir, file_name)
else:
output_path = os.path.join(
os.path.abspath(FLAGS.test_log_output_dir), file_name)
json_test_results = json_format.MessageToJson(test_results)
gfile.GFile(output_path + ".json", "w").write(json_test_results)
tf_logging.info("Test results written to: %s" % output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register(
"type", "bool", lambda v: v.lower() in ("true", "t", "y", "yes"))
parser.add_argument(
"--name", type=str, default="", help="Benchmark target identifier.")
parser.add_argument(
"--test_name", type=str, default="", help="Test target to run.")
parser.add_argument(
"--benchmark_type",
type=str,
default="",
help="BenchmarkType enum string (benchmark type).")
parser.add_argument(
"--test_args",
type=str,
default="",
help="Test arguments, space separated.")
parser.add_argument(
"--test_log_output_use_tmpdir",
type="bool",
nargs="?",
const=True,
default=False,
help="Store the log output into tmpdir?")
parser.add_argument(
"--compilation_mode",
type=str,
default="",
help="Mode used during this build (e.g. opt, dbg).")
parser.add_argument(
"--cc_flags",
type=str,
default="",
help="CC flags used during this build.")
parser.add_argument(
"--test_log_output_dir",
type=str,
default="",
help="Directory to write benchmark results to.")
parser.add_argument(
"--test_log_output_filename",
type=str,
default="",
help="Filename to output benchmark results to. If the filename is not "
"specified, it will be automatically created based on --name "
"and current time.")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| tensorflow-master | tensorflow/tools/test/run_and_gather_logs.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting GPU device information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import platform
from tensorflow.core.util import test_log_pb2
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
def _gather_gpu_devices_proc():
"""Try to gather NVidia GPU device information via /proc/driver."""
dev_info = []
for f in gfile.Glob("/proc/driver/nvidia/gpus/*/information"):
bus_id = f.split("/")[5]
key_values = dict(line.rstrip().replace("\t", "").split(":", 1)
for line in gfile.GFile(f, "r"))
key_values = dict((k.lower(), v.strip(" ").rstrip(" "))
for (k, v) in key_values.items())
info = test_log_pb2.GPUInfo()
info.model = key_values.get("model", "Unknown")
info.uuid = key_values.get("gpu uuid", "Unknown")
info.bus_id = bus_id
dev_info.append(info)
return dev_info
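# Illustrative sketch of the key/value parsing performed above; the blob is an
# assumed example of a /proc/driver/nvidia/gpus/*/information file.
def _demo_parse_gpu_information(
    blob="Model:\t\tTesla K80\nGPU UUID:\tGPU-0000\n"):
  key_values = dict(line.rstrip().replace("\t", "").split(":", 1)
                    for line in blob.splitlines())
  return dict((k.lower(), v.strip(" ")) for (k, v) in key_values.items())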
class CUDADeviceProperties(ct.Structure):
# See $CUDA_HOME/include/cuda_runtime_api.h for the definition of
# the cudaDeviceProp struct.
_fields_ = [
("name", ct.c_char * 256),
("totalGlobalMem", ct.c_size_t),
("sharedMemPerBlock", ct.c_size_t),
("regsPerBlock", ct.c_int),
("warpSize", ct.c_int),
("memPitch", ct.c_size_t),
("maxThreadsPerBlock", ct.c_int),
("maxThreadsDim", ct.c_int * 3),
("maxGridSize", ct.c_int * 3),
("clockRate", ct.c_int),
("totalConstMem", ct.c_size_t),
("major", ct.c_int),
("minor", ct.c_int),
("textureAlignment", ct.c_size_t),
("texturePitchAlignment", ct.c_size_t),
("deviceOverlap", ct.c_int),
("multiProcessorCount", ct.c_int),
("kernelExecTimeoutEnabled", ct.c_int),
("integrated", ct.c_int),
("canMapHostMemory", ct.c_int),
("computeMode", ct.c_int),
("maxTexture1D", ct.c_int),
("maxTexture1DMipmap", ct.c_int),
("maxTexture1DLinear", ct.c_int),
("maxTexture2D", ct.c_int * 2),
("maxTexture2DMipmap", ct.c_int * 2),
("maxTexture2DLinear", ct.c_int * 3),
("maxTexture2DGather", ct.c_int * 2),
("maxTexture3D", ct.c_int * 3),
("maxTexture3DAlt", ct.c_int * 3),
("maxTextureCubemap", ct.c_int),
("maxTexture1DLayered", ct.c_int * 2),
("maxTexture2DLayered", ct.c_int * 3),
("maxTextureCubemapLayered", ct.c_int * 2),
("maxSurface1D", ct.c_int),
("maxSurface2D", ct.c_int * 2),
("maxSurface3D", ct.c_int * 3),
("maxSurface1DLayered", ct.c_int * 2),
("maxSurface2DLayered", ct.c_int * 3),
("maxSurfaceCubemap", ct.c_int),
("maxSurfaceCubemapLayered", ct.c_int * 2),
("surfaceAlignment", ct.c_size_t),
("concurrentKernels", ct.c_int),
("ECCEnabled", ct.c_int),
("pciBusID", ct.c_int),
("pciDeviceID", ct.c_int),
("pciDomainID", ct.c_int),
("tccDriver", ct.c_int),
("asyncEngineCount", ct.c_int),
("unifiedAddressing", ct.c_int),
("memoryClockRate", ct.c_int),
("memoryBusWidth", ct.c_int),
("l2CacheSize", ct.c_int),
("maxThreadsPerMultiProcessor", ct.c_int),
("streamPrioritiesSupported", ct.c_int),
("globalL1CacheSupported", ct.c_int),
("localL1CacheSupported", ct.c_int),
("sharedMemPerMultiprocessor", ct.c_size_t),
("regsPerMultiprocessor", ct.c_int),
("managedMemSupported", ct.c_int),
("isMultiGpuBoard", ct.c_int),
("multiGpuBoardGroupID", ct.c_int),
# Pad with extra space to avoid dereference crashes if future
# versions of CUDA extend the size of this struct.
("__future_buffer", ct.c_char * 4096)
]
def _gather_gpu_devices_cudart():
"""Try to gather NVidia GPU device information via libcudart."""
dev_info = []
system = platform.system()
if system == "Linux":
libcudart = ct.cdll.LoadLibrary("libcudart.so")
elif system == "Darwin":
libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
elif system == "Windows":
libcudart = ct.windll.LoadLibrary("libcudart.dll")
else:
raise NotImplementedError("Cannot identify system.")
version = ct.c_int()
rc = libcudart.cudaRuntimeGetVersion(ct.byref(version))
if rc != 0:
raise ValueError("Could not get version")
if version.value < 6050:
    raise NotImplementedError("CUDA version must be >= 6.5")
device_count = ct.c_int()
libcudart.cudaGetDeviceCount(ct.byref(device_count))
for i in range(device_count.value):
properties = CUDADeviceProperties()
rc = libcudart.cudaGetDeviceProperties(ct.byref(properties), i)
if rc != 0:
raise ValueError("Could not get device properties")
    # Use a mutable ctypes buffer: cudaDeviceGetPCIBusId writes into it, and
    # wrapping an immutable Python string in ct.c_char_p would not work
    # (and requires bytes on Python 3).
    pci_bus_id = ct.create_string_buffer(64)
    rc = libcudart.cudaDeviceGetPCIBusId(pci_bus_id, 64, i)
    if rc != 0:
      raise ValueError("Could not get device PCI bus id")
    info = test_log_pb2.GPUInfo()  # No UUID available
    model = properties.name
    info.model = model.decode("utf-8") if isinstance(model, bytes) else model
    bus_id = pci_bus_id.value
    info.bus_id = (
        bus_id.decode("utf-8") if isinstance(bus_id, bytes) else bus_id)
    dev_info.append(info)
del properties
return dev_info
def gather_gpu_devices():
"""Gather gpu device info.
Returns:
A list of test_log_pb2.GPUInfo messages.
"""
try:
# Prefer using /proc if possible, it provides the UUID.
dev_info = _gather_gpu_devices_proc()
if not dev_info:
raise ValueError("No devices found")
return dev_info
except (IOError, ValueError, errors.OpError):
pass
try:
# Fall back on using libcudart
return _gather_gpu_devices_cudart()
except (OSError, ValueError, NotImplementedError, errors.OpError):
return []
| tensorflow-master | tensorflow/tools/test/gpu_info_lib.py |
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Test that checks if we have any issues with case insensitive filesystems.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
ERROR_MESSAGE = """
Files with same name but different case detected in directory: {}
"""
def main():
# Make sure BASE_DIR ends with tensorflow. If it doesn't, we probably
# computed the wrong directory.
if os.path.split(BASE_DIR)[-1] != 'tensorflow':
raise AssertionError(
"BASE_DIR = '%s' doesn't end with tensorflow" % BASE_DIR)
for dirpath, dirnames, filenames in os.walk(BASE_DIR, followlinks=True):
lowercase_directories = [x.lower() for x in dirnames]
lowercase_files = [x.lower() for x in filenames]
lowercase_dir_contents = lowercase_directories + lowercase_files
if len(lowercase_dir_contents) != len(set(lowercase_dir_contents)):
raise AssertionError(ERROR_MESSAGE.format(dirpath))
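# Illustrative sketch of the case-collision check used in main(); the sample
# names are assumed examples.
def _demo_case_collision(names=('BUILD', 'build', 'readme.md')):
  """Return True if two names would collide on a case-insensitive filesystem."""
  lowered = [name.lower() for name in names]
  return len(lowered) != len(set(lowered))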
if __name__ == '__main__':
main()
| tensorflow-master | tensorflow/tools/test/file_name_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for running test targets and gathering benchmark logs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import shlex
import subprocess
import tempfile
import time
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
from tensorflow.tools.test import system_info_lib
class MissingLogsError(Exception):
pass
def get_git_commit_sha():
"""Get git commit SHA for this build.
Attempt to get the SHA from environment variable GIT_COMMIT, which should
be available on Jenkins build agents.
Returns:
SHA hash of the git commit used for the build, if available
"""
return os.getenv("GIT_COMMIT")
def process_test_logs(name, test_name, test_args, benchmark_type,
start_time, run_time, log_files):
"""Gather test information and put it in a TestResults proto.
Args:
name: Benchmark target identifier.
test_name: A unique bazel target, e.g. "//path/to:test"
test_args: A string containing all arguments to run the target with.
benchmark_type: A string representing the BenchmarkType enum; the
benchmark type for this target.
start_time: Test starting time (epoch)
run_time: Wall time that the test ran for
log_files: Paths to the log files
Returns:
A TestResults proto
"""
results = test_log_pb2.TestResults()
results.name = name
results.target = test_name
results.start_time = start_time
results.run_time = run_time
results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(
benchmark_type.upper())
# Gather source code information
git_sha = get_git_commit_sha()
if git_sha:
results.commit_id.hash = git_sha
results.entries.CopyFrom(process_benchmarks(log_files))
results.run_configuration.argument.extend(test_args)
results.machine_configuration.CopyFrom(
system_info_lib.gather_machine_configuration())
return results
def process_benchmarks(log_files):
benchmarks = test_log_pb2.BenchmarkEntries()
for f in log_files:
content = gfile.GFile(f, "rb").read()
if benchmarks.MergeFromString(content) != len(content):
raise Exception("Failed parsing benchmark entry from %s" % f)
return benchmarks
def run_and_gather_logs(name, test_name, test_args,
benchmark_type):
"""Run the bazel test given by test_name. Gather and return the logs.
Args:
name: Benchmark target identifier.
test_name: A unique bazel target, e.g. "//path/to:test"
test_args: A string containing all arguments to run the target with.
benchmark_type: A string representing the BenchmarkType enum; the
benchmark type for this target.
Returns:
A tuple (test_results, mangled_test_name), where
test_results: A test_log_pb2.TestResults proto
test_adjusted_name: Unique benchmark name that consists of
benchmark name optionally followed by GPU type.
Raises:
ValueError: If the test_name is not a valid target.
subprocess.CalledProcessError: If the target itself fails.
IOError: If there are problems gathering test log output from the test.
MissingLogsError: If we couldn't find benchmark logs.
"""
if not (test_name and test_name.startswith("//") and ".." not in test_name and
not test_name.endswith(":") and not test_name.endswith(":all") and
not test_name.endswith("...") and len(test_name.split(":")) == 2):
raise ValueError("Expected test_name parameter with a unique test, e.g.: "
"--test_name=//path/to:test")
test_executable = test_name.rstrip().strip("/").replace(":", "/")
if gfile.Exists(os.path.join("bazel-bin", test_executable)):
# Running in standalone mode from core of the repository
test_executable = os.path.join("bazel-bin", test_executable)
else:
# Hopefully running in sandboxed mode
test_executable = os.path.join(".", test_executable)
test_adjusted_name = name
gpu_config = gpu_info_lib.gather_gpu_devices()
if gpu_config:
gpu_name = gpu_config[0].model
gpu_short_name_match = re.search(r"Tesla (K40|K80|P100|V100)", gpu_name)
if gpu_short_name_match:
gpu_short_name = gpu_short_name_match.group(0)
test_adjusted_name = name + "|" + gpu_short_name.replace(" ", "_")
temp_directory = tempfile.mkdtemp(prefix="run_and_gather_logs")
mangled_test_name = (test_adjusted_name.strip("/")
.replace("|", "_").replace("/", "_").replace(":", "_"))
test_file_prefix = os.path.join(temp_directory, mangled_test_name)
test_file_prefix = "%s." % test_file_prefix
try:
if not gfile.Exists(test_executable):
raise ValueError("Executable does not exist: %s" % test_executable)
test_args = shlex.split(test_args)
# This key is defined in tf/core/util/reporter.h as
# TestReporter::kTestReporterEnv.
os.environ["TEST_REPORT_FILE_PREFIX"] = test_file_prefix
start_time = time.time()
subprocess.check_call([test_executable] + test_args)
run_time = time.time() - start_time
log_files = gfile.Glob("{}*".format(test_file_prefix))
if not log_files:
raise MissingLogsError("No log files found at %s." % test_file_prefix)
return (process_test_logs(
test_adjusted_name,
test_name=test_name,
test_args=test_args,
benchmark_type=benchmark_type,
start_time=int(start_time),
run_time=run_time,
log_files=log_files), test_adjusted_name)
finally:
try:
gfile.DeleteRecursively(temp_directory)
except OSError:
pass
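# Illustrative sketch of the name mangling used above to build the
# TEST_REPORT_FILE_PREFIX path; the label is an assumed example.
def _demo_mangle_name(name="//tensorflow/core:example_benchmark|Tesla_K80"):
  return (name.strip("/")
          .replace("|", "_").replace("/", "_").replace(":", "_"))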
| tensorflow-master | tensorflow/tools/test/run_and_gather_logs_lib.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to check that py_test are properly loaded in BUILD files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
def check_output_despite_error(args):
"""Get output of args from command line, even if there are errors.
Args:
args: a list of command line args.
Returns:
output as string.
"""
try:
output = subprocess.check_output(args, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
output = e.output
  if isinstance(output, bytes):
    output = output.decode('utf-8')
  return output.strip()
def main():
# Get all py_test target, note bazel query result will also include
# cuda_py_test etc.
try:
targets = subprocess.check_output([
'bazel', 'query',
'kind(py_test, //tensorflow/contrib/... + '
'//tensorflow/python/... - '
'//tensorflow/contrib/tensorboard/...)']).strip()
except subprocess.CalledProcessError as e:
targets = e.output
targets = targets.decode("utf-8") if isinstance(targets, bytes) else targets
# Only keep py_test targets, and filter out targets with 'no_pip' tag.
valid_targets = []
for target in targets.split('\n'):
kind = check_output_despite_error(['buildozer', 'print kind', target])
if kind == 'py_test':
tags = check_output_despite_error(['buildozer', 'print tags', target])
if 'no_pip' not in tags:
valid_targets.append(target)
# Get all BUILD files for all valid targets.
build_files = set()
for target in valid_targets:
build_files.add(os.path.join(target[2:].split(':')[0], 'BUILD'))
# Check if BUILD files load py_test.
files_missing_load = []
for build_file in build_files:
    updated_build_file = subprocess.check_output(
        ['buildozer', '-stdout', 'new_load //tensorflow:tensorflow.bzl py_test',
         build_file])
    if isinstance(updated_build_file, bytes):
      updated_build_file = updated_build_file.decode('utf-8')
    with open(build_file, 'r') as f:
      if f.read() != updated_build_file:
files_missing_load.append(build_file)
if files_missing_load:
raise RuntimeError('The following files are missing %s:\n %s' % (
'load("//tensorflow:tensorflow.bzl", "py_test").\nThis load statement'
' is needed because otherwise pip tests will try to use their '
'dependencies, which are not visible to them.',
'\n'.join(files_missing_load)))
else:
print('TEST PASSED.')
if __name__ == '__main__':
main()
| tensorflow-master | tensorflow/tools/pip_package/check_load_py_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| tensorflow-master | tensorflow/tools/pip_package/simple_console_for_windows.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow is an open source machine learning framework for everyone.
TensorFlow is an open source software library for high performance numerical
computation. Its flexible architecture allows easy deployment of computation
across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters
of servers to mobile and edge devices.
Originally developed by researchers and engineers from the Google Brain team
within Google's AI organization, it comes with strong support for machine
learning and deep learning and the flexible numerical computation core is used
across many other scientific domains.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
DOCLINES = __doc__.split('\n')
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
# Also update tensorflow/tensorflow.bzl and
# tensorflow/core/public/version.h
_VERSION = '1.13.1'
REQUIRED_PACKAGES = [
'absl-py >= 0.7.0',
'astor >= 0.6.0',
'gast >= 0.2.0',
'google_pasta >= 0.1.6',
'keras_applications >= 1.0.8',
'keras_preprocessing >= 1.0.5',
'numpy >= 1.14.5, < 2.0',
'six >= 1.10.0',
'protobuf >= 3.6.1',
'tensorboard >= 1.14.0, < 1.15.0',
'tensorflow_estimator >= 1.14.0rc0, < 1.15.0rc0',
'termcolor >= 1.1.0',
'wrapt >= 1.11.1',
]
if sys.byteorder == 'little':
# grpcio does not build correctly on big-endian machines due to lack of
# BoringSSL support.
# See https://github.com/tensorflow/tensorflow/issues/17882.
REQUIRED_PACKAGES.append('grpcio >= 1.8.6')
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.14.0a0, < 1.15.0a0'
elif 'tensorflow_estimator' in pkg and '2.0' in project_name:
REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview'
elif 'tensorflow_estimator' in pkg:
REQUIRED_PACKAGES[i] = 'tf-estimator-nightly'
# weakref.finalize and enum were introduced in Python 3.4
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
REQUIRED_PACKAGES.append('enum34 >= 1.1.6')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'toco_from_protos = tensorflow.lite.toco.python.toco_from_protos:main',
'tflite_convert = tensorflow.lite.python.tflite_convert:main',
'toco = tensorflow.lite.python.tflite_convert:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:run_main',
'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main',
]
# pylint: enable=line-too-long
# Only keep freeze_graph console script in 1.X.
if _VERSION.startswith('1.') and '_2.0' not in project_name:
CONSOLE_SCRIPTS.append(
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main')
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib, 'tensorflow_core',
'include')
self.install_lib = self.install_platlib
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/',
install_dir)
# Copy external code headers into tensorflow_core/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
external_header_locations = [
'tensorflow_core/include/external/eigen_archive/',
'tensorflow_core/include/external/com_google_absl/',
]
for location in external_header_locations:
if location in install_dir:
extra_dir = install_dir.replace(location, '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
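# Illustrative sketch of the include-path rewriting done by
# InstallHeaders.mkdir_and_copy_file above; the sample path is an assumption.
def _demo_header_install_dir(
    install_dir='/tmp/pkg/include/tensorflow_core/core/framework'):
  install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
  install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/',
                       install_dir)
  return install_dir  # -> '/tmp/pkg/include/tensorflow/core/framework'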
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
)
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (
list(find_files('*.h', 'tensorflow_core/core')) +
list(find_files('*.h', 'tensorflow_core/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*.inc', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) + list(
find_files('*.h', 'tensorflow_core/include/external/com_google_absl')) +
list(
find_files('*.inc', 'tensorflow_core/include/external/com_google_absl'))
+ list(find_files('*', 'tensorflow_core/include/external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/',
download_url='https://github.com/tensorflow/tensorflow/tags',
author='Google Inc.',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',
)
| tensorflow-master | tensorflow/tools/pip_package/setup.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This pip smoke test verifies dependency files exist in the pip package.
This script runs bazel queries to see what python files are required by the
tests and ensures they are in the pip package superset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))
PIP_PACKAGE_QUERY_EXPRESSION = (
"deps(//tensorflow/tools/pip_package:build_pip_package)")
# List of file paths containing BUILD files that should not be included for the
# pip smoke test.
BUILD_BLACKLIST = [
"tensorflow/lite/delegates/gpu",
"tensorflow/lite/delegates/gpu/metal",
"tensorflow/lite/delegates/gpu/metal/kernels",
"tensorflow/lite/experimental/objc",
"tensorflow/lite/experimental/swift",
]
def GetBuild(dir_base):
  """Get the list of BUILD ":all" targets recursively, starting at dir_base."""
items = []
for root, _, files in os.walk(dir_base):
for name in files:
if (name == "BUILD" and root not in BUILD_BLACKLIST):
items.append("//" + root + ":all")
return items
def BuildPyTestDependencies():
python_targets = GetBuild("tensorflow/python")
contrib_targets = GetBuild("tensorflow/contrib")
tensorboard_targets = GetBuild("tensorflow/contrib/tensorboard")
tensorflow_targets = GetBuild("tensorflow")
# Build list of test targets,
  # python + contrib - tensorboard - attr(manual|no_pip)
targets = " + ".join(python_targets)
for t in contrib_targets:
targets += " + " + t
for t in tensorboard_targets:
targets += " - " + t
targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
tensorflow_targets)
query_kind = "kind(py_test, %s)" % targets
# Skip benchmarks etc.
query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
# Get the dependencies
query_deps = "deps(%s, 1)" % query_filter
return python_targets, query_deps
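# Illustrative sketch of the nested bazel query built above; the two targets
# are assumed examples.
def _demo_query_expression(targets=("//tensorflow/python:all",
                                    "//tensorflow/contrib:all")):
  query_kind = "kind(py_test, %s)" % " + ".join(targets)
  query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
  return "deps(%s, 1)" % query_filter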
PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()
# TODO(amitpatankar): Clean up blacklist.
# List of dependencies that should not included in the pip package.
DEPENDENCY_BLACKLIST = [
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/cc/saved_model:saved_model_half_plus_two",
"//tensorflow:no_tensorflow_py_deps",
"//tensorflow/tools/pip_package:win_pip_package_marker",
"//tensorflow/python:test_ops_2",
"//tensorflow/python:tf_optimizer",
"//tensorflow/python:compare_test_proto_py",
"//tensorflow/core:image_testdata",
"//tensorflow/core:lmdb_testdata",
"//tensorflow/core/kernels/cloud:bigquery_reader_ops",
"//tensorflow/python/debug:grpc_tensorflow_server.par",
"//tensorflow/python/feature_column:vocabulary_testdata",
"//tensorflow/python:framework/test_file_system.so",
"//tensorflow/python:util_nest_test_main_lib",
# lite
"//tensorflow/lite/experimental/examples/lstm:rnn_cell",
"//tensorflow/lite/experimental/examples/lstm:rnn_cell.py",
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test", # pylint:disable=line-too-long
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py", # pylint:disable=line-too-long
"//tensorflow/lite/python:interpreter",
"//tensorflow/lite/python:interpreter_test",
"//tensorflow/lite/python:interpreter.py",
"//tensorflow/lite/python:interpreter_test.py",
# contrib
"//tensorflow/contrib/eager/python/examples/revnet:blocks_test_main_lib",
"//tensorflow/contrib/session_bundle:session_bundle_half_plus_two",
"//tensorflow/contrib/keras:testing_utils",
"//tensorflow/contrib/ffmpeg:test_data",
"//tensorflow/contrib/fused_conv:fused_conv2d_bias_activation_op_test_base",
"//tensorflow/contrib/hadoop:test_data",
"//tensorflow/contrib/factorization/examples:mnist",
"//tensorflow/contrib/factorization/examples:mnist.py",
"//tensorflow/contrib/factorization:factorization_py_CYCLIC_DEPENDENCIES_THAT_NEED_TO_GO", # pylint:disable=line-too-long
"//tensorflow/contrib/framework:checkpoint_ops_testdata",
"//tensorflow/contrib/bayesflow:reinforce_simple_example",
"//tensorflow/contrib/bayesflow:examples/reinforce_simple/reinforce_simple_example.py", # pylint:disable=line-too-long
"//tensorflow/contrib/saved_model:reader", # Not present in v2
"//tensorflow/contrib/timeseries/examples:predict",
"//tensorflow/contrib/timeseries/examples:multivariate",
"//tensorflow/contrib/timeseries/examples:known_anomaly",
"//tensorflow/contrib/timeseries/examples:data/period_trend.csv", # pylint:disable=line-too-long
"//tensorflow/contrib/timeseries/python/timeseries:test_utils",
"//tensorflow/contrib/timeseries/python/timeseries/state_space_models:test_utils", # pylint:disable=line-too-long
"//tensorflow/contrib/image:sparse_image_warp_test_data",
]
def main():
"""This script runs the pip smoke test.
  Raises:
    RuntimeError: If any py_test dependency is missing from the pip package.
  Prerequisites:
      1. Bazel is installed.
      2. Run from within the TensorFlow github repository.
3. Configure has been run.
"""
# pip_package_dependencies_list is the list of included files in pip packages
pip_package_dependencies = subprocess.check_output(
["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
if isinstance(pip_package_dependencies, bytes):
pip_package_dependencies = pip_package_dependencies.decode("utf-8")
pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
pip_package_dependencies_list = [
x.split()[0] for x in pip_package_dependencies_list
]
print("Pip package superset size: %d" % len(pip_package_dependencies_list))
# tf_py_test_dependencies is the list of dependencies for all python
# tests in tensorflow
tf_py_test_dependencies = subprocess.check_output(
["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
if isinstance(tf_py_test_dependencies, bytes):
tf_py_test_dependencies = tf_py_test_dependencies.decode("utf-8")
  tf_py_test_dependencies_list = [
      x.split()[0] for x in tf_py_test_dependencies.strip().split("\n")
  ]
print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))
missing_dependencies = []
# File extensions and endings to ignore
ignore_extensions = [
"_test", "_test.py", "_test_gpu", "_test_gpu.py", "_test_lib"
]
ignored_files_count = 0
blacklisted_dependencies_count = len(DEPENDENCY_BLACKLIST)
# Compare dependencies
for dependency in tf_py_test_dependencies_list:
if dependency and dependency.startswith("//tensorflow"):
ignore = False
# Ignore extensions
if any(dependency.endswith(ext) for ext in ignore_extensions):
ignore = True
ignored_files_count += 1
# Check if the dependency is in the pip package, the dependency blacklist,
# or should be ignored because of its file extension.
if not (ignore or dependency in pip_package_dependencies_list or
dependency in DEPENDENCY_BLACKLIST):
missing_dependencies.append(dependency)
print("Ignored files count: %d" % ignored_files_count)
print("Blacklisted dependencies count: %d" % blacklisted_dependencies_count)
if missing_dependencies:
print("Missing the following dependencies from pip_packages:")
for missing_dependency in missing_dependencies:
print("\nMissing dependency: %s " % missing_dependency)
print("Affected Tests:")
rdep_query = ("rdeps(kind(py_test, %s), %s)" %
(" + ".join(PYTHON_TARGETS), missing_dependency))
      affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
      if isinstance(affected_tests, bytes):
        affected_tests = affected_tests.decode("utf-8")
      affected_tests_list = affected_tests.split("\n")[:-2]
print("\n".join(affected_tests_list))
raise RuntimeError("""
One or more added test dependencies are not in the pip package.
If these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
Else either blacklist the dependencies in //tensorflow/tools/pip_package/pip_smoke_test.py
or add no_pip tag to the test.""")
else:
print("TEST PASSED")
if __name__ == "__main__":
main()
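# Illustrative invocation sketch (not part of the original script; the exact
# workflow is an assumption based on the docstring of main() above): run from
# the TensorFlow source root after `./configure`, e.g.
#   python tensorflow/tools/pip_package/pip_smoke_test.py
# so that the two `bazel cquery` calls above can resolve the pip package and
# py_test targets.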
| tensorflow-master | tensorflow/tools/pip_package/pip_smoke_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| tensorflow-master | tensorflow/tools/pip_package/simple_console.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Auto-detects machine configurations and outputs the results to shell or file.
Currently supports Linux only.
Usage:
python config_detector.py [--save_output] [--filename] [--debug]
Example command:
python config_detector.py --save_output=True --filename=configs.json
--debug=False
Flag option(s):
save_output (True | False) Save output to a file.
(Default: True)
filename <file_name>.json Filename(.json) for storing configs.
(Default: `configs.json`)
debug (True | False) View debug and stderr messages.
(Default: False)
The following machine configuration will be detected:
Platform Operating system (linux | macos | windows)
CPU CPU type (e.g. `GenuineIntel`)
CPU architecture Processor type (32-bit | 64-bit)
CPU ISA CPU instruction set (e.g. `sse4`, `sse4_1`, `avx`)
Distribution Operating system distribution (e.g. Ubuntu)
Distribution version Operating system distribution version (e.g. 14.04)
GPU GPU type (e.g. `Tesla K80`)
GPU count Number of GPU's available
CUDA version CUDA version by default (e.g. `10.1`)
CUDA version all CUDA version(s) all available
cuDNN version cuDNN version (e.g. `7.5.0`)
GCC version GCC version (e.g. `7.3.0`)
GLIBC version GLIBC version (e.g. `2.24`)
libstdc++ version libstdc++ version (e.g. `3.4.25`)
Output:
Shell output (print)
A table containing status and info on all configurations will be
printed out to shell.
Configuration file (.json):
Depending on `--save_output` option, this script outputs a .json file
(in the same directory) containing all user machine configurations
that were detected.
"""
# pylint: disable=broad-except
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import re
import subprocess
import sys
from absl import app
from absl import flags
from tensorflow.tools.tensorflow_builder.config_detector.data import cuda_compute_capability
FLAGS = flags.FLAGS
# Define all flags
flags.DEFINE_boolean("save_output", True, "Save output to a file. [True/False]")
flags.DEFINE_string("filename", "configs.json", "Output filename.")
flags.DEFINE_boolean("debug", False, "View debug messages. [True/False]")
# For linux: commands for retrieving user machine configs.
cmds_linux = {
"cpu_type": (
"cat /proc/cpuinfo 2>&1 | grep 'vendor' | uniq"),
"cpu_arch": (
"uname -m"),
"distrib": (
"cat /etc/*-release | grep DISTRIB_ID* | sed 's/^.*=//'"),
"distrib_ver": (
"cat /etc/*-release | grep DISTRIB_RELEASE* | sed 's/^.*=//'"),
"gpu_type": (
"sudo lshw -C display | grep product:* | sed 's/^.*: //'"),
"gpu_type_no_sudo":
r"lspci | grep 'VGA compatible\|3D controller' | cut -d' ' -f 1 | "
r"xargs -i lspci -v -s {} | head -n 2 | tail -1 | "
r"awk '{print $(NF-2), $(NF-1), $NF}'",
"gpu_count": (
"sudo lshw -C display | grep *-display:* | wc -l"),
"gpu_count_no_sudo": (
r"lspci | grep 'VGA compatible\|3D controller' | wc -l"),
"cuda_ver_all": (
"ls -d /usr/local/cuda* 2> /dev/null"),
"cuda_ver_dflt": (
["nvcc --version 2> /dev/null",
"cat /usr/local/cuda/version.txt 2> /dev/null | awk '{print $NF}'"]),
"cudnn_ver": (
["whereis cudnn.h",
"cat `awk '{print $2}'` | grep CUDNN_MAJOR -A 2 | echo "
"`awk '{print $NF}'` | awk '{print $1, $2, $3}' | sed 's/ /./g'"]),
"gcc_ver": (
"gcc --version | awk '{print $NF}' | head -n 1"),
"glibc_ver": (
"ldd --version | tail -n+1 | head -n 1 | awk '{print $NF}'"),
"libstdcpp_ver":
"strings $(/sbin/ldconfig -p | grep libstdc++ | head -n 1 | "
"awk '{print $NF}') | grep LIBCXX | tail -2 | head -n 1",
"cpu_isa": (
"cat /proc/cpuinfo | grep flags | head -n 1"),
}
cmds_all = {
"linux": cmds_linux,
}
# Global variable(s).
PLATFORM = None
GPU_TYPE = None
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/config_detector"
def run_shell_cmd(args):
"""Executes shell commands and returns output.
Args:
args: String of shell commands to run.
Returns:
Tuple output (stdoutdata, stderrdata) from running the shell commands.
"""
proc = subprocess.Popen(
args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return proc.communicate()
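# A minimal usage sketch for `run_shell_cmd` (added for clarity; this helper is
# not part of the original module and is never called by it). Because stderr is
# redirected into stdout above, the second element of the returned tuple is
# always None.
def _example_run_shell_cmd():
  """Hypothetical example showing how `run_shell_cmd` is typically used."""
  out, err = run_shell_cmd("echo hello")  # err is None; stderr is merged into out
  return out.strip(), err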
def get_platform():
"""Retrieves platform information.
  Currently the script only supports Linux. If another platform such as Windows
  or macOS is detected, it throws an error and terminates.
Returns:
String that is platform type.
e.g. 'linux'
"""
global PLATFORM
cmd = "uname"
out, err = run_shell_cmd(cmd)
platform_detected = out.strip().lower()
if platform_detected != "linux":
if err and FLAGS.debug:
print("Error in detecting platform:\n %s" % str(err))
print("Error: Detected unsupported operating system.\nStopping...")
sys.exit(1)
else:
PLATFORM = platform_detected
return PLATFORM
def get_cpu_type():
"""Retrieves CPU (type) information.
Returns:
String that is name of the CPU.
e.g. 'GenuineIntel'
"""
key = "cpu_type"
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
cpu_detected = out.split(":")[1].strip()
if err and FLAGS.debug:
print("Error in detecting CPU type:\n %s" % str(err))
return cpu_detected
def get_cpu_arch():
"""Retrieves processor architecture type (32-bit or 64-bit).
Returns:
String that is CPU architecture.
e.g. 'x86_64'
"""
key = "cpu_arch"
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
if err and FLAGS.debug:
print("Error in detecting CPU arch:\n %s" % str(err))
return out.strip("\n")
def get_distrib():
"""Retrieves distribution name of the operating system.
Returns:
String that is the name of distribution.
e.g. 'Ubuntu'
"""
key = "distrib"
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
if err and FLAGS.debug:
print("Error in detecting distribution:\n %s" % str(err))
return out.strip("\n")
def get_distrib_version():
"""Retrieves distribution version of the operating system.
Returns:
String that is the distribution version.
e.g. '14.04'
"""
key = "distrib_ver"
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
if err and FLAGS.debug:
print(
"Error in detecting distribution version:\n %s" % str(err)
)
return out.strip("\n")
def get_gpu_type():
"""Retrieves GPU type.
Returns:
String that is the name of the detected NVIDIA GPU.
e.g. 'Tesla K80'
    'unknown' will be returned if the detected GPU type has an unrecognized name.
    An unrecognized name is any GPU name that is not listed on this page:
https://developer.nvidia.com/cuda-gpus
"""
global GPU_TYPE
key = "gpu_type_no_sudo"
gpu_dict = cuda_compute_capability.retrieve_from_golden()
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
ret_val = out.split(" ")
gpu_id = ret_val[0]
if err and FLAGS.debug:
print("Error in detecting GPU type:\n %s" % str(err))
if not isinstance(ret_val, list):
GPU_TYPE = "unknown"
return gpu_id, GPU_TYPE
else:
if "[" or "]" in ret_val[1]:
gpu_release = ret_val[1].replace("[", "") + " "
gpu_release += ret_val[2].replace("]", "").strip("\n")
else:
gpu_release = ret_val[1].replace("\n", " ")
if gpu_release not in gpu_dict:
GPU_TYPE = "unknown"
else:
GPU_TYPE = gpu_release
return gpu_id, GPU_TYPE
def get_gpu_count():
"""Retrieves total number of GPU's available in the system.
Returns:
Integer that is the total # of GPU's found.
"""
key = "gpu_count_no_sudo"
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
if err and FLAGS.debug:
print("Error in detecting GPU count:\n %s" % str(err))
return out.strip("\n")
def get_cuda_version_all():
"""Retrieves all additional CUDA versions available (other than default).
  For retrieving the default CUDA version, use `get_cuda_version_default`.
  stderr is silenced by default. Setting FLAGS.debug mode will not enable it.
  Remove the `2> /dev/null` part of `cmds_linux['cuda_ver_all']` to enable
  stderr.
Returns:
List of all CUDA versions found (except default version).
e.g. ['10.1', '10.2']
"""
key = "cuda_ver_all"
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
ret_val = out.split("\n")
filtered = []
for item in ret_val:
if item not in ["\n", ""]:
filtered.append(item)
all_vers = []
for item in filtered:
ver_re = re.search(r".*/cuda(\-[\d]+\.[\d]+)?", item)
if ver_re.group(1):
all_vers.append(ver_re.group(1).strip("-"))
if err and FLAGS.debug:
print("Error in detecting CUDA version:\n %s" % str(err))
return all_vers
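# Illustrative sketch of the parsing above (added for clarity; the paths are
# hypothetical): if `ls -d /usr/local/cuda*` lists
#   /usr/local/cuda  /usr/local/cuda-10.0  /usr/local/cuda-10.1
# the bare `/usr/local/cuda` symlink has no version suffix and is skipped, and
# the function returns ['10.0', '10.1'].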
def get_cuda_version_default():
"""Retrieves default CUDA version.
  The default version is the one found in the `/usr/local/cuda/` installation.
stderr is silenced by default. Setting FLAGS.debug mode will not enable it.
Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable
stderr.
  It iterates through two version retrieval methods:
    1) Using `nvcc`: If `nvcc` is not available, then it uses the next method.
2) Read version file (`version.txt`) found in CUDA install directory.
Returns:
String that is the default CUDA version.
e.g. '10.1'
"""
key = "cuda_ver_dflt"
out = ""
cmd_list = cmds_all[PLATFORM.lower()][key]
for i, cmd in enumerate(cmd_list):
try:
out, err = run_shell_cmd(cmd)
if not out:
raise Exception(err)
except Exception as e:
if FLAGS.debug:
print("\nWarning: Encountered issue while retrieving default CUDA "
"version. (%s) Trying a different method...\n" % e)
if i == len(cmd_list) - 1:
if FLAGS.debug:
print("Error: Cannot retrieve CUDA default version.\nStopping...")
else:
pass
return out.strip("\n")
def get_cuda_compute_capability(source_from_url=False):
"""Retrieves CUDA compute capability based on the detected GPU type.
This function uses the `cuda_compute_capability` module to retrieve the
corresponding CUDA compute capability for the given GPU type.
Args:
source_from_url: Boolean deciding whether to source compute capability
from NVIDIA website or from a local golden file.
Returns:
List of all supported CUDA compute capabilities for the given GPU type.
e.g. ['3.5', '3.7']
"""
if not GPU_TYPE:
if FLAGS.debug:
print("Warning: GPU_TYPE is empty. "
"Make sure to call `get_gpu_type()` first.")
elif GPU_TYPE == "unknown":
if FLAGS.debug:
print("Warning: Unknown GPU is detected. "
"Skipping CUDA compute capability retrieval.")
else:
if source_from_url:
cuda_compute_capa = cuda_compute_capability.retrieve_from_web()
else:
cuda_compute_capa = cuda_compute_capability.retrieve_from_golden()
return cuda_compute_capa[GPU_TYPE]
return
def get_cudnn_version():
"""Retrieves the version of cuDNN library detected.
Returns:
String that is the version of cuDNN library detected.
e.g. '7.5.0'
"""
key = "cudnn_ver"
cmds = cmds_all[PLATFORM.lower()][key]
out, err = run_shell_cmd(cmds[0])
if err and FLAGS.debug:
print("Error in finding `cudnn.h`:\n %s" % str(err))
if len(out.split(" ")) > 1:
cmd = cmds[0] + " | " + cmds[1]
out_re, err_re = run_shell_cmd(cmd)
if err_re and FLAGS.debug:
print("Error in detecting cuDNN version:\n %s" % str(err_re))
return out_re.strip("\n")
else:
return
def get_gcc_version():
"""Retrieves version of GCC detected.
Returns:
String that is the version of GCC.
e.g. '7.3.0'
"""
key = "gcc_ver"
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
if err and FLAGS.debug:
print("Error in detecting GCC version:\n %s" % str(err))
return out.strip("\n")
def get_glibc_version():
"""Retrieves version of GLIBC detected.
Returns:
String that is the version of GLIBC.
e.g. '2.24'
"""
key = "glibc_ver"
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
if err and FLAGS.debug:
print("Error in detecting GCC version:\n %s" % str(err))
return out.strip("\n")
def get_libstdcpp_version():
"""Retrieves version of libstdc++ detected.
Returns:
String that is the version of libstdc++.
e.g. '3.4.25'
"""
key = "libstdcpp_ver"
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
if err and FLAGS.debug:
print("Error in detecting libstdc++ version:\n %s" % str(err))
ver = out.split("_")[-1].replace("\n", "")
return ver
def get_cpu_isa_version():
"""Retrieves all Instruction Set Architecture(ISA) available.
Required ISA(s): 'avx', 'avx2', 'avx512f', 'sse4', 'sse4_1'
Returns:
Tuple
(list of available ISA, list of missing ISA)
"""
key = "cpu_isa"
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
if err and FLAGS.debug:
print("Error in detecting supported ISA:\n %s" % str(err))
ret_val = out
required_isa = ["avx", "avx2", "avx512f", "sse4", "sse4_1"]
found = []
missing = []
for isa in required_isa:
for sys_isa in ret_val.split(" "):
if isa == sys_isa:
if isa not in found:
found.append(isa)
missing = list(set(required_isa) - set(found))
return found, missing
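# Illustrative result sketch for `get_cpu_isa_version` (added for clarity; the
# flag values are hypothetical): if the flags line in /proc/cpuinfo contains
# `sse4_1 avx avx2` but neither `avx512f` nor a literal `sse4` token, the
# function returns (['avx', 'avx2', 'sse4_1'], ['avx512f', 'sse4']); the order
# of the `missing` list is not guaranteed because it is built from a set
# difference.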
def get_python_version():
"""Retrieves default Python version.
Returns:
String that is the version of default Python.
e.g. '2.7.4'
"""
ver = str(sys.version_info)
mmm = re.search(r".*major=([\d]), minor=([\d]), micro=([\d]+),.*", ver)
return mmm.group(1) + "." + mmm.group(2) + "." + mmm.group(3)
def get_all_configs():
"""Runs all functions for detecting user machine configurations.
Returns:
Tuple
(List of all configurations found,
List of all missing configurations,
List of all configurations found with warnings,
Dict of all configurations)
"""
all_functions = collections.OrderedDict(
[("Platform", get_platform()),
("CPU", get_cpu_type()),
("CPU arch", get_cpu_arch()),
("Distribution", get_distrib()),
("Distribution version", get_distrib_version()),
("GPU", get_gpu_type()[1]),
("GPU count", get_gpu_count()),
("CUDA version (default)", get_cuda_version_default()),
("CUDA versions (all)", get_cuda_version_all()),
("CUDA compute capability",
        get_cuda_compute_capability()),
("cuDNN version", get_cudnn_version()),
("GCC version", get_gcc_version()),
("Python version (default)", get_python_version()),
("GNU C Lib (glibc) version", get_glibc_version()),
("libstdc++ version", get_libstdcpp_version()),
("CPU ISA (min requirement)", get_cpu_isa_version())]
)
configs_found = []
json_data = {}
missing = []
warning = []
  for config, call_func in all_functions.items():
ret_val = call_func
if not ret_val:
configs_found.append([config, "\033[91m\033[1mMissing\033[0m"])
missing.append([config])
json_data[config] = ""
elif ret_val == "unknown":
configs_found.append([config, "\033[93m\033[1mUnknown type\033[0m"])
warning.append([config, ret_val])
json_data[config] = "unknown"
else:
if "ISA" in config:
if not ret_val[1]:
# Not missing any required ISA
configs_found.append([config, ret_val[0]])
json_data[config] = ret_val[0]
else:
configs_found.append(
[config,
"\033[91m\033[1mMissing " + str(ret_val[1])[1:-1] + "\033[0m"]
)
missing.append(
[config,
"\n\t=> Found %s but missing %s"
% (str(ret_val[0]), str(ret_val[1]))]
)
json_data[config] = ret_val[0]
else:
configs_found.append([config, ret_val])
json_data[config] = ret_val
return (configs_found, missing, warning, json_data)
def print_all_configs(configs, missing, warning):
"""Prints the status and info on all configurations in a table format.
Args:
configs: List of all configurations found.
missing: List of all configurations that are missing.
warning: List of all configurations found with warnings.
"""
print_text = ""
llen = 65 # line length
for i, row in enumerate(configs):
if i != 0:
print_text += "-"*llen + "\n"
if isinstance(row[1], list):
val = ", ".join(row[1])
else:
val = row[1]
print_text += " {: <28}".format(row[0]) + " {: <25}".format(val) + "\n"
print_text += "="*llen
print("\n\n {: ^32} {: ^25}".format("Configuration(s)",
"Detected value(s)"))
print("="*llen)
print(print_text)
if missing:
print("\n * ERROR: The following configurations are missing:")
for m in missing:
print(" ", *m)
if warning:
print("\n * WARNING: The following configurations could cause issues:")
for w in warning:
print(" ", *w)
if not missing and not warning:
print("\n * INFO: Successfully found all configurations.")
print("\n")
def save_to_file(json_data, filename):
"""Saves all detected configuration(s) into a JSON file.
Args:
json_data: Dict of all configurations found.
filename: String that is the name of the output JSON file.
"""
if filename[-5:] != ".json":
print("filename: %s" % filename)
filename += ".json"
with open(PATH_TO_DIR + "/" + filename, "w") as f:
json.dump(json_data, f, sort_keys=True, indent=4)
print(" Successfully wrote configs to file `%s`.\n" % (filename))
def manage_all_configs(save_results, filename):
"""Manages configuration detection and retrieval based on user input.
Args:
save_results: Boolean indicating whether to save the results to a file.
filename: String that is the name of the output JSON file.
"""
# Get all configs
all_configs = get_all_configs()
# Print all configs based on user input
print_all_configs(all_configs[0], all_configs[1], all_configs[2])
# Save all configs to a file based on user request
if save_results:
save_to_file(all_configs[3], filename)
def main(argv):
if len(argv) > 3:
raise app.UsageError("Too many command-line arguments.")
manage_all_configs(
save_results=FLAGS.save_output,
filename=FLAGS.filename,
)
if __name__ == "__main__":
app.run(main)
| tensorflow-master | tensorflow/tools/tensorflow_builder/config_detector/config_detector.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Retrieves CUDA compute capability from NVIDIA webpage and creates a `.csv`.
This module is mainly written to supplement for `../config_detector.py`
which retrieves CUDA compute capability from existing golden file.
The golden file resides inside `./golden` directory.
Usage:
python cuda_compute_capability.py
Output:
Creates `compute_capability.csv` file in the same directory by default. If
the file already exists, then it overwrites the file.
In order to use the new `.csv` as the golden, then it should replace the
original golden file (`./golden/compute_capability_golden.csv`) with the
same file name and path.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import difflib
import os
import re
from absl import app
from absl import flags
import six.moves.urllib.request as urllib
FLAGS = flags.FLAGS
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/config_detector"
CUDA_CC_GOLDEN_DIR = PATH_TO_DIR + "/data/golden/compute_capability_golden.csv"
def retrieve_from_web(generate_csv=False):
"""Retrieves list of all CUDA compute capability from NVIDIA webpage.
Args:
generate_csv: Boolean for generating an output file containing
the results.
Returns:
OrderedDict that is a list of all CUDA compute capability listed on the
NVIDIA page. Order goes from top to bottom of the webpage content (.html).
"""
url = "https://developer.nvidia.com/cuda-gpus"
source = urllib.urlopen(url)
matches = []
while True:
line = source.readline()
if "</html>" in line:
break
else:
gpu = re.search(
r"<a href=.*>([\w\S\s\d\[\]\,]+[^*])</a>(<a href=.*)?.*",
line
)
capability = re.search(
r"([\d]+).([\d]+)(/)?([\d]+)?(.)?([\d]+)?.*</td>.*",
line
)
if gpu:
matches.append(gpu.group(1))
elif capability:
if capability.group(3):
capability_str = capability.group(4) + "." + capability.group(6)
else:
capability_str = capability.group(1) + "." + capability.group(2)
matches.append(capability_str)
return create_gpu_capa_map(matches, generate_csv)
def retrieve_from_golden():
"""Retrieves list of all CUDA compute capability from a golden file.
The following file is set as default:
`./golden/compute_capability_golden.csv`
Returns:
Dictionary that lists of all CUDA compute capability in the following
format:
{'<GPU name>': ['<version major>.<version minor>', ...], ...}
If there are multiple versions available for a given GPU, then it
appends all supported versions in the value list (in the key-value
pair.)
"""
out_dict = dict()
with open(CUDA_CC_GOLDEN_DIR) as g_file:
for line in g_file:
line_items = line.split(",")
val_list = []
for item in line_items[1:]:
val_list.append(item.strip("\n"))
out_dict[line_items[0]] = val_list
return out_dict
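# Illustrative golden-file sketch (added for clarity; the rows shown are
# hypothetical): a CSV line such as
#   Tesla K80,3.7
# or, for a GPU entry listing several capabilities,
#   Some GPU,3.5,3.7
# is parsed by `retrieve_from_golden` into
#   {"Tesla K80": ["3.7"], "Some GPU": ["3.5", "3.7"]}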
def create_gpu_capa_map(match_list,
generate_csv=False,
filename="compute_capability"):
"""Generates a map between GPU types and corresponding compute capability.
This method is used for retrieving CUDA compute capability from the web only.
Args:
match_list: List of all CUDA compute capability detected from the webpage.
generate_csv: Boolean for creating csv file to store results.
filename: String that is the name of the csv file (without `.csv` ending).
Returns:
OrderedDict that lists in the incoming order of all CUDA compute capability
provided as `match_list`.
"""
gpu_capa = collections.OrderedDict()
include = False
gpu = ""
cnt = 0
mismatch_cnt = 0
for match in match_list:
if "Products" in match:
if not include:
include = True
continue
elif "www" in match:
include = False
break
if include:
if gpu:
if gpu in gpu_capa:
gpu_capa[gpu].append(match)
else:
gpu_capa[gpu] = [match]
gpu = ""
cnt += 1
if len(gpu_capa.keys()) < cnt:
mismatch_cnt += 1
cnt = len(gpu_capa.keys())
else:
gpu = match
if generate_csv:
f_name = filename + ".csv"
write_csv_from_dict(f_name, gpu_capa)
return gpu_capa
def write_csv_from_dict(filename, input_dict):
"""Writes out a `.csv` file from an input dictionary.
After writing out the file, it checks the new list against the golden
to make sure golden file is up-to-date.
Args:
filename: String that is the output file name.
input_dict: Dictionary that is to be written out to a `.csv` file.
"""
  with open(PATH_TO_DIR + "/data/" + filename, "w") as f:
    for k, v in input_dict.items():
      line = k
      for item in v:
        line += "," + item
      f.write(line + "\n")
  print("Wrote to file %s" % filename)
  check_with_golden(filename)
def check_with_golden(filename):
"""Checks the newly created CUDA compute capability file with the golden.
If differences are found, then it prints a list of all mismatches as
a `WARNING`.
Golden file must reside in `golden/` directory.
Args:
filename: String that is the name of the newly created file.
"""
path_to_file = PATH_TO_DIR + "/data/" + filename
if os.path.isfile(path_to_file) and os.path.isfile(CUDA_CC_GOLDEN_DIR):
with open(path_to_file, "r") as f_new:
with open(CUDA_CC_GOLDEN_DIR, "r") as f_golden:
diff = difflib.unified_diff(
f_new.readlines(),
f_golden.readlines(),
fromfile=path_to_file,
tofile=CUDA_CC_GOLDEN_DIR
)
diff_list = []
for line in diff:
diff_list.append(line)
if diff_list:
print("WARNING: difference(s) found between new csv and golden csv.")
print(diff_list)
else:
print("No difference found between new csv and golen csv.")
def print_dict(py_dict):
"""Prints dictionary with formatting (2 column table).
Args:
py_dict: Dictionary that is to be printed out in a table format.
"""
for gpu, cc in py_dict.items():
print("{:<25}{:<25}".format(gpu, cc))
def main(argv):
if len(argv) > 2:
raise app.UsageError("Too many command-line arguments.")
retrieve_from_web(generate_csv=True)
if __name__ == "__main__":
app.run(main)
| tensorflow-master | tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py |
| tensorflow-master | tensorflow/tools/tensorflow_builder/config_detector/data/__init__.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Checks if a set of configuration(s) is version and dependency compatible."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
if six.PY2:
import ConfigParser
else:
import configparser as ConfigParser
# pylint: enable=g-import-not-at-top
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/compat_checker"
def _compare_versions(v1, v2):
"""Compare two versions and return information on which is smaller vs. larger.
Args:
v1: String that is a version to be compared against `v2`.
v2: String that is a version to be compared against `v1`.
Returns:
Dict that stores larger version with key `larger` and smaller version with
key `smaller`.
e.g. {`larger`: `1.5.0`, `smaller`: `1.2.0`}
Raises:
RuntimeError: If asked to compare `inf` to `inf`.
"""
  # Throw an error if asked to compare `inf` to `inf`.
if v1 == "inf" and v2 == "inf":
raise RuntimeError("Cannot compare `inf` to `inf`.")
rtn_dict = {"smaller": None, "larger": None}
v1_list = v1.split(".")
v2_list = v2.split(".")
# Take care of cases with infinity (arg=`inf`).
if v1_list[0] == "inf":
v1_list[0] = str(int(v2_list[0]) + 1)
if v2_list[0] == "inf":
v2_list[0] = str(int(v1_list[0]) + 1)
# Determine which of the two lists are longer vs. shorter.
v_long = v1_list if len(v1_list) >= len(v2_list) else v2_list
v_short = v1_list if len(v1_list) < len(v2_list) else v2_list
larger, smaller = None, None
for i, ver in enumerate(v_short, start=0):
if int(ver) > int(v_long[i]):
larger = _list_to_string(v_short, ".")
smaller = _list_to_string(v_long, ".")
elif int(ver) < int(v_long[i]):
larger = _list_to_string(v_long, ".")
smaller = _list_to_string(v_short, ".")
else:
if i == len(v_short) - 1:
if v_long[i + 1:] == ["0"]*(len(v_long) - 1 - i):
larger = "equal"
smaller = "equal"
else:
larger = _list_to_string(v_long, ".")
smaller = _list_to_string(v_short, ".")
else:
# Go to next round.
pass
if larger:
break
rtn_dict["smaller"] = smaller
rtn_dict["larger"] = larger
return rtn_dict
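# Illustrative behavior sketch for `_compare_versions` (added for clarity):
#   _compare_versions("1.2.0", "1.5.0")  # {'smaller': '1.2.0', 'larger': '1.5.0'}
#   _compare_versions("4.8", "5.0")      # {'smaller': '4.8', 'larger': '5.0'}
#   _compare_versions("2.0", "2.0.0")    # {'smaller': 'equal', 'larger': 'equal'}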
def _list_to_string(l, s):
"""Concatenates list items into a single string separated by `s`.
Args:
l: List with items to be concatenated into a single string.
s: String or char that will be concatenated in between each item.
Returns:
String that has all items in list `l` concatenated with `s` separator.
"""
return s.join(l)
def _get_func_name():
"""Get the name of current function.
Returns:
String that is the name of current function.
"""
return tf_inspect.stack()[1][3]
class ConfigCompatChecker(object):
"""Class that checks configuration versions and depencency compatibilities.
`ConfigCompatChecker` checks a given set of configurations and their versions
against supported versions and dependency rules defined in `.ini` config file.
For project `TensorFlow Builder`, it functions as a sub-module for the builder
service that validates requested build configurations from a client prior to
initiating a TensorFlow build.
"""
class _Reqs(object):
"""Class that stores specifications related to a single requirement.
`_Reqs` represents a single version or dependency requirement specified in
    the `.ini` config file. It is meant to be used inside `ConfigCompatChecker`
to help organize and identify version and dependency compatibility for a
given configuration (e.g. gcc version) required by the client.
"""
def __init__(self, req, config, section):
"""Initializes a version or dependency requirement object.
Args:
req: List that contains individual supported versions or a single string
that contains `range` definition.
e.g. [`range(1.0, 2.0) include(3.0) exclude(1.5)`]
e.g. [`1.0`, `3.0`, `7.1`]
config: String that is the configuration name.
e.g. `platform`
section: String that is the section name from the `.ini` config file
under which the requirement is defined.
e.g. `Required`, `Optional`, `Unsupported`, `Dependency`
"""
# Req class variables.
self.req = req
self.exclude = None
self.include = None
self.range = [None, None] # for [min, max]
self.config = config
self._req_type = "" # e.g. `range` or `no_range`
self._section = section
self._initialized = None
self._error_message = []
# Parse and store requirement specifications.
self.parse_single_req()
@property
def get_status(self):
"""Get status of `_Reqs` initialization.
Returns:
Tuple
(Boolean indicating initialization status,
List of error messages, if any)
"""
return self._initialized, self._error_message
def __str__(self):
"""Prints a requirement and its components.
Returns:
        String that has concatenated information about a requirement.
"""
info = {
"section": self._section,
"config": self.config,
"req_type": self._req_type,
"req": str(self.req),
"range": str(self.range),
"exclude": str(self.exclude),
"include": str(self.include),
"init": str(self._initialized)
}
req_str = "\n >>> _Reqs Instance <<<\n"
req_str += "Section: {section}\n"
req_str += "Configuration name: {config}\n"
req_str += "Requirement type: {req_type}\n"
req_str += "Requirement: {req}\n"
req_str += "Range: {range}\n"
req_str += "Exclude: {exclude}\n"
req_str += "Include: {include}\n"
req_str += "Initilalized: {init}\n\n"
return req_str.format(**info)
def parse_single_req(self):
"""Parses a requirement and stores information.
`self.req` _initialized in `__init__` is called for retrieving the
requirement.
A requirement can come in two forms:
[1] String that includes `range` indicating range syntax for defining
a requirement.
e.g. `range(1.0, 2.0) include(3.0) exclude(1.5)`
      [2] List that includes individual supported versions or items.
e.g. [`1.0`, `3.0`, `7.1`]
For a list type requirement, it directly stores the list to
`self.include`.
Call `get_status` for checking the status of the parsing. This function
sets `self._initialized` to `False` and immediately returns with an error
message upon encountering a failure. It sets `self._initialized` to `True`
and returns without an error message upon success.
"""
# Regex expression for filtering requirement line. Please refer
# to docstring above for more information.
expr = r"(range\()?([\d\.\,\s]+)(\))?( )?(include\()?"
expr += r"([\d\.\,\s]+)?(\))?( )?(exclude\()?([\d\.\,\s]+)?(\))?"
# Check that arg `req` is not empty.
if not self.req:
err_msg = "[Error] Requirement is missing. "
err_msg += "(section = %s, " % str(self._section)
err_msg += "config = %s, req = %s)" % (str(self.config), str(self.req))
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
return
# For requirement given in format with `range`. For example:
# python = [range(3.3, 3.7) include(2.7)] as opposed to
# python = [2.7, 3.3, 3.4, 3.5, 3.6, 3.7]
if "range" in self.req[0]:
self._req_type = "range"
match = re.match(expr, self.req[0])
if not match:
err_msg = "[Error] Encountered issue when parsing the requirement."
err_msg += " (req = %s, match = %s)" % (str(self.req), str(match))
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
return
else:
match_grp = match.groups()
match_size = len(match_grp)
for i, m in enumerate(match_grp[0:match_size-1], start=0):
# Get next index. For example:
# | idx | next_idx |
# +------------+------------+
# | `range(` | `1.1, 1.5` |
# | `exclude(` | `1.1, 1.5` |
# | `include(` | `1.1, 1.5` |
next_match = match_grp[i + 1]
if m not in ["", None, " ", ")"]:
if "range" in m:
# Check that the range definition contains only one comma.
# If more than one comma, then there is format error with the
# requirement config file.
comma_count = next_match.count(",")
if comma_count > 1 or comma_count == 0:
err_msg = "[Error] Found zero or more than one comma in range"
err_msg += " definition. (req = %s, " % str(self.req)
err_msg += "match = %s)" % str(next_match)
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
return
# Remove empty space in range and separate min, max by
# comma. (e.g. `1.0, 2.0` => `1.0,2.0` => [`1.0`, `2.0`])
min_max = next_match.replace(" ", "").split(",")
# Explicitly define min and max values.
# If min_max = ['', ''], then `range(, )` was provided as
# req, which is equivalent to `include all versions`.
if not min_max[0]:
min_max[0] = "0"
if not min_max[1]:
min_max[1] = "inf"
self.range = min_max
if "exclude" in m:
self.exclude = next_match.replace(" ", "").split(",")
if "include" in m:
self.include = next_match.replace(" ", "").split(",")
self._initialized = True
# For requirement given in format without a `range`. For example:
# python = [2.7, 3.3, 3.4, 3.5, 3.6, 3.7] as opposed to
# python = [range(3.3, 3.7) include(2.7)]
else:
self._req_type = "no_range"
# Requirement (self.req) should be a list.
if not isinstance(self.req, list):
err_msg = "[Error] Requirement is not a list."
err_msg += "(req = %s, " % str(self.req)
err_msg += "type(req) = %s)" % str(type(self.req))
logging.error(err_msg)
self._initialized = False
self._error_message.append(err_msg)
else:
self.include = self.req
self._initialized = True
return
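    # Illustrative parse sketch for `_Reqs` (added for clarity; the specs shown
    # are hypothetical): given req = ["range(3.3, 3.7) include(2.7)"], the
    # parser above sets
    #   self.range   == ["3.3", "3.7"]
    #   self.include == ["2.7"]
    #   self.exclude is None
    # whereas a plain list such as ["1.0", "3.0", "7.1"] is stored directly in
    # self.include and self.range is left as [None, None].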
def __init__(self, usr_config, req_file):
"""Initializes a configuration compatibility checker.
Args:
usr_config: Dict of all configuration(s) whose version compatibilities are
to be checked against the rules defined in the `.ini` config
file.
req_file: String that is the full name of the `.ini` config file.
e.g. `config.ini`
"""
# ConfigCompatChecker class variables.
self.usr_config = usr_config
self.req_file = req_file
self.warning_msg = []
self.error_msg = []
# Get and store requirements.
reqs_all = self.get_all_reqs()
self.required = reqs_all["required"]
self.optional = reqs_all["optional"]
self.unsupported = reqs_all["unsupported"]
self.dependency = reqs_all["dependency"]
self.successes = []
self.failures = []
def get_all_reqs(self):
"""Parses all compatibility specifications listed in the `.ini` config file.
Reads and parses each and all compatibility specifications from the `.ini`
config file by sections. It then populates appropriate dicts that represent
each section (e.g. `self.required`) and returns a tuple of the populated
dicts.
Returns:
Dict of dict
{ `required`: Dict of `Required` configs and supported versions,
`optional`: Dict of `Optional` configs and supported versions,
`unsupported`: Dict of `Unsupported` configs and supported versions,
`dependency`: Dict of `Dependency` configs and supported versions }
"""
# First check if file exists. Exit on failure.
try:
open(self.req_file, "rb")
except IOError:
msg = "[Error] Cannot read file '%s'." % self.req_file
logging.error(msg)
sys.exit(1)
# Store status of parsing requirements. For local usage only.
curr_status = True
# Initialize config parser for parsing version requirements file.
parser = ConfigParser.ConfigParser()
parser.read(self.req_file)
if not parser.sections():
err_msg = "[Error] Empty confie file. "
err_msg += "(file = %s, " % str(self.req_file)
err_msg += "parser sectons = %s)" % str(parser.sections())
self.error_msg.append(err_msg)
logging.error(err_msg)
curr_status = False
# Each dependency dict will have the following format.
# _dict = {
# `<config_name>` : [_Reqs()],
# `<config_name>` : [_Reqs()]
# }
required_dict = {}
optional_dict = {}
unsupported_dict = {}
dependency_dict = {}
# Parse every config under each section defined in config file
# and populate requirement dict(s).
for section in parser.sections():
all_configs = parser.options(section)
for config in all_configs:
spec = parser.get(section, config)
# Separately manage each section:
# `Required`,
# `Optional`,
# `Unsupported`,
# `Dependency`
# One of the sections is required.
if section == "Dependency":
dependency_dict[config] = []
spec_split = spec.split(",\n")
# First dependency item may only or not have `[` depending
# on the indentation style in the config (.ini) file.
# If it has `[`, then either skip or remove from string.
if spec_split[0] == "[":
spec_split = spec_split[1:]
elif "[" in spec_split[0]:
spec_split[0] = spec_split[0].replace("[", "")
else:
warn_msg = "[Warning] Config file format error: Missing `[`."
warn_msg += "(section = %s, " % str(section)
warn_msg += "config = %s)" % str(config)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
          # The last dependency item may or may not have `]` depending
          # on the indentation style in the config (.ini) file.
          # If it has `]`, then either skip it or remove it from the string.
if spec_split[-1] == "]":
spec_split = spec_split[:-1]
elif "]" in spec_split[-1]:
spec_split[-1] = spec_split[-1].replace("]", "")
else:
warn_msg = "[Warning] Config file format error: Missing `]`."
warn_msg += "(section = %s, " % str(section)
warn_msg += "config = %s)" % str(config)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# Parse `spec_split` which is a list of all dependency rules
# retrieved from the config file.
# Create a _Reqs() instance for each rule and store it under
# appropriate class dict (e.g. dependency_dict) with a proper
# key.
#
# For dependency definition, it creates one _Reqs() instance each
# for requirement and dependency. For example, it would create
# a list in the following indexing sequence:
#
# [`config', <`config` _Reqs()>, `dep', <`dep` _Reqs()>]
#
# For example:
# [`python`, _Reqs(), `tensorflow`, _Reqs()] for
# `python 3.7 requires tensorflow 1.13`
for rule in spec_split:
# Filter out only the necessary information from `rule` string.
spec_dict = self.filter_dependency(rule)
# Create _Reqs() instance for each rule.
cfg_name = spec_dict["cfg"] # config name
dep_name = spec_dict["cfgd"] # dependency name
cfg_req = self._Reqs(
self.convert_to_list(spec_dict["cfg_spec"], " "),
config=cfg_name,
section=section
)
dep_req = self._Reqs(
self.convert_to_list(spec_dict["cfgd_spec"], " "),
config=dep_name,
section=section
)
# Check status of _Reqs() initialization. If wrong formats are
# detected from the config file, it would return `False` for
# initialization status.
# `<_Reqs>.get_status` returns [_initialized, _error_message]
cfg_req_status = cfg_req.get_status
dep_req_status = dep_req.get_status
if not cfg_req_status[0] or not dep_req_status[0]:
# `<_Reqs>.get_status()[1]` returns empty upon successful init.
msg = "[Error] Failed to create _Reqs() instance for a "
msg += "dependency item. (config = %s, " % str(cfg_name)
msg += "dep = %s)" % str(dep_name)
logging.error(msg)
self.error_msg.append(cfg_req_status[1])
self.error_msg.append(dep_req_status[1])
curr_status = False
break
else:
dependency_dict[config].append(
[cfg_name, cfg_req, dep_name, dep_req])
# Break out of `if section == 'Dependency'` block.
if not curr_status:
break
else:
if section == "Required":
add_to = required_dict
elif section == "Optional":
add_to = optional_dict
elif section == "Unsupported":
add_to = unsupported_dict
else:
msg = "[Error] Section name `%s` is not accepted." % str(section)
msg += "Accepted section names are `Required`, `Optional`, "
msg += "`Unsupported`, and `Dependency`."
logging.error(msg)
self.error_msg.append(msg)
curr_status = False
break
# Need to make sure `req` argument for _Reqs() instance is always
# a list. If not, convert to list.
req_list = self.convert_to_list(self.filter_line(spec), " ")
add_to[config] = self._Reqs(req_list, config=config, section=section)
# Break out of `for config in all_configs` loop.
if not curr_status:
break
# Break out of `for section in parser.sections()` loop.
if not curr_status:
break
return_dict = {
"required": required_dict,
"optional": optional_dict,
"unsupported": unsupported_dict,
"dependency": dependency_dict
}
return return_dict
def filter_dependency(self, line):
"""Filters dependency compatibility rules defined in the `.ini` config file.
Dependency specifications are defined as the following:
`<config> <config_version> requires <dependency> <dependency_version>`
e.g.
`python 3.7 requires tensorflow 1.13`
`tensorflow range(1.0.0, 1.13.1) requires gcc range(4.8, )`
Args:
line: String that is a dependency specification defined under `Dependency`
section in the `.ini` config file.
Returns:
Dict with configuration and its dependency information.
e.g. {`cfg`: `python`, # configuration name
`cfg_spec`: `3.7`, # configuration version
`cfgd`: `tensorflow`, # dependency name
`cfgd_spec`: `4.8`} # dependency version
"""
line = line.strip("\n")
expr = r"(?P<cfg>[\S]+) (?P<cfg_spec>range\([\d\.\,\s]+\)( )?"
expr += r"(include\([\d\.\,\s]+\))?( )?(exclude\([\d\.\,\s]+\))?( )?"
expr += r"|[\d\,\.\s]+) requires (?P<cfgd>[\S]+) (?P<cfgd_spec>range"
expr += r"\([\d\.\,\s]+\)( )?(include\([\d\.\,\s]+\))?( )?"
expr += r"(exclude\([\d\.\,\s]+\))?( )?|[\d\,\.\s]+)"
r = re.match(expr, line.strip("\n"))
return r.groupdict()
def convert_to_list(self, item, separator):
"""Converts a string into a list with a separator.
Args:
item: String that needs to be separated into a list by a given separator.
            A list is also accepted, in which case no splitting is performed.
      separator: String with which the `item` will be split.
    Returns:
      List that is the split version of the given input string.
e.g. Input: `1.0, 2.0, 3.0` with `, ` separator
Output: [1.0, 2.0, 3.0]
"""
out = None
if not isinstance(item, list):
if "range" in item:
# If arg `item` is a single string, then create a list with just
# the item.
out = [item]
else:
# arg `item` can come in as the following:
# `1.0, 1.1, 1.2, 1.4`
# if requirements were defined without the `range()` format.
# In such a case, create a list separated by `separator` which is
# an empty string (' ') in this case.
out = item.split(separator)
for i in range(len(out)):
out[i] = out[i].replace(",", "")
# arg `item` is a list already.
else:
out = [item]
return out
def filter_line(self, line):
"""Removes `[` or `]` from the input line.
Args:
line: String that is a compatibility specification line from the `.ini`
config file.
Returns:
String that is a compatibility specification line without `[` and `]`.
"""
filtered = []
warn_msg = []
splited = line.split("\n")
# If arg `line` is empty, then requirement might be missing. Add
# to warning as this issue will be caught in _Reqs() initialization.
    if not line or len(splited) < 1:
warn_msg = "[Warning] Empty line detected while filtering lines."
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# In general, first line in requirement definition will include `[`
# in the config file (.ini). Remove it.
if splited[0] == "[":
filtered = splited[1:]
elif "[" in splited[0]:
splited = splited[0].replace("[", "")
filtered = splited
# If `[` is missing, then it could be a formatting issue with
# config file (.ini.). Add to warning.
else:
warn_msg = "[Warning] Format error. `[` could be missing in "
warn_msg += "the config (.ini) file. (line = %s)" % str(line)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
# In general, last line in requirement definition will include `]`
# in the config file (.ini). Remove it.
if filtered[-1] == "]":
filtered = filtered[:-1]
elif "]" in filtered[-1]:
filtered[-1] = filtered[-1].replace("]", "")
# If `]` is missing, then it could be a formatting issue with
# config file (.ini.). Add to warning.
else:
warn_msg = "[Warning] Format error. `]` could be missing in "
warn_msg += "the config (.ini) file. (line = %s)" % str(line)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
return filtered
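  # Illustrative sketch for `filter_line` (added for clarity; the input is a
  # hypothetical multi-line value as returned by ConfigParser):
  #   self.filter_line("[\n2.7,\n3.7\n]")  ->  ["2.7,", "3.7"]
  # i.e. the surrounding `[` and `]` lines are stripped and the remaining lines
  # are returned as a list.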
def in_range(self, ver, req):
"""Checks if a version satisfies a version and/or compatibility requirement.
Args:
ver: List whose first item is a config version that needs to be checked
for support status and version compatibility.
e.g. ver = [`1.0`]
req: `_Reqs` class instance that represents a configuration version and
compatibility specifications.
Returns:
Boolean output of checking if version `ver` meets the requirement
stored in `req` (or a `_Reqs` requirements class instance).
"""
# If `req.exclude` is not empty and `ver` is in `req.exclude`,
# no need to proceed to next set of checks as it is explicitly
# NOT supported.
if req.exclude is not None:
for v in ver:
if v in req.exclude:
return False
# If `req.include` is not empty and `ver` is in `req.include`,
# no need to proceed to next set of checks as it is supported and
# NOT unsupported (`req.exclude`).
include_checked = False
if req.include is not None:
for v in ver:
if v in req.include:
return True
include_checked = True
# If `req.range` is not empty, then `ver` is defined with a `range`
# syntax. Check whether `ver` falls under the defined supported
# range.
if req.range != [None, None]:
min_v = req.range[0] # minimum supported version
max_v = req.range[1] # maximum supported version
ver = ver[0] # version to compare
lg = _compare_versions(min_v, ver)["larger"] # `ver` should be larger
sm = _compare_versions(ver, max_v)["smaller"] # `ver` should be smaller
if lg in [ver, "equal"] and sm in [ver, "equal", "inf"]:
return True
else:
err_msg = "[Error] Version is outside of supported range. "
err_msg += "(config = %s, " % str(req.config)
err_msg += "version = %s, " % str(ver)
err_msg += "supported range = %s)" % str(req.range)
logging.warning(err_msg)
self.warning_msg.append(err_msg)
return False
else:
err_msg = ""
if include_checked:
# user config is not supported as per exclude, include, range
# specification.
err_msg = "[Error] Version is outside of supported range. "
else:
# user config is not defined in exclude, include or range. config file
# error.
err_msg = "[Error] Missing specification. "
err_msg += "(config = %s, " % str(req.config)
err_msg += "version = %s, " % str(ver)
err_msg += "supported range = %s)" % str(req.range)
logging.warning(err_msg)
self.warning_msg.append(err_msg)
return False
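  # Illustrative sketch for `in_range` (added for clarity; the spec is
  # hypothetical): for a requirement `req` parsed from "range(4.8, ) include(4.2)",
  #   self.in_range(["5.0"], req)  # True, 5.0 falls inside [4.8, inf)
  #   self.in_range(["4.2"], req)  # True, matched via the include list
  #   self.in_range(["4.0"], req)  # False, below the supported range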
def _print(self, *args):
"""Prints compatibility check status and failure or warning messages.
Prints to console without using `logging`.
Args:
*args: String(s) that is one of:
[`failures`, # all failures
`successes`, # all successes
`failure_msgs`, # failure message(s) recorded upon failure(s)
`warning_msgs`] # warning message(s) recorded upon warning(s)
Raises:
Exception: If *args not in:
[`failures`, `successes`, `failure_msgs`, `warning_msg`]
"""
def _format(name, arr):
"""Prints compatibility check results with a format.
Args:
name: String that is the title representing list `arr`.
arr: List of items to be printed in a certain format.
"""
title = "### All Compatibility %s ###" % str(name)
tlen = len(title)
print("-"*tlen)
print(title)
print("-"*tlen)
print(" Total # of %s: %s\n" % (str(name), str(len(arr))))
if arr:
for item in arr:
detail = ""
if isinstance(item[1], list):
for itm in item[1]:
detail += str(itm) + ", "
detail = detail[:-2]
else:
detail = str(item[1])
print(" %s ('%s')\n" % (str(item[0]), detail))
else:
print(" No %s" % name)
print("\n")
for p_item in args:
if p_item == "failures":
_format("Failures", self.failures)
elif p_item == "successes":
_format("Successes", self.successes)
elif p_item == "failure_msgs":
_format("Failure Messages", self.error_msg)
elif p_item == "warning_msgs":
_format("Warning Messages", self.warning_msg)
else:
raise Exception(
"[Error] Wrong input provided for %s." % _get_func_name())
def check_compatibility(self):
"""Checks version and dependency compatibility for a given configuration.
`check_compatibility` immediately returns with `False` (or failure status)
if any child process or checks fail. For error and warning messages, either
print `self.(error_msg|warning_msg)` or call `_print` function.
Returns:
Boolean that is a status of the compatibility check result.
"""
# Check if all `Required` configs are found in user configs.
usr_keys = self.usr_config.keys()
    for k in six.iterkeys(self.required):
if k not in usr_keys:
err_msg = "[Error] Required config not found in user config."
err_msg += "(required = %s, " % str(k)
err_msg += "user configs = %s)" % str(usr_keys)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([k, err_msg])
return False
# Parse each user config and validate its compatibility.
overall_status = True
for config_name, spec in six.iteritems(self.usr_config):
temp_status = True
# Check under which section the user config is defined.
in_required = config_name in self.required.keys()
in_optional = config_name in self.optional.keys()
in_unsupported = config_name in self.unsupported.keys()
in_dependency = config_name in self.dependency.keys()
# Add to warning if user config is not specified in the config file.
if not (in_required or in_optional or in_unsupported or in_dependency):
warn_msg = "[Error] User config not defined in config file."
warn_msg += "(user config = %s)" % str(config_name)
logging.warning(warn_msg)
self.warning_msg.append(warn_msg)
self.failures.append([config_name, warn_msg])
temp_status = False
else:
if in_unsupported:
if self.in_range(spec, self.unsupported[config_name]):
err_msg = "[Error] User config is unsupported. It is "
err_msg += "defined under 'Unsupported' section in the config file."
err_msg += " (config = %s, spec = %s)" % (config_name, str(spec))
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
if in_required:
if not self.in_range(spec, self.required[config_name]):
err_msg = "[Error] User config cannot be supported. It is not in "
err_msg += "the supported range as defined in the 'Required' "
err_msg += "section. (config = %s, " % config_name
err_msg += "spec = %s)" % str(spec)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
if in_optional:
if not self.in_range(spec, self.optional[config_name]):
err_msg = "[Error] User config cannot be supported. It is not in "
err_msg += "the supported range as defined in the 'Optional' "
err_msg += "section. (config = %s, " % config_name
err_msg += "spec = %s)" % str(spec)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
# If user config and version has a dependency, check both user
# config + version and dependency config + version are supported.
if in_dependency:
# Get dependency information. The information gets retrieved in the
# following format:
# [`config`, `config _Reqs()`, `dependency`, `dependency _Reqs()`]
dep_list = self.dependency[config_name]
if dep_list:
for rule in dep_list:
cfg = rule[0] # config name
cfg_req = rule[1] # _Reqs() instance for config requirement
dep = rule[2] # dependency name
dep_req = rule[3] # _Reqs() instance for dependency requirement
# Check if user config has a dependency in the following sequence:
# [1] Check user config and the config that has dependency
# are the same. (This is defined as `cfg_status`.)
# [2] Check if dependency is supported.
try:
cfg_name = self.usr_config[cfg]
dep_name = self.usr_config[dep]
cfg_status = self.in_range(cfg_name, cfg_req)
dep_status = self.in_range(dep_name, dep_req)
# If both status's are `True`, then user config meets dependency
# spec.
if cfg_status:
if not dep_status:
# throw error
err_msg = "[Error] User config has a dependency that cannot"
err_msg += " be supported. "
err_msg += "'%s' has a dependency on " % str(config_name)
err_msg += "'%s'." % str(dep)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
except KeyError:
err_msg = "[Error] Dependency is missing from `Required`. "
err_msg += "(config = %s, ""dep = %s)" % (cfg, dep)
logging.error(err_msg)
self.error_msg.append(err_msg)
self.failures.append([config_name, err_msg])
temp_status = False
# At this point, all requirement related to the user config has been
# checked and passed. Append to `successes` list.
if temp_status:
self.successes.append([config_name, spec])
else:
overall_status = False
return overall_status
| tensorflow-master | tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Tests for version compatibility checker for TensorFlow Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from tensorflow.tools.tensorflow_builder.compat_checker import compat_checker
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/compat_checker"
USER_CONFIG_IN_RANGE = {
"apple": ["1.0"],
"banana": ["3"],
"kiwi": ["2.0"],
"watermelon": ["2.0.0"],
"orange": ["4.1"],
"cherry": ["1.5"],
"cranberry": ["1.0"],
"raspberry": ["3.0"],
"tangerine": ["2.0.0"],
"jackfruit": ["1.0"],
"grapefruit": ["2.0"],
"apricot": ["wind", "flower"],
"grape": ["7.1"],
"blueberry": ["3.0"]
}
USER_CONFIG_NOT_IN_RANGE = {
"apple": ["4.0"],
"banana": ["5"],
"kiwi": ["3.5"],
"watermelon": ["5.0"],
"orange": ["3.5"],
"cherry": ["2.0"],
"raspberry": ["-1"],
"cranberry": ["4.5"],
"tangerine": ["0"],
"jackfruit": ["5.0"],
"grapefruit": ["2.5"],
"apricot": ["hello", "world"],
"blueberry": ["11.0"],
"grape": ["7.0"],
"cantaloupe": ["11.0"]
}
USER_CONFIG_MISSING = {
"avocado": ["3.0"],
"apple": [],
"banana": ""
}
class CompatCheckerTest(unittest.TestCase):
def setUp(self):
"""Set up test."""
super(CompatCheckerTest, self).setUp()
self.test_file = os.path.join(PATH_TO_DIR, "test_config.ini")
  def tearDown(self):
"""Tear down test."""
super(CompatCheckerTest, self).tearDown()
def testWithUserConfigInRange(self):
"""Test a set of configs that are supported.
Testing with the following combination should always return `success`:
[1] A set of configurations that are supported and/or compatible.
[2] `.ini` config file with proper formatting.
"""
# Initialize compatibility checker.
self.compat_checker = compat_checker.ConfigCompatChecker(
USER_CONFIG_IN_RANGE, self.test_file)
# Compatibility check should succeed.
self.assertTrue(self.compat_checker.check_compatibility())
# Make sure no warning or error messages are recorded.
self.assertFalse(len(self.compat_checker.error_msg))
    # Make sure the total # of successes matches the total # of configs.
cnt = len(USER_CONFIG_IN_RANGE.keys())
self.assertEqual(len(self.compat_checker.successes), cnt)
def testWithUserConfigNotInRange(self):
"""Test a set of configs that are NOT supported.
Testing with the following combination should always return `failure`:
[1] A set of configurations that are NOT supported and/or compatible.
[2] `.ini` config file with proper formatting.
"""
self.compat_checker = compat_checker.ConfigCompatChecker(
USER_CONFIG_NOT_IN_RANGE, self.test_file)
# Compatibility check should fail.
self.assertFalse(self.compat_checker.check_compatibility())
# Check error and warning messages.
err_msg_list = self.compat_checker.failures
self.assertTrue(len(err_msg_list))
    # Make sure the total # of failures matches the total # of configs.
cnt = len(USER_CONFIG_NOT_IN_RANGE.keys())
self.assertEqual(len(err_msg_list), cnt)
def testWithUserConfigMissing(self):
"""Test a set of configs that are empty or missing specification."""
self.compat_checker = compat_checker.ConfigCompatChecker(
USER_CONFIG_MISSING, self.test_file)
# With missing specification in config file, the check should
# always fail.
self.assertFalse(self.compat_checker.check_compatibility())
if __name__ == "__main__":
unittest.main()
| tensorflow-master | tensorflow/tools/tensorflow_builder/compat_checker/compat_checker_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for operating on Python API Guide files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
def md_files_in_dir(py_guide_src_dir):
"""Returns a list of filename (full_path, base) pairs for guide files."""
all_in_dir = [(os.path.join(py_guide_src_dir, f), f)
for f in os.listdir(py_guide_src_dir)]
return [(full, f) for full, f in all_in_dir
if os.path.isfile(full) and f.endswith('.md')]
class PyGuideParser(object):
"""Simple parsing of a guide .md file.
Descendants can override the process_*() functions (called by process())
to either record information from the guide, or call replace_line()
to affect the return value of process().
"""
def __init__(self):
self._lines = None
def process(self, full_path):
"""Read and process the file at `full_path`."""
with open(full_path, 'rb') as f:
md_string = f.read().decode('utf-8')
self._lines = md_string.split('\n')
seen = set()
in_blockquote = False
for i, line in enumerate(self._lines):
if '```' in line:
in_blockquote = not in_blockquote
if not in_blockquote and line.startswith('# '):
self.process_title(i, line[2:])
elif not in_blockquote and line.startswith('## '):
section_title = line.strip()[3:]
existing_tag = re.search(' {([^}]+)} *$', line)
if existing_tag:
tag = existing_tag.group(1)
else:
tag = re.sub('[^a-zA-Z0-9]+', '_', section_title)
if tag in seen:
            suffix = 0
            while True:
              candidate = '%s_%d' % (tag, suffix)
              if candidate not in seen:
                tag = candidate
                break
              suffix += 1
seen.add(tag)
self.process_section(i, section_title, tag)
elif in_blockquote:
self.process_in_blockquote(i, line)
else:
self.process_line(i, line)
ret = '\n'.join(self._lines)
self._lines = None
return ret
def replace_line(self, line_number, line):
"""Replace the contents of line numbered `line_number` with `line`."""
self._lines[line_number] = line
def process_title(self, line_number, title):
pass
def process_section(self, line_number, section_title, tag):
pass
def process_in_blockquote(self, line_number, line):
pass
def process_line(self, line_number, line):
pass
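# A short usage sketch, not part of the library: subclass `PyGuideParser`,
# override one of the `process_*` hooks, and call `process()` on a guide file.
# Everything below is illustrative only.
if __name__ == '__main__':
  import tempfile
  class _SectionTagCollector(PyGuideParser):
    """Example subclass that records every (line_number, tag) pair it sees."""
    def __init__(self):
      super(_SectionTagCollector, self).__init__()
      self.tags = []
    def process_section(self, line_number, section_title, tag):
      self.tags.append((line_number, tag))
  with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) as _f:
    _f.write('# A title\n## First section\n## First section\n')
  _collector = _SectionTagCollector()
  _collector.process(_f.name)
  print(_collector.tags)  # [(1, 'First_section'), (2, 'First_section_0')]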
| tensorflow-master | tensorflow/tools/docs/py_guide_parser.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tools.docs.doc_generator_visitor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import generate_lib
class NoDunderVisitor(doc_generator_visitor.DocGeneratorVisitor):
def __call__(self, parent_name, parent, children):
"""Drop all the dunder methods to make testing easier."""
children = [
(name, obj) for (name, obj) in children if not name.startswith('_')
]
super(NoDunderVisitor, self).__call__(parent_name, parent, children)
class DocGeneratorVisitorTest(googletest.TestCase):
def test_call_module(self):
visitor = doc_generator_visitor.DocGeneratorVisitor()
visitor(
'doc_generator_visitor', doc_generator_visitor,
[('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor)])
self.assertEqual({'doc_generator_visitor': ['DocGeneratorVisitor']},
visitor.tree)
self.assertEqual({
'doc_generator_visitor': doc_generator_visitor,
'doc_generator_visitor.DocGeneratorVisitor':
doc_generator_visitor.DocGeneratorVisitor,
}, visitor.index)
def test_call_class(self):
visitor = doc_generator_visitor.DocGeneratorVisitor()
visitor(
'DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor,
[('index', doc_generator_visitor.DocGeneratorVisitor.index)])
self.assertEqual({'DocGeneratorVisitor': ['index']},
visitor.tree)
self.assertEqual({
'DocGeneratorVisitor': doc_generator_visitor.DocGeneratorVisitor,
'DocGeneratorVisitor.index':
doc_generator_visitor.DocGeneratorVisitor.index
}, visitor.index)
def test_call_raises(self):
visitor = doc_generator_visitor.DocGeneratorVisitor()
with self.assertRaises(RuntimeError):
visitor('non_class_or_module', 'non_class_or_module_object', [])
def test_duplicates_module_class_depth(self):
class Parent(object):
class Nested(object):
pass
tf = types.ModuleType('tf')
tf.Parent = Parent
tf.submodule = types.ModuleType('submodule')
tf.submodule.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.submodule.Parent':
sorted([
'tf.Parent',
'tf.submodule.Parent',
]),
'tf.submodule.Parent.Nested':
sorted([
'tf.Parent.Nested',
'tf.submodule.Parent.Nested',
]),
}, visitor.duplicates)
self.assertEqual({
'tf.Parent.Nested': 'tf.submodule.Parent.Nested',
'tf.Parent': 'tf.submodule.Parent',
}, visitor.duplicate_of)
self.assertEqual({
id(Parent): 'tf.submodule.Parent',
id(Parent.Nested): 'tf.submodule.Parent.Nested',
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
}, visitor.reverse_index)
def test_duplicates_contrib(self):
class Parent(object):
pass
tf = types.ModuleType('tf')
tf.contrib = types.ModuleType('contrib')
tf.submodule = types.ModuleType('submodule')
tf.contrib.Parent = Parent
tf.submodule.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.submodule.Parent':
sorted(['tf.contrib.Parent', 'tf.submodule.Parent']),
}, visitor.duplicates)
self.assertEqual({
'tf.contrib.Parent': 'tf.submodule.Parent',
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
id(Parent): 'tf.submodule.Parent',
id(tf.contrib): 'tf.contrib',
}, visitor.reverse_index)
def test_duplicates_defining_class(self):
class Parent(object):
obj1 = object()
class Child(Parent):
pass
tf = types.ModuleType('tf')
tf.Parent = Parent
tf.Child = Child
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.Parent.obj1': sorted([
'tf.Parent.obj1',
'tf.Child.obj1',
]),
}, visitor.duplicates)
self.assertEqual({
'tf.Child.obj1': 'tf.Parent.obj1',
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(Parent): 'tf.Parent',
id(Child): 'tf.Child',
id(Parent.obj1): 'tf.Parent.obj1',
}, visitor.reverse_index)
def test_duplicates_module_depth(self):
class Parent(object):
pass
tf = types.ModuleType('tf')
tf.submodule = types.ModuleType('submodule')
tf.submodule.submodule2 = types.ModuleType('submodule2')
tf.Parent = Parent
tf.submodule.submodule2.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.Parent': sorted(['tf.Parent', 'tf.submodule.submodule2.Parent']),
}, visitor.duplicates)
self.assertEqual({
'tf.submodule.submodule2.Parent': 'tf.Parent'
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
id(tf.submodule.submodule2): 'tf.submodule.submodule2',
id(Parent): 'tf.Parent',
}, visitor.reverse_index)
def test_duplicates_name(self):
class Parent(object):
obj1 = object()
Parent.obj2 = Parent.obj1
tf = types.ModuleType('tf')
tf.submodule = types.ModuleType('submodule')
tf.submodule.Parent = Parent
visitor = generate_lib.extract(
[('tf', tf)],
private_map={},
do_not_descend_map={},
visitor_cls=NoDunderVisitor)
self.assertEqual({
'tf.submodule.Parent.obj1':
sorted([
'tf.submodule.Parent.obj1',
'tf.submodule.Parent.obj2',
]),
}, visitor.duplicates)
self.assertEqual({
'tf.submodule.Parent.obj2': 'tf.submodule.Parent.obj1',
}, visitor.duplicate_of)
self.assertEqual({
id(tf): 'tf',
id(tf.submodule): 'tf.submodule',
id(Parent): 'tf.submodule.Parent',
id(Parent.obj1): 'tf.submodule.Parent.obj1',
}, visitor.reverse_index)
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/docs/doc_generator_visitor_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tool to generate api_docs for TensorFlow2.
```
python generate2.py --output_dir=/tmp/out
```
Requires a local installation of:
https://github.com/tensorflow/docs/tree/master/tools
tf-nightly-2.0-preview
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import parser
import tensorboard
import tensorflow_estimator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.
parser.tf_inspect = tf_inspect
# `tf` has an `__all__` that doesn't list important things like `keras`.
# The doc generator recognizes `__all__` as the list of public symbols.
# So patch `tf.__all__` to list everything.
tf.__all__ = [item_name for item_name, value in tf_inspect.getmembers(tf)]
tf.__doc__ = """
## TensorFlow 2.0 Beta
Caution: This is a developer preview. You will likely find some bugs,
performance issues, and more, and we encourage you to tell us about them.
We value your feedback!
These docs were generated from the beta build of TensorFlow 2.0.
You can install the exact version that was used to generate these docs
with:
```
pip install tensorflow==2.0.0-beta1
```
"""
FLAGS = flags.FLAGS
flags.DEFINE_string(
"code_url_prefix",
"/code/stable/tensorflow",
"A url to prepend to code paths when creating links to defining code")
flags.DEFINE_string(
"output_dir", "/tmp/out",
"A directory, where the docs will be output to.")
flags.DEFINE_bool("search_hints", True,
"Include meta-data search hints at the top of each file.")
flags.DEFINE_string("site_path", "",
"The prefix ({site-path}/api_docs/python/...) used in the "
"`_toc.yaml` and `_redirects.yaml` files")
# The doc generator isn't aware of tf_export.
# So prefix the score tuples with -1 when this is the canonical name, +1
# otherwise. The generator chooses the name with the lowest score.
class TfExportAwareDocGeneratorVisitor(
doc_generator_visitor.DocGeneratorVisitor):
"""A `tf_export` aware doc_visitor."""
def _score_name(self, name):
canonical = tf_export.get_canonical_name_for_symbol(self._index[name])
canonical_score = 1
if canonical is not None and name == "tf." + canonical:
canonical_score = -1
scores = super(TfExportAwareDocGeneratorVisitor, self)._score_name(name)
return (canonical_score,) + scores
def _hide_layer_and_module_methods():
"""Hide methods and properties defined in the base classes of keras layers."""
# __dict__ only sees attributes defined in *this* class, not on parent classes
module_contents = list(tf.Module.__dict__.items())
layer_contents = list(tf.keras.layers.Layer.__dict__.items())
for name, obj in module_contents + layer_contents:
if name == "__init__":
continue
if isinstance(obj, property):
obj = obj.fget
if isinstance(obj, (staticmethod, classmethod)):
obj = obj.__func__
try:
doc_controls.do_not_doc_in_subclasses(obj)
except AttributeError:
pass
def build_docs(output_dir, code_url_prefix, search_hints=True):
"""Build api docs for tensorflow v2.
Args:
output_dir: A string path, where to put the files.
code_url_prefix: prefix for "Defined in" links.
search_hints: Bool. Include meta-data search hints at the top of each file.
"""
_hide_layer_and_module_methods()
try:
doc_controls.do_not_generate_docs(tf.tools)
except AttributeError:
pass
base_dir = path.dirname(tf.__file__)
base_dirs = (
base_dir,
# External packages base directories,
path.dirname(tensorboard.__file__),
path.dirname(tensorflow_estimator.__file__),
)
code_url_prefixes = (
code_url_prefix,
# External packages source repositories,
"https://github.com/tensorflow/tensorboard/tree/master/tensorboard",
"https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator",
)
doc_generator = generate_lib.DocGenerator(
root_title="TensorFlow 2.0 Preview",
py_modules=[("tf", tf)],
base_dir=base_dirs,
search_hints=search_hints,
code_url_prefix=code_url_prefixes,
site_path=FLAGS.site_path,
visitor_cls=TfExportAwareDocGeneratorVisitor)
doc_generator.build(output_dir)
def main(argv):
del argv
build_docs(output_dir=FLAGS.output_dir,
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints)
if __name__ == "__main__":
app.run(main)
| tensorflow-master | tensorflow/tools/docs/generate2.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for converting parsed doc content into markdown pages.
The adjacent `parser` module creates `PageInfo` objects, containing all data
necessary to document an element of the TensorFlow API.
This module contains one public function, which handles the conversion of these
`PageInfo` objects into a markdown string:
md_page = build_md_page(page_info)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
def build_md_page(page_info):
"""Given a PageInfo object, return markdown for the page.
Args:
page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
`parser.ModulePageInfo`
Returns:
Markdown for the page
Raises:
ValueError: if `page_info` is an instance of an unrecognized class
"""
if page_info.for_function():
return _build_function_page(page_info)
if page_info.for_class():
return _build_class_page(page_info)
if page_info.for_module():
return _build_module_page(page_info)
raise ValueError('Unknown Page Info Type: %s' % type(page_info))
def _build_function_page(page_info):
"""Given a FunctionPageInfo object Return the page as an md string."""
parts = ['# %s\n\n' % page_info.full_name]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.signature is not None:
parts.append(_build_signature(page_info))
if page_info.defined_in:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
return ''.join(parts)
def _build_class_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
parts.append('## Class `%s`\n\n' % page_info.full_name.split('.')[-1])
if page_info.bases:
parts.append('Inherits From: ')
link_template = '[`{short_name}`]({url})'
parts.append(', '.join(
link_template.format(**base._asdict()) for base in page_info.bases))
parts.append('\n\n')
# Sort the methods list, but make sure constructors come first.
constructor_names = ['__init__', '__new__']
constructors = sorted(
method for method in page_info.methods
if method.short_name in constructor_names)
other_methods = sorted(
method for method in page_info.methods
if method.short_name not in constructor_names)
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Class `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
parts.append('\n\n')
if constructors:
for method_info in constructors:
parts.append(_build_method_section(method_info, heading_level=2))
parts.append('\n\n')
if page_info.classes:
parts.append('## Child Classes\n')
link_template = ('[`class {class_info.short_name}`]'
'({class_info.url})\n\n')
class_links = sorted(
link_template.format(class_info=class_info)
for class_info in page_info.classes)
parts.extend(class_links)
if page_info.properties:
parts.append('## Properties\n\n')
for prop_info in page_info.properties:
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
parts.append(h3.format(short_name=prop_info.short_name))
parts.append(prop_info.doc.docstring)
parts.append(_build_function_details(prop_info.doc.function_details))
parts.append(_build_compatibility(prop_info.doc.compatibility))
parts.append('\n\n')
parts.append('\n\n')
if other_methods:
parts.append('## Methods\n\n')
for method_info in other_methods:
parts.append(_build_method_section(method_info))
parts.append('\n\n')
if page_info.other_members:
parts.append('## Class Members\n\n')
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
others_member_headings = (h3.format(short_name=info.short_name)
for info in sorted(page_info.other_members))
parts.extend(others_member_headings)
return ''.join(parts)
def _build_method_section(method_info, heading_level=3):
"""Generates a markdown section for a method.
Args:
method_info: A `MethodInfo` object.
heading_level: An Int, which HTML heading level to use.
Returns:
A markdown string.
"""
parts = []
heading = ('<h{heading_level} id="{short_name}">'
'<code>{short_name}</code>'
'</h{heading_level}>\n\n')
parts.append(heading.format(heading_level=heading_level,
**method_info._asdict()))
if method_info.signature is not None:
parts.append(_build_signature(method_info, use_full_name=False))
parts.append(method_info.doc.docstring)
parts.append(_build_function_details(method_info.doc.function_details))
parts.append(_build_compatibility(method_info.doc.compatibility))
parts.append('\n\n')
return ''.join(parts)
def _build_module_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Module `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.doc.docstring)
parts.append(_build_compatibility(page_info.doc.compatibility))
parts.append('\n\n')
if page_info.modules:
parts.append('## Modules\n\n')
template = '[`{short_name}`]({url}) module'
for item in page_info.modules:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.classes:
parts.append('## Classes\n\n')
template = '[`class {short_name}`]({url})'
for item in page_info.classes:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.functions:
parts.append('## Functions\n\n')
template = '[`{short_name}(...)`]({url})'
for item in page_info.functions:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.other_members:
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
parts.append('## Other Members\n\n')
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
for item in page_info.other_members:
parts.append(h3.format(**item._asdict()))
return ''.join(parts)
def _build_signature(obj_info, use_full_name=True):
"""Returns a md code block showing the function signature."""
# Special case tf.range, since it has an optional first argument
if obj_info.full_name == 'tf.range':
return (
'``` python\n'
"tf.range(limit, delta=1, dtype=None, name='range')\n"
"tf.range(start, limit, delta=1, dtype=None, name='range')\n"
'```\n\n')
parts = ['``` python']
parts.extend(['@' + dec for dec in obj_info.decorators])
signature_template = '{name}({sig})'
if not obj_info.signature:
sig = ''
elif len(obj_info.signature) == 1:
sig = obj_info.signature[0]
else:
sig = ',\n'.join(' %s' % sig_item for sig_item in obj_info.signature)
sig = '\n'+sig+'\n'
if use_full_name:
obj_name = obj_info.full_name
else:
obj_name = obj_info.short_name
parts.append(signature_template.format(name=obj_name, sig=sig))
parts.append('```\n\n')
return '\n'.join(parts)
def _build_compatibility(compatibility):
"""Return the compatibility section as an md string."""
parts = []
sorted_keys = sorted(compatibility.keys())
for key in sorted_keys:
value = compatibility[key]
# Dedent so that it does not trigger markdown code formatting.
value = textwrap.dedent(value)
parts.append('\n\n#### %s Compatibility\n%s\n' % (key.title(), value))
return ''.join(parts)
def _build_function_details(function_details):
"""Return the function details section as an md string."""
parts = []
for detail in function_details:
sub = []
sub.append('#### ' + detail.keyword + ':\n\n')
sub.append(textwrap.dedent(detail.header))
for key, value in detail.items:
sub.append('* <b>`%s`</b>: %s' % (key, value))
parts.append(''.join(sub))
return '\n'.join(parts)
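# A quick illustrative check, not part of the public API: `_build_compatibility`
# turns parsed `@compatibility(...)` blocks, keyed by backend name, into the
# "#### ... Compatibility" sections appended to each page. The sample dict is
# made up for demonstration.
if __name__ == '__main__':
  _sample = {'numpy': 'Behaves like `np.add` for dense inputs.\n'}
  print(_build_compatibility(_sample))
  # Prints (after a blank line):
  # #### Numpy Compatibility
  # Behaves like `np.add` for dense inputs.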
| tensorflow-master | tensorflow/tools/docs/pretty_docs.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.tools.docs.generate2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate2
class Generate2Test(googletest.TestCase):
def test_end_to_end(self):
output_dir = os.path.join(googletest.GetTempDir(), 'output')
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
generate2.build_docs(output_dir=output_dir, code_url_prefix='')
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/docs/generate2_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import generate_lib
if __name__ == '__main__':
doc_generator = generate_lib.DocGenerator()
doc_generator.add_output_dir_argument()
doc_generator.add_src_dir_argument()
# This doc generator works on the TensorFlow codebase. Since this script lives
# at tensorflow/tools/docs, and all code is defined somewhere inside
# tensorflow/, we can compute the base directory (two levels up), which is
# valid unless we're trying to apply this to a different code base, or are
# moving the script around.
script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
default_base_dir = os.path.join(script_dir, '..', '..')
doc_generator.add_base_dir_argument(default_base_dir)
flags = doc_generator.parse_known_args()
  # tf_debug is not imported with tf; it's a separate module altogether
doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
sys.exit(doc_generator.build(flags))
| tensorflow-master | tensorflow/tools/docs/generate.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
class DocGeneratorVisitor(object):
"""A visitor that generates docs for a python object when __call__ed."""
def __init__(self, root_name=''):
"""Make a visitor.
As this visitor is starting its traversal at a module or class, it will not
be told the name of that object during traversal. `root_name` is the name it
should use for that object, effectively prefixing all names with
"root_name.".
Args:
root_name: The name of the root module/class.
"""
self.set_root_name(root_name)
self._index = {}
self._tree = {}
self._reverse_index = None
self._duplicates = None
self._duplicate_of = None
def set_root_name(self, root_name):
"""Sets the root name for subsequent __call__s."""
self._root_name = root_name or ''
self._prefix = (root_name + '.') if root_name else ''
@property
def index(self):
"""A map from fully qualified names to objects to be documented.
The index is filled when the visitor is passed to `traverse`.
Returns:
The index filled by traversal.
"""
return self._index
@property
def tree(self):
"""A map from fully qualified names to all its child names for traversal.
The full name to member names map is filled when the visitor is passed to
`traverse`.
Returns:
The full name to member name map filled by traversal.
"""
return self._tree
@property
def reverse_index(self):
"""A map from `id(object)` to the preferred fully qualified name.
This map only contains non-primitive objects (no numbers or strings) present
in `index` (for primitive objects, `id()` doesn't quite do the right thing).
It is computed when it, `duplicate_of`, or `duplicates` are first accessed.
Returns:
The `id(object)` to full name map.
"""
self._maybe_find_duplicates()
return self._reverse_index
@property
def duplicate_of(self):
"""A map from duplicate full names to a preferred fully qualified name.
    This map only contains names that are not themselves a preferred name.
It is computed when it, `reverse_index`, or `duplicates` are first accessed.
Returns:
The map from duplicate name to preferred name.
"""
self._maybe_find_duplicates()
return self._duplicate_of
@property
def duplicates(self):
"""A map from preferred full names to a list of all names for this symbol.
This function returns a map from preferred (master) name for a symbol to a
lexicographically sorted list of all aliases for that name (incl. the master
name). Symbols without duplicate names do not appear in this map.
It is computed when it, `reverse_index`, or `duplicate_of` are first
accessed.
Returns:
The map from master name to list of all duplicate names.
"""
self._maybe_find_duplicates()
return self._duplicates
def _add_prefix(self, name):
"""Adds the root name to a name."""
return self._prefix + name if name else self._root_name
def __call__(self, parent_name, parent, children):
"""Visitor interface, see `tensorflow/tools/common:traverse` for details.
This method is called for each symbol found in a traversal using
`tensorflow/tools/common:traverse`. It should not be called directly in
user code.
Args:
parent_name: The fully qualified name of a symbol found during traversal.
parent: The Python object referenced by `parent_name`.
children: A list of `(name, py_object)` pairs enumerating, in alphabetical
order, the children (as determined by `tf_inspect.getmembers`) of
`parent`. `name` is the local name of `py_object` in `parent`.
Raises:
RuntimeError: If this visitor is called with a `parent` that is not a
class or module.
"""
parent_name = self._add_prefix(parent_name)
self._index[parent_name] = parent
self._tree[parent_name] = []
if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
parent))
for i, (name, child) in enumerate(list(children)):
# Don't document __metaclass__
if name in ['__metaclass__']:
del children[i]
continue
full_name = '.'.join([parent_name, name]) if parent_name else name
self._index[full_name] = child
self._tree[parent_name].append(name)
def _score_name(self, name):
"""Return a tuple of scores indicating how to sort for the best name.
This function is meant to be used as the `key` to the `sorted` function.
    This sort, in order:
    Prefers names referring to the defining class, over a subclass.
    Prefers names that are not in "contrib".
    Prefers submodules to the root namespace.
    Prefers short names `tf.thing` over `tf.a.b.c.thing`.
    Sorts lexicographically on name parts.
Args:
name: the full name to score, for example `tf.estimator.Estimator`
Returns:
A tuple of scores. When sorted the preferred name will have the lowest
value.
"""
parts = name.split('.')
short_name = parts[-1]
container = self._index['.'.join(parts[:-1])]
defining_class_score = 1
if tf_inspect.isclass(container):
if short_name in container.__dict__:
# prefer the defining class
defining_class_score = -1
contrib_score = -1
if 'contrib' in parts:
contrib_score = 1
while parts:
container = self._index['.'.join(parts)]
if tf_inspect.ismodule(container):
break
parts.pop()
module_length = len(parts)
if len(parts) == 2:
# `tf.submodule.thing` is better than `tf.thing`
module_length_score = -1
else:
# shorter is better
module_length_score = module_length
return (defining_class_score, contrib_score, module_length_score, name)
def _maybe_find_duplicates(self):
"""Compute data structures containing information about duplicates.
Find duplicates in `index` and decide on one to be the "master" name.
Computes a reverse_index mapping each object id to its master name.
Also computes a map `duplicate_of` from aliases to their master name (the
master name itself has no entry in this map), and a map `duplicates` from
master names to a lexicographically sorted list of all aliases for that name
(incl. the master name).
All these are computed and set as fields if they haven't already.
"""
if self._reverse_index is not None:
return
# Maps the id of a symbol to its fully qualified name. For symbols that have
# several aliases, this map contains the first one found.
# We use id(py_object) to get a hashable value for py_object. Note all
# objects in _index are in memory at the same time so this is safe.
reverse_index = {}
# Make a preliminary duplicates map. For all sets of duplicate names, it
# maps the first name found to a list of all duplicate names.
raw_duplicates = {}
for full_name, py_object in six.iteritems(self._index):
# We cannot use the duplicate mechanism for some constants, since e.g.,
# id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
# have no usable docstring and won't be documented automatically.
if (py_object is not None and
not isinstance(py_object, six.integer_types + six.string_types +
(six.binary_type, six.text_type, float, complex, bool))
and py_object is not ()): # pylint: disable=literal-comparison
object_id = id(py_object)
if object_id in reverse_index:
master_name = reverse_index[object_id]
if master_name in raw_duplicates:
raw_duplicates[master_name].append(full_name)
else:
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
duplicate_of = {}
# Duplicates maps the main symbols to the set of all duplicates of that
# symbol (incl. itself).
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
master_name = (
tf_export.get_canonical_name_for_symbol(self._index[names[0]])
if names else None)
if master_name:
master_name = 'tf.%s' % master_name
else:
        # Choose the master name with a lexical sort on the tuples returned
        # by _score_name.
master_name = min(names, key=self._score_name)
duplicates[master_name] = names
for name in names:
if name != master_name:
duplicate_of[name] = master_name
# Set the reverse index to the canonical name.
reverse_index[id(self._index[master_name])] = master_name
self._duplicate_of = duplicate_of
self._duplicates = duplicates
self._reverse_index = reverse_index
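# A minimal usage sketch, mirroring the unit tests rather than any public API:
# build a throwaway module, visit it, and inspect the collected index and tree.
# The module and class below are made up for illustration.
if __name__ == '__main__':
  import types
  class _Example(object):
    pass
  _fake = types.ModuleType('fake')
  _fake.Example = _Example
  _visitor = DocGeneratorVisitor(root_name='fake')
  _visitor('', _fake, [('Example', _Example)])
  print(_visitor.tree)           # {'fake': ['Example']}
  print(sorted(_visitor.index))  # ['fake', 'fake.Example']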
| tensorflow-master | tensorflow/tools/docs/doc_generator_visitor.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Documentation control decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
_DO_NOT_DOC = "_tf_docs_do_not_document"
def do_not_generate_docs(obj):
"""A decorator: Do not generate docs for this object.
For example the following classes:
```
class Parent(object):
def method1(self):
pass
def method2(self):
pass
class Child(Parent):
def method1(self):
pass
def method2(self):
pass
```
Produce the following api_docs:
```
/Parent.md
# method1
# method2
/Child.md
# method1
# method2
```
This decorator allows you to skip classes or methods:
```
@do_not_generate_docs
class Parent(object):
def method1(self):
pass
def method2(self):
pass
class Child(Parent):
@do_not_generate_docs
def method1(self):
pass
def method2(self):
pass
```
This will only produce the following docs:
```
/Child.md
# method2
```
Note: This is implemented by adding a hidden attribute on the object, so it
cannot be used on objects which do not allow new attributes to be added. So
this decorator must go *below* `@property`, `@classmethod`,
or `@staticmethod`:
```
class Example(object):
@property
@do_not_generate_docs
def x(self):
return self._x
```
Args:
obj: The object to hide from the generated docs.
Returns:
obj
"""
setattr(obj, _DO_NOT_DOC, None)
return obj
_DO_NOT_DOC_INHERITABLE = "_tf_docs_do_not_doc_inheritable"
def do_not_doc_inheritable(obj):
"""A decorator: Do not generate docs for this method.
This version of the decorator is "inherited" by subclasses. No docs will be
generated for the decorated method in any subclass. Even if the sub-class
overrides the method.
For example, to ensure that `method1` is **never documented** use this
decorator on the base-class:
```
class Parent(object):
@do_not_doc_inheritable
def method1(self):
pass
def method2(self):
pass
class Child(Parent):
def method1(self):
pass
def method2(self):
pass
```
This will produce the following docs:
```
/Parent.md
# method2
/Child.md
# method2
```
  When generating docs for a class's attributes, the `__mro__` is searched and
the attribute will be skipped if this decorator is detected on the attribute
on any class in the `__mro__`.
Note: This is implemented by adding a hidden attribute on the object, so it
cannot be used on objects which do not allow new attributes to be added. So
this decorator must go *below* `@property`, `@classmethod`,
or `@staticmethod`:
```
class Example(object):
@property
@do_not_doc_inheritable
def x(self):
return self._x
```
Args:
obj: The class-attribute to hide from the generated docs.
Returns:
obj
"""
setattr(obj, _DO_NOT_DOC_INHERITABLE, None)
return obj
_FOR_SUBCLASS_IMPLEMENTERS = "_tf_docs_tools_for_subclass_implementers"
def for_subclass_implementers(obj):
"""A decorator: Only generate docs for this method in the defining class.
  Also group this method's docs with any `@abstractmethod` in the class's docs.
  No docs will be generated for this class attribute in sub-classes.
The canonical use case for this is `tf.keras.layers.Layer.call`: It's a
public method, essential for anyone implementing a subclass, but it should
never be called directly.
Works on method, or other class-attributes.
  When generating docs for a class's attributes, the `__mro__` is searched and
the attribute will be skipped if this decorator is detected on the attribute
on any **parent** class in the `__mro__`.
For example:
```
class Parent(object):
@for_subclass_implementers
def method1(self):
pass
def method2(self):
pass
class Child1(Parent):
def method1(self):
pass
def method2(self):
pass
class Child2(Parent):
def method1(self):
pass
def method2(self):
pass
```
This will produce the following docs:
```
/Parent.md
# method1
# method2
/Child1.md
# method2
/Child2.md
# method2
```
Note: This is implemented by adding a hidden attribute on the object, so it
cannot be used on objects which do not allow new attributes to be added. So
this decorator must go *below* `@property`, `@classmethod`,
or `@staticmethod`:
```
class Example(object):
@property
@for_subclass_implementers
def x(self):
return self._x
```
Args:
obj: The class-attribute to hide from the generated docs.
Returns:
obj
"""
setattr(obj, _FOR_SUBCLASS_IMPLEMENTERS, None)
return obj
do_not_doc_in_subclasses = for_subclass_implementers
def should_skip(obj):
"""Returns true if docs generation should be skipped for this object.
  Checks for the `do_not_generate_docs` or `do_not_doc_inheritable` decorators.
Args:
obj: The object to document, or skip.
Returns:
True if the object should be skipped
"""
# Unwrap fget if the object is a property
if isinstance(obj, property):
obj = obj.fget
return hasattr(obj, _DO_NOT_DOC) or hasattr(obj, _DO_NOT_DOC_INHERITABLE)
def should_skip_class_attr(cls, name):
"""Returns true if docs should be skipped for this class attribute.
Args:
cls: The class the attribute belongs to.
name: The name of the attribute.
Returns:
True if the attribute should be skipped.
"""
# Get the object with standard lookup, from the nearest
# defining parent.
try:
obj = getattr(cls, name)
except AttributeError:
# Avoid error caused by enum metaclasses in python3
if name in ("name", "value"):
return True
raise
# Unwrap fget if the object is a property
if isinstance(obj, property):
obj = obj.fget
# Skip if the object is decorated with `do_not_generate_docs` or
# `do_not_doc_inheritable`
if should_skip(obj):
return True
# Use __dict__ lookup to get the version defined in *this* class.
obj = cls.__dict__.get(name, None)
if isinstance(obj, property):
obj = obj.fget
if obj is not None:
# If not none, the object is defined in *this* class.
# Do not skip if decorated with `for_subclass_implementers`.
if hasattr(obj, _FOR_SUBCLASS_IMPLEMENTERS):
return False
# for each parent class
for parent in cls.__mro__[1:]:
obj = getattr(parent, name, None)
if obj is None:
continue
if isinstance(obj, property):
obj = obj.fget
# Skip if the parent's definition is decorated with `do_not_doc_inheritable`
# or `for_subclass_implementers`
if hasattr(obj, _DO_NOT_DOC_INHERITABLE):
return True
if hasattr(obj, _FOR_SUBCLASS_IMPLEMENTERS):
return True
  # No blocking decorators --> don't skip
return False
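# A small runnable sketch, not part of this module: decorating a method hides
# it from the generated docs, which `should_skip` and `should_skip_class_attr`
# report below. The example class is made up for illustration.
if __name__ == "__main__":
  class _Example(object):
    @do_not_generate_docs
    def hidden(self):
      pass
    def visible(self):
      pass
  print(should_skip(_Example.hidden))                  # True
  print(should_skip_class_attr(_Example, "visible"))   # False
  print(should_skip_class_attr(_Example, "hidden"))    # True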
| tensorflow-master | tensorflow/tools/docs/doc_controls.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import parser
# The test needs a real module. `types.ModuleType()` doesn't work, as the result
# is a `builtin` module. Using "parser" here is arbitrary. The tests don't
# depend on the module contents. At this point in the process the public api
# has already been extracted.
test_module = parser
def test_function(unused_arg, unused_kwarg='default'):
"""Docstring for test function."""
pass
def test_function_with_args_kwargs(unused_arg, *unused_args, **unused_kwargs):
"""Docstring for second test function."""
pass
class ParentClass(object):
@doc_controls.do_not_doc_inheritable
def hidden_method(self):
pass
class TestClass(ParentClass):
"""Docstring for TestClass itself."""
def a_method(self, arg='default'):
"""Docstring for a method."""
pass
def hidden_method(self):
pass
@doc_controls.do_not_generate_docs
def hidden_method2(self):
pass
class ChildClass(object):
"""Docstring for a child class."""
pass
@property
def a_property(self):
"""Docstring for a property."""
pass
CLASS_MEMBER = 'a class member'
class DummyVisitor(object):
def __init__(self, index, duplicate_of):
self.index = index
self.duplicate_of = duplicate_of
class ParserTest(googletest.TestCase):
def test_documentation_path(self):
self.assertEqual('test.md', parser.documentation_path('test'))
self.assertEqual('test/module.md', parser.documentation_path('test.module'))
def test_replace_references(self):
class HasOneMember(object):
def foo(self):
pass
string = (
'A @{tf.reference}, another @{tf.reference$with\nnewline}, a member '
'@{tf.reference.foo}, and a @{tf.third$link `text` with `code` in '
'it}.')
duplicate_of = {'tf.third': 'tf.fourth'}
index = {'tf.reference': HasOneMember,
'tf.reference.foo': HasOneMember.foo,
'tf.third': HasOneMember,
'tf.fourth': HasOneMember}
visitor = DummyVisitor(index, duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
result = reference_resolver.replace_references(string, '../..')
self.assertEqual('A <a href="../../tf/reference.md">'
'<code>tf.reference</code></a>, '
'another <a href="../../tf/reference.md">'
'with\nnewline</a>, '
'a member <a href="../../tf/reference.md#foo">'
'<code>tf.reference.foo</code></a>, '
'and a <a href="../../tf/fourth.md">link '
'<code>text</code> with '
'<code>code</code> in it</a>.', result)
def test_doc_replace_references(self):
string = '@{$doc1} @{$doc1#abc} @{$doc1$link} @{$doc1#def$zelda} @{$do/c2}'
class DocInfo(object):
pass
doc1 = DocInfo()
doc1.title = 'Title1'
doc1.url = 'URL1'
doc2 = DocInfo()
doc2.title = 'Two words'
doc2.url = 'somewhere/else'
doc_index = {'doc1': doc1, 'do/c2': doc2}
visitor = DummyVisitor(index={}, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index=doc_index, py_module_names=['tf'])
result = reference_resolver.replace_references(string, 'python')
self.assertEqual('<a href="../URL1">Title1</a> '
'<a href="../URL1#abc">Title1</a> '
'<a href="../URL1">link</a> '
'<a href="../URL1#def">zelda</a> '
'<a href="../somewhere/else">Two words</a>', result)
def test_docs_for_class(self):
index = {
'TestClass': TestClass,
'TestClass.a_method': TestClass.a_method,
'TestClass.a_property': TestClass.a_property,
'TestClass.ChildClass': TestClass.ChildClass,
'TestClass.CLASS_MEMBER': TestClass.CLASS_MEMBER
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'TestClass': ['a_method', 'a_property', 'ChildClass', 'CLASS_MEMBER']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='TestClass', py_object=TestClass, parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(TestClass).split('\n')[0], page_info.doc.brief)
# Make sure the method is present
self.assertEqual(TestClass.a_method, page_info.methods[0].obj)
# Make sure that the signature is extracted properly and omits self.
self.assertEqual(["arg='default'"], page_info.methods[0].signature)
# Make sure the property is present
self.assertIs(TestClass.a_property, page_info.properties[0].obj)
# Make sure there is a link to the child class and it points the right way.
self.assertIs(TestClass.ChildClass, page_info.classes[0].obj)
# Make sure this file is contained as the definition location.
self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)
def test_namedtuple_field_order(self):
namedtupleclass = collections.namedtuple('namedtupleclass',
{'z', 'y', 'x', 'w', 'v', 'u'})
index = {
'namedtupleclass': namedtupleclass,
'namedtupleclass.u': namedtupleclass.u,
'namedtupleclass.v': namedtupleclass.v,
'namedtupleclass.w': namedtupleclass.w,
'namedtupleclass.x': namedtupleclass.x,
'namedtupleclass.y': namedtupleclass.y,
'namedtupleclass.z': namedtupleclass.z,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {'namedtupleclass': {'u', 'v', 'w', 'x', 'y', 'z'}}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='namedtupleclass',
py_object=namedtupleclass,
parser_config=parser_config)
    # Each namedtuple field has a docstring of the form:
# 'Alias for field number ##'. These props are returned sorted.
def sort_key(prop_info):
return int(prop_info.obj.__doc__.split(' ')[-1])
self.assertSequenceEqual(page_info.properties,
sorted(page_info.properties, key=sort_key))
def test_docs_for_class_should_skip(self):
class Parent(object):
@doc_controls.do_not_doc_inheritable
def a_method(self, arg='default'):
pass
class Child(Parent):
def a_method(self, arg='default'):
pass
index = {
'Child': Child,
'Child.a_method': Child.a_method,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'Child': ['a_method'],
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='Child', py_object=Child, parser_config=parser_config)
# Make sure the `a_method` is not present
self.assertEqual(0, len(page_info.methods))
def test_docs_for_message_class(self):
class CMessage(object):
def hidden(self):
pass
class Message(object):
def hidden2(self):
pass
class MessageMeta(object):
def hidden3(self):
pass
class ChildMessage(CMessage, Message, MessageMeta):
def my_method(self):
pass
index = {
'ChildMessage': ChildMessage,
'ChildMessage.hidden': ChildMessage.hidden,
'ChildMessage.hidden2': ChildMessage.hidden2,
'ChildMessage.hidden3': ChildMessage.hidden3,
'ChildMessage.my_method': ChildMessage.my_method,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {'ChildMessage': ['hidden', 'hidden2', 'hidden3', 'my_method']}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='ChildMessage',
py_object=ChildMessage,
parser_config=parser_config)
self.assertEqual(1, len(page_info.methods))
self.assertEqual('my_method', page_info.methods[0].short_name)
def test_docs_for_module(self):
index = {
'TestModule':
test_module,
'TestModule.test_function':
test_function,
'TestModule.test_function_with_args_kwargs':
test_function_with_args_kwargs,
'TestModule.TestClass':
TestClass,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'TestModule': ['TestClass', 'test_function',
'test_function_with_args_kwargs']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='TestModule',
py_object=test_module,
parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(test_module).split('\n')[0], page_info.doc.brief)
# Make sure that the members are there
funcs = {f_info.obj for f_info in page_info.functions}
self.assertEqual({test_function, test_function_with_args_kwargs}, funcs)
classes = {cls_info.obj for cls_info in page_info.classes}
self.assertEqual({TestClass}, classes)
# Make sure the module's file is contained as the definition location.
self.assertEqual(
os.path.relpath(test_module.__file__.rstrip('c'), '/'),
page_info.defined_in.path)
def test_docs_for_function(self):
index = {
'test_function': test_function
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'': ['test_function']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='test_function',
py_object=test_function,
parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(test_function).split('\n')[0], page_info.doc.brief)
# Make sure the extracted signature is good.
self.assertEqual(['unused_arg', "unused_kwarg='default'"],
page_info.signature)
# Make sure this file is contained as the definition location.
self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)
def test_docs_for_function_with_kwargs(self):
index = {
'test_function_with_args_kwargs': test_function_with_args_kwargs
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'': ['test_function_with_args_kwargs']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='test_function_with_args_kwargs',
py_object=test_function_with_args_kwargs,
parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(test_function_with_args_kwargs).split('\n')[0],
page_info.doc.brief)
# Make sure the extracted signature is good.
self.assertEqual(['unused_arg', '*unused_args', '**unused_kwargs'],
page_info.signature)
def test_parse_md_docstring(self):
def test_function_with_fancy_docstring(arg):
"""Function with a fancy docstring.
And a bunch of references: @{tf.reference}, another @{tf.reference},
a member @{tf.reference.foo}, and a @{tf.third}.
Args:
arg: An argument.
Raises:
an exception
Returns:
arg: the input, and
arg: the input, again.
@compatibility(numpy)
NumPy has nothing as awesome as this function.
@end_compatibility
@compatibility(theano)
Theano has nothing as awesome as this function.
Check it out.
@end_compatibility
"""
return arg, arg
class HasOneMember(object):
def foo(self):
pass
duplicate_of = {'tf.third': 'tf.fourth'}
index = {
'tf': test_module,
'tf.fancy': test_function_with_fancy_docstring,
'tf.reference': HasOneMember,
'tf.reference.foo': HasOneMember.foo,
'tf.third': HasOneMember,
'tf.fourth': HasOneMember
}
visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
doc_info = parser._parse_md_docstring(test_function_with_fancy_docstring,
'../..', reference_resolver)
self.assertNotIn('@', doc_info.docstring)
self.assertNotIn('compatibility', doc_info.docstring)
self.assertNotIn('Raises:', doc_info.docstring)
self.assertEqual(len(doc_info.function_details), 3)
self.assertEqual(set(doc_info.compatibility.keys()), {'numpy', 'theano'})
self.assertEqual(doc_info.compatibility['numpy'],
'NumPy has nothing as awesome as this function.\n')
def test_generate_index(self):
index = {
'tf': test_module,
'tf.TestModule': test_module,
'tf.test_function': test_function,
'tf.TestModule.test_function': test_function,
'tf.TestModule.TestClass': TestClass,
'tf.TestModule.TestClass.a_method': TestClass.a_method,
'tf.TestModule.TestClass.a_property': TestClass.a_property,
'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
}
duplicate_of = {'tf.TestModule.test_function': 'tf.test_function'}
visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
docs = parser.generate_global_index('TestLibrary', index=index,
reference_resolver=reference_resolver)
# Make sure duplicates and non-top-level symbols are in the index, but
# methods and properties are not.
self.assertNotIn('a_method', docs)
self.assertNotIn('a_property', docs)
self.assertIn('TestModule.TestClass', docs)
self.assertIn('TestModule.TestClass.ChildClass', docs)
self.assertIn('TestModule.test_function', docs)
    # Leading <code> tag to make sure it's included top-level.
# This depends on formatting, but should be stable.
self.assertIn('<code>tf.test_function', docs)
def test_argspec_for_functools_partial(self):
# pylint: disable=unused-argument
def test_function_for_partial1(arg1, arg2, kwarg1=1, kwarg2=2):
pass
def test_function_for_partial2(arg1, arg2, *my_args, **my_kwargs):
pass
# pylint: enable=unused-argument
# pylint: disable=protected-access
# Make sure everything works for regular functions.
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(1, 2),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(expected, parser._get_arg_spec(test_function_for_partial1))
# Make sure doing nothing works.
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(1, 2),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting args from the front works.
expected = tf_inspect.FullArgSpec(
args=['arg2', 'kwarg1', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(1, 2),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = tf_inspect.FullArgSpec(
args=['kwarg2'],
varargs=None,
varkw=None,
defaults=(2,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, 1, 2, 3)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting kwargs works.
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(2,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, kwarg1=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg1'],
varargs=None,
varkw=None,
defaults=(1,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = tf_inspect.FullArgSpec(
args=['arg1'],
varargs=None,
varkw=None,
defaults=(),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1,
arg2=0, kwarg1=0, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
    # Make sure *args and **kwargs are accounted for.
expected = tf_inspect.FullArgSpec(
args=[],
varargs='my_args',
varkw='my_kwargs',
defaults=(),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial2, 0, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# pylint: enable=protected-access
def testSaveReferenceResolver(self):
you_cant_serialize_this = object()
duplicate_of = {'AClass': ['AClass2']}
doc_index = {'doc': you_cant_serialize_this}
is_fragment = {
'tf': False,
'tf.VERSION': True,
'tf.AClass': False,
'tf.AClass.method': True,
'tf.AClass2': False,
'tf.function': False
}
py_module_names = ['tf', 'tfdbg']
resolver = parser.ReferenceResolver(duplicate_of, doc_index, is_fragment,
py_module_names)
outdir = googletest.GetTempDir()
filepath = os.path.join(outdir, 'resolver.json')
resolver.to_json_file(filepath)
resolver2 = parser.ReferenceResolver.from_json_file(filepath, doc_index)
# There are no __slots__, so all fields are visible in __dict__.
self.assertEqual(resolver.__dict__, resolver2.__dict__)
def testIsFreeFunction(self):
result = parser.is_free_function(test_function, 'test_module.test_function',
{'test_module': test_module})
self.assertTrue(result)
result = parser.is_free_function(test_function, 'TestClass.test_function',
{'TestClass': TestClass})
self.assertFalse(result)
result = parser.is_free_function(TestClass, 'TestClass', {})
self.assertFalse(result)
result = parser.is_free_function(test_module, 'test_module', {})
self.assertFalse(result)
RELU_DOC = """Computes rectified linear: `max(features, 0)`
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
`half`.
name: A name for the operation (optional)
Returns:
A `Tensor`. Has the same type as `features`
"""
class TestParseFunctionDetails(googletest.TestCase):
def test_parse_function_details(self):
docstring, function_details = parser._parse_function_details(RELU_DOC)
self.assertEqual(len(function_details), 2)
args = function_details[0]
self.assertEqual(args.keyword, 'Args')
self.assertEqual(len(args.header), 0)
self.assertEqual(len(args.items), 2)
self.assertEqual(args.items[0][0], 'features')
self.assertEqual(args.items[1][0], 'name')
self.assertEqual(args.items[1][1],
'A name for the operation (optional)\n\n')
returns = function_details[1]
self.assertEqual(returns.keyword, 'Returns')
relu_doc_lines = RELU_DOC.split('\n')
self.assertEqual(docstring, relu_doc_lines[0] + '\n\n')
self.assertEqual(returns.header, relu_doc_lines[-2] + '\n')
self.assertEqual(
RELU_DOC,
docstring + ''.join(str(detail) for detail in function_details))
class TestGenerateSignature(googletest.TestCase):
def test_known_object(self):
known_object = object()
reverse_index = {id(known_object): 'location.of.object.in.api'}
def example_fun(arg=known_object): # pylint: disable=unused-argument
pass
sig = parser._generate_signature(example_fun, reverse_index)
self.assertEqual(sig, ['arg=location.of.object.in.api'])
def test_literals(self):
if sys.version_info >= (3, 0):
print('Warning: Doc generation is not supported from python3.')
return
def example_fun(a=5, b=5.0, c=None, d=True, e='hello', f=(1, (2, 3))): # pylint: disable=g-bad-name, unused-argument
pass
sig = parser._generate_signature(example_fun, reverse_index={})
self.assertEqual(
sig, ['a=5', 'b=5.0', 'c=None', 'd=True', "e='hello'", 'f=(1, (2, 3))'])
def test_dotted_name(self):
if sys.version_info >= (3, 0):
print('Warning: Doc generation is not supported from python3.')
return
# pylint: disable=g-bad-name
class a(object):
class b(object):
class c(object):
class d(object):
def __init__(self, *args):
pass
# pylint: enable=g-bad-name
e = {'f': 1}
def example_fun(arg1=a.b.c.d, arg2=a.b.c.d(1, 2), arg3=e['f']): # pylint: disable=unused-argument
pass
sig = parser._generate_signature(example_fun, reverse_index={})
self.assertEqual(sig, ['arg1=a.b.c.d', 'arg2=a.b.c.d(1, 2)', "arg3=e['f']"])
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/docs/parser_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import fnmatch
import os
import shutil
import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def write_docs(output_dir,
parser_config,
yaml_toc,
root_title='TensorFlow',
search_hints=True,
site_api_path=''):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
site_api_path: The output path relative to the site root. Used in the
`_toc.yaml` and `_redirects.yaml` files.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
# Make output_dir.
if not os.path.isabs(output_dir):
raise ValueError("'output_dir' must be an absolute path.\n"
" output_dir='%s'" % output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# These dictionaries are used for table-of-contents generation below
  # They will contain, after the for-loop below:
# - module name(string):classes and functions the module contains(list)
module_children = {}
# - symbol name(string):pathname (string)
symbol_to_file = {}
# Collect redirects for an api _redirects.yaml file.
redirects = []
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
if full_name in parser_config.duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
parser.is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join('api_docs/python',
parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if tf_inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if tf_inspect.ismodule(parser_config.index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
# This function returns raw bytes in PY2 or unicode in PY3.
if search_hints:
content = [page_info.get_metadata_html()]
else:
content = ['']
content.append(pretty_docs.build_md_page(page_info))
text = '\n'.join(content)
if six.PY3:
text = text.encode('utf-8')
with open(path, 'wb') as f:
f.write(text)
except OSError:
raise OSError(
'Cannot write documentation for %s to %s' % (full_name, directory))
duplicates = parser_config.duplicates.get(full_name, [])
if not duplicates:
continue
duplicates = [item for item in duplicates if item != full_name]
for dup in duplicates:
from_path = os.path.join(site_api_path, dup.replace('.', '/'))
to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
redirects.append((
os.path.join('/', from_path),
os.path.join('/', to_path)))
if redirects:
redirects = sorted(redirects)
template = ('- from: {}\n'
' to: {}\n')
redirects = [template.format(f, t) for f, t in redirects]
api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
with open(api_redirects_path, 'w') as redirect_file:
redirect_file.write('redirects:\n')
redirect_file.write(''.join(redirects))
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(module_children.keys(), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
indent_num = module.count('.')
# Don't list `tf.submodule` inside `tf`
indent_num = max(indent_num, 1)
indent = ' '*indent_num
if indent_num > 1:
          # tf.contrib.bayesflow.entropy will be under
          #   tf.contrib->bayesflow->entropy
title = module.split('.')[-1]
else:
title = module
header = [
'- title: ' + title,
' section:',
' - title: Overview',
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[module])]
header = ''.join([indent+line+'\n' for line in header])
f.write(header)
symbols_in_module = module_children.get(module, [])
# Sort case-insensitive, if equal sort case sensitive (upper first)
symbols_in_module.sort(key=lambda a: (a.upper(), a))
for full_name in symbols_in_module:
item = [
' - title: ' + full_name[len(module) + 1:],
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[full_name])]
item = ''.join([indent+line+'\n' for line in item])
f.write(item)
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(
parser.generate_global_index(root_title, parser_config.index,
parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
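# Illustrative sketch (not part of the original file): how `add_dict_to_dict`
# merges dict-of-list maps such as the private/do-not-descend maps below. The
# helper `_example_add_dict_to_dict` and its keys are made up for the example.
def _example_add_dict_to_dict():
  add_to = {'tf': ['cli']}
  add_dict_to_dict({'tf': ['lib'], 'tf.test': ['mock']}, add_to)
  # Existing keys are extended, new keys are inserted:
  #   add_to == {'tf': ['cli', 'lib'], 'tf.test': ['mock']}
  return add_to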
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {
'tf.contrib.autograph': ['utils', 'operators'],
'tf.test': ['mock'],
'tf.compat': ['v1', 'v2'],
'tf.contrib.estimator': ['python'],
}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
  # TODO(markdaoust): Use doc_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
class DocControlsAwareCrawler(public_api.PublicAPIVisitor):
"""A `docs_controls` aware API-crawler."""
def _is_private(self, path, name, obj):
if doc_controls.should_skip(obj):
return True
return super(DocControlsAwareCrawler, self)._is_private(path, name, obj)
def extract(py_modules,
private_map,
do_not_descend_map,
visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = visitor_cls(py_modules[0][0])
api_visitor = DocControlsAwareCrawler(visitor)
api_visitor.set_root_name(py_modules[0][0])
add_dict_to_dict(private_map, api_visitor.private_map)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
api_visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
if not os.path.isabs(src_dir):
raise ValueError("'src_dir' must be an absolute path.\n"
" src_dir='%s'" % src_dir)
if not os.path.exists(src_dir):
raise ValueError("'src_dir' path must exist.\n"
" src_dir='%s'" % src_dir)
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not base_name.endswith('.md'):
continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
if title_parser.title is None:
msg = ('`{}` has no markdown title (# title)'.format(
os.path.join(dirpath, base_name)))
raise ValueError(msg)
key_parts = os.path.join(suffix, base_name[:-3]).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
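# Illustrative sketch (not part of the original file): building a doc index
# from a throwaway directory. This assumes `build_doc_index` behaves as its
# docstring above describes; `_example_build_doc_index` and the file names
# below are hypothetical, for clarity only.
def _example_build_doc_index():
  src_dir = tempfile.mkdtemp()
  guide_dir = os.path.join(src_dir, 'guides')
  os.makedirs(guide_dir)
  with open(os.path.join(guide_dir, 'saving.md'), 'w') as f:
    f.write('# Saving models\nSome text.\n')
  doc_index = build_doc_index(src_dir)
  # Both the bare file name and the 'dir/file' key should point at the same
  # _DocInfo, e.g. doc_index['saving'] and doc_index['guides/saving'].
  return doc_index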
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (('%s#%s' % (base_name, section_tag))
if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
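# Illustrative sketch (not part of the original file): the link a _GuideRef
# produces. The guide name, titles, and tag below are made up for the example.
def _example_guide_ref_link():
  ref = _GuideRef('nn', 'Neural Network Guide', 'Activation Functions',
                  'activations')
  # ref.url == 'api_guides/python/nn#activations'
  # ref.make_md_link('../') ==
  #   '[Neural Network Guide > Activation Functions](../api_guides/python/nn#activations)'
  return ref.make_md_link('../')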
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index the file and section of each `symbol` reference."""
for match in parser.AUTO_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(
_GuideRef(self.base_name, self.title, self.section_title,
self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit id tag.
"section" here refers to blocks delimited by second level headings.
"""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
def update_id_tags_inplace(src_dir):
"""Set explicit ids on all second-level headings to ensure back-links work.
Args:
src_dir: The directory of md-files to convert (inplace).
"""
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
for base_name in filenames:
if not base_name.endswith('.md'):
continue
full_path = os.path.join(src_dir, dirpath, base_name)
# Tag updater loads the file, makes the replacements, and returns the
# modified file contents
content = tag_updater.process(full_path)
with open(full_path, 'w') as f:
f.write(content)
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(src_dir,
output_dir,
reference_resolver,
file_pattern='*.md',
api_docs_relpath='api_docs'):
"""Fix @{} references in all files under `src_dir` matching `file_pattern`.
A matching directory structure, with the modified files is
written to `output_dir`.
`{"__init__.py","OWNERS","README.txt"}` are skipped.
Files not matching `file_pattern` (using `fnmatch`) are copied with no change.
Also, files in the `api_guides/python` directory get explicit ids set on all
heading-2s to ensure back-links work.
Args:
src_dir: The directory to convert files from.
output_dir: The root directory to write the resulting files to.
reference_resolver: A `parser.ReferenceResolver` to make the replacements.
    file_pattern: Only replace references in files matching `file_pattern`,
using fnmatch. Non-matching files are copied unchanged.
api_docs_relpath: Relative-path string to the api_docs, from the src_dir.
"""
# Iterate through all the source files and process them.
for dirpath, _, filenames in os.walk(src_dir):
depth = os.path.relpath(src_dir, start=dirpath)
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.join(depth, api_docs_relpath, 'python')
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
for base_name in filenames:
if base_name in EXCLUDED:
continue
full_in_path = os.path.join(dirpath, base_name)
# Set the `current_doc_full_name` so bad files can be reported on errors.
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
# Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
if full_in_path != full_out_path:
shutil.copyfile(full_in_path, full_out_path)
continue
with open(full_in_path, 'rb') as f:
content = f.read().decode('utf-8')
content = reference_resolver.replace_references(content,
relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write(content.encode('utf-8'))
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._private_map = _get_default_private_map()
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
self.argument_parser.add_argument(
'--no_search_hints',
dest='search_hints',
action='store_false',
default=True)
self.argument_parser.add_argument(
'--site_api_path',
type=str, default='',
        help='The path from the site-root to the api_docs '
        'directory for this project')
self.argument_parser.add_argument(
'--api_cache_out_path',
type=str,
default=None,
help='Path to store a json-serialized api-index, so links can be '
'inserted into docs without rebuilding the api_docs')
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.')
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=tempfile.mkdtemp(),
required=False,
help='Optional directory of source docs to add api_docs links to')
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
help='Base directory to strip from file names referenced in docs.')
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_private_map(self, d):
add_dict_to_dict(d, self._private_map)
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_private_map(self, d):
self._private_map = d
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver.from_visitor(
visitor, doc_index, py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._private_map,
self._do_not_descend_map)
def build(self, flags):
"""Build all the docs.
This produces two outputs
python api docs:
* generated from modules set with `set_py_modules`.
* written to '{FLAGS.output_dir}/api_docs/python/'
non-api docs:
* Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
* '@{}' references in '.md' files are replaced with links.
* '.md' files under 'api_guides/python' have explicit ids set for their
second level headings.
Args:
flags:
* src_dir: Where to fetch the non-api-docs.
* base_dir: Base of the docs directory (Used to build correct
relative links).
* output_dir: Where to write the resulting docs.
Returns:
The number of errors encountered while processing.
"""
# Extract the python api from the _py_modules
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
if getattr(flags, 'api_cache_out_path', None):
reference_resolver.to_json_file(flags.api_cache_out_path)
# Build the guide_index for the api_docs back links.
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
# Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(
output_dir,
parser_config,
yaml_toc=self.yaml_toc,
root_title=root_title,
search_hints=getattr(flags, 'search_hints', True),
site_api_path=getattr(flags, 'site_api_path', ''))
# Replace all the @{} references in files under `FLAGS.src_dir`
replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
# Fix the tags in the guide dir.
guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
if os.path.exists(guide_dir):
update_id_tags_inplace(guide_dir)
# Report all errors found by the reference resolver, and return the error
# code.
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
| tensorflow-master | tensorflow/tools/docs/generate_lib.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import functools
import itertools
import json
import os
import re
import astor
import six
from google.protobuf.message import Message as ProtoMessage
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
def is_free_function(py_object, full_name, index):
"""Check if input is a free function (and not a class- or static method).
Args:
    py_object: The object in question.
full_name: The full name of the object, like `tf.module.symbol`.
index: The {full_name:py_object} dictionary for the public API.
Returns:
    True if the object is a stand-alone function, and not part of a class
definition.
"""
if not tf_inspect.isfunction(py_object):
return False
parent_name = full_name.rsplit('.', 1)[0]
if tf_inspect.isclass(index[parent_name]):
return False
return True
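# Illustrative sketch (not part of the original file): `is_free_function`
# distinguishes module-level functions from methods by looking up the parent
# symbol in the index. The module and function names here are made up.
def _example_is_free_function():
  import types  # local import; only needed for this sketch
  fake_module = types.ModuleType('fake_module')
  def fake_function():
    pass
  index = {'fake_module': fake_module,
           'fake_module.fake_function': fake_function}
  # True: the parent symbol 'fake_module' is a module, not a class.
  return is_free_function(fake_function, 'fake_module.fake_function', index)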
# A regular expression capturing a python identifier.
IDENTIFIER_RE = r'[a-zA-Z_]\w*'
class TFDocsError(Exception):
pass
class _Errors(object):
"""A collection of errors."""
def __init__(self):
self._errors = []
def log_all(self):
"""Log all the collected errors to the standard error."""
template = 'ERROR:\n output file name: %s\n %s\n\n'
for full_name, message in self._errors:
logging.warn(template, full_name, message)
def append(self, full_name, message):
"""Add an error to the collection.
Args:
full_name: The path to the file in which the error occurred.
message: The message to display with the error.
"""
self._errors.append((full_name, message))
def __len__(self):
return len(self._errors)
def __eq__(self, other):
if not isinstance(other, _Errors):
return False
return self._errors == other._errors # pylint: disable=protected-access
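# Illustrative sketch (not part of the original file): the _Errors collection
# simply accumulates (file, message) pairs for later logging. The file name
# and message below are made up for the example.
def _example_errors_collection():
  errors = _Errors()
  errors.append('tf/foo.md', 'Cannot make link to "tf.bar": Not in index.')
  # len(errors) == 1; errors.log_all() would write the entry via logging.warn.
  return len(errors)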
def documentation_path(full_name, is_fragment=False):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
to write the documentation for that symbol (relative to a base directory).
Documentation files are organized into directories that mirror the python
module/class structure.
Args:
full_name: Fully qualified name of a library symbol.
is_fragment: If `False` produce a direct markdown link (`tf.a.b.c` -->
`tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` -->
`tf/a/b.md#c`
Returns:
The file path to which to write the documentation for `full_name`.
"""
parts = full_name.split('.')
if is_fragment:
parts, fragment = parts[:-1], parts[-1]
result = os.path.join(*parts) + '.md'
if is_fragment:
result = result + '#' + fragment
return result
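# Illustrative sketch (not part of the original file): how symbol names map to
# documentation paths, assuming `documentation_path` behaves as documented
# above (path separators shown for POSIX systems).
def _example_documentation_path():
  # A class or module gets its own page:
  #   documentation_path('tf.a.b.C') == 'tf/a/b/C.md'
  # A method or constant becomes a fragment on its parent's page:
  #   documentation_path('tf.a.b.C.method', is_fragment=True) == 'tf/a/b/C.md#method'
  return (documentation_path('tf.a.b.C'),
          documentation_path('tf.a.b.C.method', is_fragment=True))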
def _get_raw_docstring(py_object):
"""Get the docs for a given python object.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
Returns:
The docstring, or the empty string if no docstring was found.
"""
# For object instances, tf_inspect.getdoc does give us the docstring of their
# type, which is not what we want. Only return the docstring if it is useful.
if (tf_inspect.isclass(py_object) or tf_inspect.ismethod(py_object) or
tf_inspect.isfunction(py_object) or tf_inspect.ismodule(py_object) or
isinstance(py_object, property)):
return tf_inspect.getdoc(py_object) or ''
else:
return ''
# A regular expression for capturing a @{symbol} reference.
SYMBOL_REFERENCE_RE = re.compile(
r"""
# Start with a literal "@{".
@\{
# Group at least 1 symbol, not "}".
([^}]+)
# Followed by a closing "}"
\}
""",
flags=re.VERBOSE)
AUTO_REFERENCE_RE = re.compile(r'`([a-zA-Z0-9_.]+?)`')
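# Illustrative sketch (not part of the original file): what the two reference
# regexes above match. The symbol and guide names are made up for the example.
def _example_reference_regexes():
  explicit = SYMBOL_REFERENCE_RE.findall('See @{tf.foo} and @{$guide$Guide Title}.')
  # explicit == ['tf.foo', '$guide$Guide Title']
  auto = AUTO_REFERENCE_RE.findall('Wraps `tf.nn.relu` in `max` style backticks.')
  # auto == ['tf.nn.relu', 'max']
  return explicit, auto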
class ReferenceResolver(object):
"""Class for replacing @{...} references with Markdown links.
Attributes:
current_doc_full_name: A string (or None) indicating the name of the
document currently being processed, so errors can reference the broken
doc.
"""
def __init__(self, duplicate_of, doc_index, is_fragment, py_module_names):
"""Initializes a Reference Resolver.
Args:
duplicate_of: A map from duplicate names to preferred names of API
symbols.
doc_index: A `dict` mapping symbol name strings to objects with `url`
and `title` fields. Used to resolve @{$doc} references in docstrings.
is_fragment: A map from full names to bool for each symbol. If True the
object lives at a page fragment `tf.a.b.c` --> `tf/a/b#c`. If False
object has a page to itself: `tf.a.b.c` --> `tf/a/b/c`.
py_module_names: A list of string names of Python modules.
"""
self._duplicate_of = duplicate_of
self._doc_index = doc_index
self._is_fragment = is_fragment
self._all_names = set(is_fragment.keys())
self._py_module_names = py_module_names
self.current_doc_full_name = None
self._errors = _Errors()
def add_error(self, message):
self._errors.append(self.current_doc_full_name, message)
def log_errors(self):
self._errors.log_all()
def num_errors(self):
return len(self._errors)
@classmethod
def from_visitor(cls, visitor, doc_index, **kwargs):
"""A factory function for building a ReferenceResolver from a visitor.
Args:
visitor: an instance of `DocGeneratorVisitor`
      doc_index: a dictionary mapping document names to reference objects with
"title" and "url" fields
**kwargs: all remaining args are passed to the constructor
Returns:
      an instance of `ReferenceResolver`
"""
is_fragment = {}
for name, obj in visitor.index.items():
has_page = (
tf_inspect.isclass(obj) or tf_inspect.ismodule(obj) or
is_free_function(obj, name, visitor.index))
is_fragment[name] = not has_page
return cls(
duplicate_of=visitor.duplicate_of,
doc_index=doc_index,
is_fragment=is_fragment,
**kwargs)
@classmethod
def from_json_file(cls, filepath, doc_index):
with open(filepath) as f:
json_dict = json.load(f)
return cls(doc_index=doc_index, **json_dict)
def to_json_file(self, filepath):
"""Converts the RefenceResolver to json and writes it to the specified file.
Args:
filepath: The file path to write the json to.
"""
try:
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
json_dict = {}
for key, value in self.__dict__.items():
# Drop these two fields. `_doc_index` is not serializable. `_all_names` is
# generated by the constructor.
if key in ('_doc_index', '_all_names',
'_errors', 'current_doc_full_name'):
continue
# Strip off any leading underscores on field names as these are not
# recognized by the constructor.
json_dict[key.lstrip('_')] = value
with open(filepath, 'w') as f:
json.dump(json_dict, f, indent=2, sort_keys=True)
def replace_references(self, string, relative_path_to_root):
"""Replace "@{symbol}" references with links to symbol's documentation page.
This functions finds all occurrences of "@{symbol}" in `string`
and replaces them with markdown links to the documentation page
for "symbol".
`relative_path_to_root` is the relative path from the document
that contains the "@{symbol}" reference to the root of the API
documentation that is linked to. If the containing page is part of
the same API docset, `relative_path_to_root` can be set to
`os.path.dirname(documentation_path(name))`, where `name` is the
python name of the object whose documentation page the reference
lives on.
Args:
string: A string in which "@{symbol}" references should be replaced.
relative_path_to_root: The relative path from the containing document to
the root of the API documentation that is being linked to.
Returns:
`string`, with "@{symbol}" references replaced by Markdown links.
"""
def strict_one_ref(match):
try:
return self._one_ref(match, relative_path_to_root)
except TFDocsError as e:
self.add_error(e.message)
return 'BAD_LINK'
string = re.sub(SYMBOL_REFERENCE_RE, strict_one_ref, string)
def sloppy_one_ref(match):
try:
return self._one_ref(match, relative_path_to_root)
except TFDocsError:
return match.group(0)
string = re.sub(AUTO_REFERENCE_RE, sloppy_one_ref, string)
return string
def python_link(self, link_text, ref_full_name, relative_path_to_root,
code_ref=True):
"""Resolve a "@{python symbol}" reference to a Markdown link.
This will pick the canonical location for duplicate symbols. The
input to this function should already be stripped of the '@' and
'{}'. This function returns a Markdown link. If `code_ref` is
true, it is assumed that this is a code reference, so the link
text will be rendered as code (using backticks).
`link_text` should refer to a library symbol, starting with 'tf.'.
Args:
link_text: The text of the Markdown link.
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
code_ref: If true (the default), put `link_text` in `...`.
Returns:
A markdown link to the documentation page of `ref_full_name`.
"""
url = self.reference_to_url(ref_full_name, relative_path_to_root)
if code_ref:
      link_text = '<code>{}</code>'.format(link_text)
else:
link_text = self._link_text_to_html(link_text)
return '<a href="{}">{}</a>'.format(url, link_text)
@staticmethod
def _link_text_to_html(link_text):
code_re = '`(.*?)`'
return re.sub(code_re, r'<code>\1</code>', link_text)
def py_master_name(self, full_name):
"""Return the master name for a Python symbol name."""
return self._duplicate_of.get(full_name, full_name)
def reference_to_url(self, ref_full_name, relative_path_to_root):
"""Resolve a "@{python symbol}" reference to a relative path.
The input to this function should already be stripped of the '@'
and '{}', and its output is only the link, not the full Markdown.
If `ref_full_name` is the name of a class member, method, or property, the
link will point to the page of the containing class, and it will include the
method name as an anchor. For example, `tf.module.MyClass.my_method` will be
translated into a link to
`os.join.path(relative_path_to_root, 'tf/module/MyClass.md#my_method')`.
Args:
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
Returns:
      A relative path that links from the current document's location to the
      documentation page of `ref_full_name`.
Raises:
RuntimeError: If `ref_full_name` is not documented.
TFDocsError: If the @{} syntax cannot be decoded.
"""
master_name = self._duplicate_of.get(ref_full_name, ref_full_name)
# Check whether this link exists
if master_name not in self._all_names:
raise TFDocsError(
'Cannot make link to "%s": Not in index.' % master_name)
ref_path = documentation_path(master_name, self._is_fragment[master_name])
return os.path.join(relative_path_to_root, ref_path)
def _one_ref(self, match, relative_path_to_root):
"""Return a link for a single "@{symbol}" reference."""
string = match.group(1)
# Look for link text after $.
dollar = string.rfind('$')
if dollar > 0: # Ignore $ in first character
link_text = string[dollar + 1:]
string = string[:dollar]
manual_link_text = True
else:
link_text = string
manual_link_text = False
# Handle different types of references.
if string.startswith('$'): # Doc reference
return self._doc_link(string, link_text, manual_link_text,
relative_path_to_root)
elif string.startswith('tensorflow::'):
# C++ symbol
return self._cc_link(string, link_text, manual_link_text,
relative_path_to_root)
else:
is_python = False
for py_module_name in self._py_module_names:
if string == py_module_name or string.startswith(py_module_name + '.'):
is_python = True
break
if is_python: # Python symbol
return self.python_link(
link_text,
string,
relative_path_to_root,
code_ref=not manual_link_text)
# Error!
raise TFDocsError('Did not understand "%s"' % match.group(0),
'BROKEN_LINK')
def _doc_link(self, string, link_text, manual_link_text,
relative_path_to_root):
"""Generate a link for a @{$...} reference."""
string = string[1:] # remove leading $
# If string has a #, split that part into `hash_tag`
hash_pos = string.find('#')
if hash_pos > -1:
hash_tag = string[hash_pos:]
string = string[:hash_pos]
else:
hash_tag = ''
if string in self._doc_index:
if not manual_link_text: link_text = self._doc_index[string].title
url = os.path.normpath(os.path.join(
relative_path_to_root, '../..', self._doc_index[string].url))
link_text = self._link_text_to_html(link_text)
return '<a href="{}{}">{}</a>'.format(url, hash_tag, link_text)
return self._doc_missing(string, hash_tag, link_text, manual_link_text,
relative_path_to_root)
def _doc_missing(self, string, unused_hash_tag, unused_link_text,
unused_manual_link_text, unused_relative_path_to_root):
"""Generate an error for unrecognized @{$...} references."""
raise TFDocsError('Unknown Document "%s"' % string)
def _cc_link(self, string, link_text, unused_manual_link_text,
relative_path_to_root):
"""Generate a link for a @{tensorflow::...} reference."""
# TODO(josh11b): Fix this hard-coding of paths.
if string == 'tensorflow::ClientSession':
ret = 'class/tensorflow/client-session.md'
elif string == 'tensorflow::Scope':
ret = 'class/tensorflow/scope.md'
elif string == 'tensorflow::Status':
ret = 'class/tensorflow/status.md'
elif string == 'tensorflow::Tensor':
ret = 'class/tensorflow/tensor.md'
elif string == 'tensorflow::ops::Const':
ret = 'namespace/tensorflow/ops.md#const'
else:
raise TFDocsError('C++ reference not understood: "%s"' % string)
# relative_path_to_root gets you to api_docs/python, we go from there
# to api_docs/cc, and then add ret.
cc_relative_path = os.path.normpath(os.path.join(
relative_path_to_root, '../cc', ret))
return '<a href="{}"><code>{}</code></a>'.format(cc_relative_path,
link_text)
# TODO(aselle): Collect these into a big list for all modules and functions
# and make a rosetta stone page.
def _handle_compatibility(doc):
"""Parse and remove compatibility blocks from the main docstring.
Args:
    doc: The docstring that contains compatibility notes.
  Returns:
    A tuple of the modified docstring and a dict that maps compatibility
    note type to the text of the note.
"""
compatibility_notes = {}
match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
r'((?:[^@\n]*\n)+)'
r'\s*@end_compatibility')
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
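# Illustrative sketch (not part of the original file): splitting compatibility
# blocks out of a docstring, assuming the block format matched by the regex
# above. The helper and docstring text are made up for the example.
def _example_handle_compatibility():
  doc = ('Does something.\n'
         '@compatibility(numpy)\n'
         'Equivalent to np.something.\n'
         '@end_compatibility\n')
  stripped, notes = _handle_compatibility(doc)
  # `stripped` no longer contains the @compatibility block, and
  # notes == {'numpy': 'Equivalent to np.something.\n'}
  return stripped, notes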
def _gen_pairs(items):
"""Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].
Args:
items: A list of items (length must be even)
Yields:
The original items, in pairs
"""
assert len(items) % 2 == 0
items = iter(items)
while True:
try:
yield next(items), next(items)
except StopIteration:
return
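# Illustrative sketch (not part of the original file): pairing up the
# alternating keyword/content pieces produced by the regex split further
# below. The helper and its inputs are made up for the example.
def _example_gen_pairs():
  # list(_gen_pairs(['Args', 'args text', 'Returns', 'returns text'])) ==
  #     [('Args', 'args text'), ('Returns', 'returns text')]
  return list(_gen_pairs(['Args', 'args text', 'Returns', 'returns text']))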
class _FunctionDetail(
collections.namedtuple('_FunctionDetail', ['keyword', 'header', 'items'])):
"""A simple class to contain function details.
Composed of a "keyword", a possibly empty "header" string, and a possibly
empty
list of key-value pair "items".
"""
__slots__ = []
def __str__(self):
"""Return the original string that represents the function detail."""
parts = [self.keyword + ':\n']
parts.append(self.header)
for key, value in self.items:
parts.append(' ' + key + ': ')
parts.append(value)
return ''.join(parts)
def _parse_function_details(docstring):
r"""Given a docstring, split off the header and parse the function details.
For example the docstring of tf.nn.relu:
'''Computes rectified linear: `max(features, 0)`.
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
`half`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
'''
This is parsed, and returned as:
```
('Computes rectified linear: `max(features, 0)`.\n\n', [
_FunctionDetail(
keyword='Args',
header='',
items=[
('features', ' A `Tensor`. Must be ...'),
('name', ' A name for the operation (optional).\n\n')]),
_FunctionDetail(
keyword='Returns',
header=' A `Tensor`. Has the same type as `features`.',
items=[])
])
```
Args:
docstring: The docstring to parse
Returns:
A (header, function_details) pair, where header is a string and
function_details is a (possibly empty) list of `_FunctionDetail` objects.
"""
detail_keywords = '|'.join([
'Args', 'Arguments', 'Fields', 'Returns', 'Yields', 'Raises', 'Attributes'
])
tag_re = re.compile('(?<=\n)(' + detail_keywords + '):\n', re.MULTILINE)
parts = tag_re.split(docstring)
# The first part is the main docstring
docstring = parts[0]
# Everything else alternates keyword-content
pairs = list(_gen_pairs(parts[1:]))
function_details = []
item_re = re.compile(r'^ ? ?(\*?\*?\w[\w.]*?\s*):\s', re.MULTILINE)
for keyword, content in pairs:
content = item_re.split(content)
header = content[0]
items = list(_gen_pairs(content[1:]))
function_details.append(_FunctionDetail(keyword, header, items))
return docstring, function_details
_DocstringInfo = collections.namedtuple('_DocstringInfo', [
'brief', 'docstring', 'function_details', 'compatibility'
])
def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver):
"""Parse the object's docstring and return a `_DocstringInfo`.
This function clears @@'s from the docstring, and replaces @{} references
with markdown links.
For links within the same set of docs, the `relative_path_to_root` for a
docstring on the page for `full_name` can be set to:
```python
relative_path_to_root = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
```
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
relative_path_to_root: The relative path from the location of the current
document to the root of the Python API documentation. This is used to
compute links for "@{symbol}" references.
reference_resolver: An instance of ReferenceResolver.
Returns:
A _DocstringInfo object, all fields will be empty if no docstring was found.
"""
# TODO(wicke): If this is a partial, use the .func docstring and add a note.
raw_docstring = _get_raw_docstring(py_object)
raw_docstring = reference_resolver.replace_references(
raw_docstring, relative_path_to_root)
atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
raw_docstring = '\n'.join(
line for line in raw_docstring.split('\n') if not atat_re.match(line))
docstring, compatibility = _handle_compatibility(raw_docstring)
docstring, function_details = _parse_function_details(docstring)
if 'Generated by: tensorflow/tools/api/generator' in docstring:
docstring = ''
return _DocstringInfo(
docstring.split('\n')[0], docstring, function_details, compatibility)
def _get_arg_spec(func):
"""Extracts signature information from a function or functools.partial object.
For functions, uses `tf_inspect.getfullargspec`. For `functools.partial`
objects, corrects the signature of the underlying function to take into
account the removed arguments.
Args:
func: A function whose signature to extract.
Returns:
    A `FullArgSpec` namedtuple `(args, varargs, varkw, defaults, etc.)`,
as returned by `tf_inspect.getfullargspec`.
"""
# getfullargspec does not work for functools.partial objects directly.
if isinstance(func, functools.partial):
argspec = tf_inspect.getfullargspec(func.func)
# Remove the args from the original function that have been used up.
first_default_arg = (
len(argspec.args or []) - len(argspec.defaults or []))
partial_args = len(func.args)
argspec_args = []
if argspec.args:
argspec_args = list(argspec.args[partial_args:])
argspec_defaults = list(argspec.defaults or ())
if argspec.defaults and partial_args > first_default_arg:
argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:])
first_default_arg = max(0, first_default_arg - partial_args)
for kwarg in (func.keywords or []):
if kwarg in (argspec.args or []):
i = argspec_args.index(kwarg)
argspec_args.pop(i)
if i >= first_default_arg:
argspec_defaults.pop(i-first_default_arg)
else:
first_default_arg -= 1
return tf_inspect.FullArgSpec(
args=argspec_args,
varargs=argspec.varargs,
varkw=argspec.varkw,
defaults=tuple(argspec_defaults),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
else: # Regular function or method, getargspec will work fine.
return tf_inspect.getfullargspec(func)
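# Illustrative sketch (not part of the original file): how `_get_arg_spec`
# removes arguments already bound by functools.partial. The function below is
# made up for the example.
def _example_get_arg_spec_for_partial():
  def f(a, b, c=3):  # pylint: disable=unused-argument
    pass
  spec = _get_arg_spec(functools.partial(f, 1))
  # spec.args == ['b', 'c'] and spec.defaults == (3,): the positional `a`
  # already supplied by the partial is dropped from the reported signature.
  return spec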
def _remove_first_line_indent(string):
indent = len(re.match(r'^\s*', string).group(0))
return '\n'.join([line[indent:] for line in string.split('\n')])
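# Illustrative sketch (not part of the original file): stripping the leading
# indentation so nested-function source can be fed to `ast.parse` below.
def _example_remove_first_line_indent():
  # _remove_first_line_indent('  def f():\n    pass') == 'def f():\n  pass'
  return _remove_first_line_indent('  def f():\n    pass')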
PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)')
def _generate_signature(func, reverse_index):
"""Given a function, returns a list of strings representing its args.
This function produces a list of strings representing the arguments to a
python function. It uses tf_inspect.getfullargspec, which
does not generalize well to Python 3.x, which is more flexible in how *args
and **kwargs are handled. This is not a problem in TF, since we have to remain
compatible to Python 2.7 anyway.
This function uses `__name__` for callables if it is available. This can lead
to poor results for functools.partial and other callable objects.
The returned string is Python code, so if it is included in a Markdown
document, it should be typeset as code (using backticks), or escaped.
Args:
func: A function, method, or functools.partial to extract the signature for.
reverse_index: A map from object ids to canonical full names to use.
Returns:
A list of strings representing the argument signature of `func` as python
code.
"""
args_list = []
argspec = _get_arg_spec(func)
first_arg_with_default = (
len(argspec.args or []) - len(argspec.defaults or []))
# Python documentation skips `self` when printing method signatures.
# Note we cannot test for ismethod here since unbound methods do not register
# as methods (in Python 3).
first_arg = 1 if 'self' in argspec.args[:1] else 0
# Add all args without defaults.
for arg in argspec.args[first_arg:first_arg_with_default]:
args_list.append(arg)
# Add all args with defaults.
if argspec.defaults:
try:
source = _remove_first_line_indent(tf_inspect.getsource(func))
func_ast = ast.parse(source)
ast_defaults = func_ast.body[0].args.defaults
except IOError: # If this is a builtin, getsource fails with IOError
# If we cannot get the source, assume the AST would be equal to the repr
# of the defaults.
ast_defaults = [None] * len(argspec.defaults)
for arg, default, ast_default in zip(
argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults):
if id(default) in reverse_index:
default_text = reverse_index[id(default)]
elif ast_default is not None:
default_text = (
astor.to_source(ast_default).rstrip('\n').replace('\t', '\\t')
.replace('\n', '\\n').replace('"""', "'"))
default_text = PAREN_NUMBER_RE.sub('\\1', default_text)
if default_text != repr(default):
# This may be an internal name. If so, handle the ones we know about.
# TODO(wicke): This should be replaced with a lookup in the index.
# TODO(wicke): (replace first ident with tf., check if in index)
internal_names = {
'ops.GraphKeys': 'tf.GraphKeys',
'_ops.GraphKeys': 'tf.GraphKeys',
'init_ops.zeros_initializer': 'tf.zeros_initializer',
'init_ops.ones_initializer': 'tf.ones_initializer',
'saver_pb2.SaverDef': 'tf.train.SaverDef',
}
full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
match = re.match(full_name_re, default_text)
if match:
lookup_text = default_text
for internal_name, public_name in six.iteritems(internal_names):
if match.group(0).startswith(internal_name):
lookup_text = public_name + default_text[len(internal_name):]
break
if default_text is lookup_text:
logging.warn(
'WARNING: Using default arg, failed lookup: %s, repr: %r',
default_text, default)
else:
default_text = lookup_text
else:
default_text = repr(default)
args_list.append('%s=%s' % (arg, default_text))
  # Add *args and **kwargs.
if argspec.varargs:
args_list.append('*' + argspec.varargs)
if argspec.varkw:
args_list.append('**' + argspec.varkw)
return args_list
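# Illustrative sketch (not part of the original file): the kind of argument
# list `_generate_signature` is expected to produce for a simple function,
# assuming source inspection via tf_inspect/astor succeeds. The function and
# helper names are made up; an empty reverse_index is assumed.
def _example_generate_signature():
  def f(a, b=1, *args, **kwargs):  # pylint: disable=unused-argument
    pass
  # Expected result: ['a', 'b=1', '*args', '**kwargs']
  return _generate_signature(f, reverse_index={})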
def _get_guides_markdown(duplicate_names, guide_index, relative_path):
all_guides = []
for name in duplicate_names:
all_guides.extend(guide_index.get(name, []))
if not all_guides: return ''
prefix = '../' * (relative_path.count('/') + 3)
links = sorted(set([guide_ref.make_md_link(prefix)
for guide_ref in all_guides]))
return 'See the guide%s: %s\n\n' % (
's' if len(links) > 1 else '', ', '.join(links))
def _get_defining_class(py_class, name):
for cls in tf_inspect.getmro(py_class):
if name in cls.__dict__:
return cls
return None
class _LinkInfo(
collections.namedtuple(
'_LinkInfo', ['short_name', 'full_name', 'obj', 'doc', 'url'])):
__slots__ = []
def is_link(self):
return True
class _OtherMemberInfo(
collections.namedtuple('_OtherMemberInfo',
['short_name', 'full_name', 'obj', 'doc'])):
__slots__ = []
def is_link(self):
return False
_PropertyInfo = collections.namedtuple(
'_PropertyInfo', ['short_name', 'full_name', 'obj', 'doc'])
_MethodInfo = collections.namedtuple('_MethodInfo', [
'short_name', 'full_name', 'obj', 'doc', 'signature', 'decorators'
])
class _FunctionPageInfo(object):
"""Collects docs For a function Page."""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._signature = None
self._decorators = []
def for_function(self):
return True
def for_class(self):
return False
def for_module(self):
return False
@property
def full_name(self):
return self._full_name
@property
def short_name(self):
return self._full_name.split('.')[-1]
@property
def defined_in(self):
return self._defined_in
def set_defined_in(self, defined_in):
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
return self._aliases
def set_aliases(self, aliases):
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
return self._doc
def set_doc(self, doc):
assert self.doc is None
self._doc = doc
@property
def guides(self):
return self._guides
def set_guides(self, guides):
assert self.guides is None
self._guides = guides
@property
def signature(self):
return self._signature
def set_signature(self, function, reverse_index):
"""Attach the function's signature.
Args:
function: The python function being documented.
reverse_index: A map from object ids in the index to full names.
"""
assert self.signature is None
self._signature = _generate_signature(function, reverse_index)
@property
def decorators(self):
return list(self._decorators)
def add_decorator(self, dec):
self._decorators.append(dec)
def get_metadata_html(self):
return _Metadata(self.full_name).build_html()
class _ClassPageInfo(object):
"""Collects docs for a class page.
Attributes:
full_name: The fully qualified name of the object at the master
location. Aka `master_name`. For example: `tf.nn.sigmoid`.
short_name: The last component of the `full_name`. For example: `sigmoid`.
defined_in: The path to the file where this object is defined.
aliases: The list of all fully qualified names for the locations where the
object is visible in the public api. This includes the master location.
doc: A `_DocstringInfo` object representing the object's docstring (can be
created with `_parse_md_docstring`).
guides: A markdown string, of back links pointing to the api_guides that
reference this object.
bases: A list of `_LinkInfo` objects pointing to the docs for the parent
classes.
properties: A list of `_PropertyInfo` objects documenting the class'
properties (attributes that use `@property`).
methods: A list of `_MethodInfo` objects documenting the class' methods.
classes: A list of `_LinkInfo` objects pointing to docs for any nested
classes.
other_members: A list of `_OtherMemberInfo` objects documenting any other
      objects defined inside the class object (mostly enum style fields).
"""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._namedtuplefields = None
self._bases = None
self._properties = []
self._methods = []
self._classes = []
self._other_members = []
def for_function(self):
"""Returns true if this object documents a function."""
return False
def for_class(self):
"""Returns true if this object documents a class."""
return True
def for_module(self):
"""Returns true if this object documents a module."""
return False
@property
def full_name(self):
"""Returns the documented object's fully qualified name."""
return self._full_name
@property
def short_name(self):
"""Returns the documented object's short name."""
return self._full_name.split('.')[-1]
@property
def defined_in(self):
"""Returns the path to the file where the documented object is defined."""
return self._defined_in
def set_defined_in(self, defined_in):
"""Sets the `defined_in` path."""
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
"""Returns a list of all full names for the documented object."""
return self._aliases
def set_aliases(self, aliases):
"""Sets the `aliases` list.
Args:
aliases: A list of strings. Containing all the object's full names.
"""
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
"""Returns a `_DocstringInfo` created from the object's docstring."""
return self._doc
def set_doc(self, doc):
"""Sets the `doc` field.
Args:
doc: An instance of `_DocstringInfo`.
"""
assert self.doc is None
self._doc = doc
@property
def guides(self):
"""Returns a markdown string containing backlinks to relevant api_guides."""
return self._guides
def set_guides(self, guides):
"""Sets the `guides` field.
Args:
guides: A markdown string containing backlinks to all the api_guides that
link to the documented object.
"""
assert self.guides is None
self._guides = guides
@property
def namedtuplefields(self):
return self._namedtuplefields
def set_namedtuplefields(self, py_class):
if issubclass(py_class, tuple):
if all(
hasattr(py_class, attr)
for attr in ('_asdict', '_fields', '_make', '_replace')):
self._namedtuplefields = py_class._fields
@property
def bases(self):
"""Returns a list of `_LinkInfo` objects pointing to the class' parents."""
return self._bases
def _set_bases(self, relative_path, parser_config):
"""Builds the `bases` attribute, to document this class' parent-classes.
    This method sets the `bases` to a list of `_LinkInfo` objects pointing to the
doc pages for the class' parents.
Args:
relative_path: The relative path from the doc this object describes to
the documentation root.
parser_config: An instance of `ParserConfig`.
"""
bases = []
obj = parser_config.py_name_to_object(self.full_name)
for base in obj.__bases__:
base_full_name = parser_config.reverse_index.get(id(base), None)
if base_full_name is None:
continue
base_doc = _parse_md_docstring(base, relative_path,
parser_config.reference_resolver)
base_url = parser_config.reference_resolver.reference_to_url(
base_full_name, relative_path)
link_info = _LinkInfo(short_name=base_full_name.split('.')[-1],
full_name=base_full_name, obj=base,
doc=base_doc, url=base_url)
bases.append(link_info)
self._bases = bases
@property
def properties(self):
"""Returns a list of `_PropertyInfo` describing the class' properties."""
props_dict = {prop.short_name: prop for prop in self._properties}
props = []
if self.namedtuplefields:
for field in self.namedtuplefields:
props.append(props_dict.pop(field))
props.extend(sorted(props_dict.values()))
return props
def _add_property(self, short_name, full_name, obj, doc):
"""Adds a `_PropertyInfo` entry to the `properties` list.
Args:
short_name: The property's short name.
full_name: The property's fully qualified name.
obj: The property object itself
doc: The property's parsed docstring, a `_DocstringInfo`.
"""
    # Hide useless namedtuple docstrings.
if re.match('Alias for field number [0-9]+', doc.docstring):
doc = doc._replace(docstring='', brief='')
property_info = _PropertyInfo(short_name, full_name, obj, doc)
self._properties.append(property_info)
@property
def methods(self):
"""Returns a list of `_MethodInfo` describing the class' methods."""
return self._methods
def _add_method(self, short_name, full_name, obj, doc, signature, decorators):
"""Adds a `_MethodInfo` entry to the `methods` list.
Args:
short_name: The method's short name.
full_name: The method's fully qualified name.
obj: The method object itself
doc: The method's parsed docstring, a `_DocstringInfo`
signature: The method's parsed signature (see: `_generate_signature`)
decorators: A list of strings describing the decorators that should be
mentioned on the object's docs page.
"""
method_info = _MethodInfo(short_name, full_name, obj, doc, signature,
decorators)
self._methods.append(method_info)
@property
def classes(self):
"""Returns a list of `_LinkInfo` pointing to any nested classes."""
return self._classes
def get_metadata_html(self):
meta_data = _Metadata(self.full_name)
for item in itertools.chain(self.classes, self.properties, self.methods,
self.other_members):
meta_data.append(item)
return meta_data.build_html()
def _add_class(self, short_name, full_name, obj, doc, url):
"""Adds a `_LinkInfo` for a nested class to `classes` list.
Args:
short_name: The class' short name.
full_name: The class' fully qualified name.
obj: The class object itself
doc: The class' parsed docstring, a `_DocstringInfo`
url: A url pointing to where the nested class is documented.
"""
page_info = _LinkInfo(short_name, full_name, obj, doc, url)
self._classes.append(page_info)
@property
def other_members(self):
"""Returns a list of `_OtherMemberInfo` describing any other contents."""
return self._other_members
def _add_other_member(self, short_name, full_name, obj, doc):
"""Adds an `_OtherMemberInfo` entry to the `other_members` list.
Args:
      short_name: The member's short name.
      full_name: The member's fully qualified name.
      obj: The member object itself.
      doc: The member's parsed docstring, a `_DocstringInfo`.
"""
other_member_info = _OtherMemberInfo(short_name, full_name, obj, doc)
self._other_members.append(other_member_info)
def collect_docs_for_class(self, py_class, parser_config):
"""Collects information necessary specifically for a class's doc page.
    Mainly, these are details about the class's members.
Args:
py_class: The class object being documented
parser_config: An instance of ParserConfig.
"""
self.set_namedtuplefields(py_class)
doc_path = documentation_path(self.full_name)
relative_path = os.path.relpath(
path='.', start=os.path.dirname(doc_path) or '.')
self._set_bases(relative_path, parser_config)
for short_name in parser_config.tree[self.full_name]:
# Remove builtin members that we never want to document.
if short_name in [
'__class__', '__base__', '__weakref__', '__doc__', '__module__',
'__dict__', '__abstractmethods__', '__slots__', '__getnewargs__',
'__str__', '__repr__', '__hash__', '__reduce__'
]:
continue
child_name = '.'.join([self.full_name, short_name])
child = parser_config.py_name_to_object(child_name)
# Don't document anything that is defined in object or by protobuf.
defining_class = _get_defining_class(py_class, short_name)
if defining_class in [object, type, tuple, BaseException, Exception]:
continue
# The following condition excludes most protobuf-defined symbols.
if (defining_class and
defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']):
continue
# TODO(markdaoust): Add a note in child docs showing the defining class.
if doc_controls.should_skip_class_attr(py_class, short_name):
continue
child_doc = _parse_md_docstring(child, relative_path,
parser_config.reference_resolver)
if isinstance(child, property):
self._add_property(short_name, child_name, child, child_doc)
elif tf_inspect.isclass(child):
if defining_class is None:
continue
url = parser_config.reference_resolver.reference_to_url(
child_name, relative_path)
self._add_class(short_name, child_name, child, child_doc, url)
elif (tf_inspect.ismethod(child) or tf_inspect.isfunction(child) or
tf_inspect.isroutine(child)):
if defining_class is None:
continue
# Omit methods defined by namedtuple.
original_method = defining_class.__dict__[short_name]
if (hasattr(original_method, '__module__') and
(original_method.__module__ or '').startswith('namedtuple')):
continue
# Some methods are often overridden without documentation. Because it's
# obvious what they do, don't include them in the docs if there's no
# docstring.
if not child_doc.brief.strip() and short_name in [
'__del__', '__copy__'
]:
continue
try:
child_signature = _generate_signature(child,
parser_config.reverse_index)
except TypeError:
# If this is a (dynamically created) slot wrapper, tf_inspect will
        # raise a TypeError when trying to get to the code. Ignore such
# functions.
continue
child_decorators = []
try:
if isinstance(py_class.__dict__[short_name], classmethod):
child_decorators.append('classmethod')
except KeyError:
pass
try:
if isinstance(py_class.__dict__[short_name], staticmethod):
child_decorators.append('staticmethod')
except KeyError:
pass
self._add_method(short_name, child_name, child, child_doc,
child_signature, child_decorators)
else:
# Exclude members defined by protobuf that are useless
if issubclass(py_class, ProtoMessage):
if (short_name.endswith('_FIELD_NUMBER') or
short_name in ['__slots__', 'DESCRIPTOR']):
continue
# TODO(wicke): We may want to also remember the object itself.
self._add_other_member(short_name, child_name, child, child_doc)
class _ModulePageInfo(object):
"""Collects docs for a module page."""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._modules = []
self._classes = []
self._functions = []
self._other_members = []
def for_function(self):
return False
def for_class(self):
return False
def for_module(self):
return True
@property
def full_name(self):
return self._full_name
@property
def short_name(self):
return self._full_name.split('.')[-1]
@property
def defined_in(self):
return self._defined_in
def set_defined_in(self, defined_in):
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
return self._aliases
def set_aliases(self, aliases):
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
return self._doc
def set_doc(self, doc):
assert self.doc is None
self._doc = doc
@property
def guides(self):
return self._guides
def set_guides(self, guides):
assert self.guides is None
self._guides = guides
@property
def modules(self):
return self._modules
def _add_module(self, short_name, full_name, obj, doc, url):
self._modules.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def classes(self):
return self._classes
def _add_class(self, short_name, full_name, obj, doc, url):
self._classes.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def functions(self):
return self._functions
def _add_function(self, short_name, full_name, obj, doc, url):
self._functions.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def other_members(self):
return self._other_members
def _add_other_member(self, short_name, full_name, obj, doc):
self._other_members.append(
_OtherMemberInfo(short_name, full_name, obj, doc))
def get_metadata_html(self):
meta_data = _Metadata(self.full_name)
    # Objects with their own pages are not added to the metadata list for the
    # module; the module only has a link to the object page. No docs.
for item in self.other_members:
meta_data.append(item)
return meta_data.build_html()
def collect_docs_for_module(self, parser_config):
"""Collect information necessary specifically for a module's doc page.
Mainly this is information about the members of the module.
Args:
parser_config: An instance of ParserConfig.
"""
relative_path = os.path.relpath(
path='.',
start=os.path.dirname(documentation_path(self.full_name)) or '.')
member_names = parser_config.tree.get(self.full_name, [])
for name in member_names:
if name in ['__builtins__', '__doc__', '__file__',
'__name__', '__path__', '__package__',
'__cached__', '__loader__', '__spec__']:
continue
member_full_name = self.full_name + '.' + name if self.full_name else name
member = parser_config.py_name_to_object(member_full_name)
member_doc = _parse_md_docstring(member, relative_path,
parser_config.reference_resolver)
url = parser_config.reference_resolver.reference_to_url(
member_full_name, relative_path)
if tf_inspect.ismodule(member):
self._add_module(name, member_full_name, member, member_doc, url)
elif tf_inspect.isclass(member):
self._add_class(name, member_full_name, member, member_doc, url)
elif tf_inspect.isfunction(member):
self._add_function(name, member_full_name, member, member_doc, url)
else:
self._add_other_member(name, member_full_name, member, member_doc)
class ParserConfig(object):
"""Stores all indexes required to parse the docs."""
def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index,
reverse_index, guide_index, base_dir):
"""Object with the common config for docs_for_object() calls.
Args:
reference_resolver: An instance of ReferenceResolver.
duplicates: A `dict` mapping fully qualified names to a set of all
aliases of this name. This is used to automatically generate a list of
all aliases for each name.
duplicate_of: A map from duplicate names to preferred names of API
symbols.
tree: A `dict` mapping a fully qualified name to the names of all its
members. Used to populate the members section of a class or module page.
index: A `dict` mapping full names to objects.
reverse_index: A `dict` mapping object ids to full names.
guide_index: A `dict` mapping symbol name strings to objects with a
`make_md_link()` method.
base_dir: A base path that is stripped from file locations written to the
docs.
"""
self.reference_resolver = reference_resolver
self.duplicates = duplicates
self.duplicate_of = duplicate_of
self.tree = tree
self.reverse_index = reverse_index
self.index = index
self.guide_index = guide_index
self.base_dir = base_dir
self.defined_in_prefix = 'tensorflow/'
self.code_url_prefix = (
'/code/stable/tensorflow/') # pylint: disable=line-too-long
def py_name_to_object(self, full_name):
"""Return the Python object for a Python symbol name."""
return self.index[full_name]
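# Editorial sketch of how the indexes above typically fit together. Every
# value below is a placeholder; in the real pipeline they are produced by the
# API traversal in `generate_lib` before any page is rendered.
#
#   config = ParserConfig(
#       reference_resolver=reference_resolver,
#       duplicates={'tf.add': ['tf.add', 'tf.math.add']},
#       duplicate_of={'tf.math.add': 'tf.add'},
#       tree={'tf': ['add'], 'tf.add': []},
#       index={'tf': tf_module, 'tf.add': add_function},
#       reverse_index={id(add_function): 'tf.add'},
#       guide_index={},
#       base_dir='/path/to/tensorflow')
#   config.py_name_to_object('tf.add')  # -> add_function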
def docs_for_object(full_name, py_object, parser_config):
"""Return a PageInfo object describing a given object from the TF API.
This function uses _parse_md_docstring to parse the docs pertaining to
`object`.
This function resolves '@{symbol}' references in the docstrings into links to
the appropriate location. It also adds a list of alternative names for the
symbol automatically.
It assumes that the docs for each object live in a file given by
`documentation_path`, and that relative links to files within the
documentation are resolvable.
Args:
full_name: The fully qualified name of the symbol to be
documented.
py_object: The Python object to be documented. Its documentation is sourced
from `py_object`'s docstring.
parser_config: A ParserConfig object.
Returns:
Either a `_FunctionPageInfo`, `_ClassPageInfo`, or a `_ModulePageInfo`
depending on the type of the python object being documented.
Raises:
RuntimeError: If an object is encountered for which we don't know how
to make docs.
"""
# Which other aliases exist for the object referenced by full_name?
master_name = parser_config.reference_resolver.py_master_name(full_name)
duplicate_names = parser_config.duplicates.get(master_name, [full_name])
# TODO(wicke): Once other pieces are ready, enable this also for partials.
if (tf_inspect.ismethod(py_object) or tf_inspect.isfunction(py_object) or
# Some methods in classes from extensions come in as routines.
tf_inspect.isroutine(py_object)):
page_info = _FunctionPageInfo(master_name)
page_info.set_signature(py_object, parser_config.reverse_index)
elif tf_inspect.isclass(py_object):
page_info = _ClassPageInfo(master_name)
page_info.collect_docs_for_class(py_object, parser_config)
elif tf_inspect.ismodule(py_object):
page_info = _ModulePageInfo(master_name)
page_info.collect_docs_for_module(parser_config)
else:
raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
py_object))
relative_path = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
page_info.set_doc(_parse_md_docstring(
py_object, relative_path, parser_config.reference_resolver))
page_info.set_aliases(duplicate_names)
page_info.set_guides(_get_guides_markdown(
duplicate_names, parser_config.guide_index, relative_path))
page_info.set_defined_in(_get_defined_in(py_object, parser_config))
return page_info
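# The helper below is an editorial usage sketch, not part of the original
# module: it only shows the typical call pattern for `docs_for_object`.
def _example_docs_for_object_usage(full_name, py_object, parser_config):
  """Editorial usage sketch for `docs_for_object`; not called by the generator.
  Args:
    full_name: Fully qualified name of any documented symbol (placeholder).
    py_object: The corresponding Python object.
    parser_config: A fully populated `ParserConfig`.
  Returns:
    The page-info instance built for the symbol.
  """
  page_info = docs_for_object(full_name, py_object, parser_config)
  if page_info.for_function():
    print(page_info.signature)  # The parsed argument list.
  print(page_info.aliases)  # Every public name the symbol is reachable by.
  print(page_info.get_metadata_html())
  return page_info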
class _PythonBuiltin(object):
"""This class indicated that the object in question is a python builtin.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def is_builtin(self):
return True
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'This is an alias for a Python built-in.\n\n'
class _PythonFile(object):
"""This class indicates that the object is defined in a regular python file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return True
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _ProtoFile(object):
"""This class indicates that the object is defined in a .proto file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _GeneratedFile(object):
"""This class indicates that the object is defined in a generated python file.
Generated files should not be linked to directly.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return True
def __str__(self):
return 'Defined in generated file: `%s%s`.\n\n' % (self.path_prefix,
self.path)
def _get_defined_in(py_object, parser_config):
"""Returns a description of where the passed in python object was defined.
Args:
py_object: The Python object.
parser_config: A ParserConfig object.
Returns:
Either a `_PythonBuiltin`, `_PythonFile`, or a `_GeneratedFile`
"""
# Every page gets a note about where this object is defined
# TODO(wicke): If py_object is decorated, get the decorated object instead.
# TODO(wicke): Only use decorators that support this in TF.
try:
path = os.path.relpath(path=tf_inspect.getfile(py_object),
start=parser_config.base_dir)
except TypeError: # getfile throws TypeError if py_object is a builtin.
return _PythonBuiltin()
# TODO(wicke): If this is a generated file, link to the source instead.
# TODO(wicke): Move all generated files to a generated/ directory.
# TODO(wicke): And make their source file predictable from the file name.
# In case this is compiled, point to the original
if path.endswith('.pyc'):
path = path[:-1]
# Never include links outside this code base.
if path.startswith('..') or re.search(r'\b_api\b', path):
return None
if re.match(r'.*/gen_[^/]*\.py$', path):
return _GeneratedFile(path, parser_config)
if 'genfiles' in path or 'tools/api/generator' in path:
return _GeneratedFile(path, parser_config)
elif re.match(r'.*_pb2\.py$', path):
# The _pb2.py files all appear right next to their defining .proto file.
return _ProtoFile(path[:-7] + '.proto', parser_config)
else:
return _PythonFile(path, parser_config)
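# Editorial sketch of how the checks above classify a few representative
# (placeholder) paths:
#
#   tensorflow/python/ops/gen_math_ops.py   -> _GeneratedFile
#   bazel genfiles / tools/api/generator    -> _GeneratedFile
#   tensorflow/core/framework/graph_pb2.py  -> _ProtoFile ('.../graph.proto')
#   tensorflow/python/ops/array_ops.py      -> _PythonFile
#   a C builtin (getfile raises TypeError)  -> _PythonBuiltin
#   anything outside the tree or under _api -> None (no "defined in" note)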
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
"""Given a dict of full names to python objects, generate an index page.
The index page generated contains a list of links for all symbols in `index`
that have their own documentation page.
Args:
library_name: The name for the documented library to use in the title.
index: A dict mapping full names to python objects.
reference_resolver: An instance of ReferenceResolver.
Returns:
A string containing an index page as Markdown.
"""
symbol_links = []
for full_name, py_object in six.iteritems(index):
if (tf_inspect.ismodule(py_object) or tf_inspect.isfunction(py_object) or
tf_inspect.isclass(py_object)):
# In Python 3, unbound methods are functions, so eliminate those.
if tf_inspect.isfunction(py_object):
if full_name.count('.') == 0:
parent_name = ''
else:
parent_name = full_name[:full_name.rfind('.')]
if parent_name in index and tf_inspect.isclass(index[parent_name]):
# Skip methods (=functions with class parents).
continue
symbol_links.append((
full_name, reference_resolver.python_link(full_name, full_name, '.')))
lines = ['# All symbols in %s' % library_name, '']
for _, link in sorted(symbol_links, key=lambda x: x[0]):
lines.append('* %s' % link)
  # TODO(markdaoust): use a _ModulePageInfo -> pretty_docs.build_md_page()
return '\n'.join(lines)
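# Editorial sketch of a typical call to `generate_global_index`; the inputs
# are assumed to come from a populated `ParserConfig` (see above), and the
# exact link markup depends on `reference_resolver.python_link`.
#
#   md = generate_global_index('TensorFlow', parser_config.index,
#                              parser_config.reference_resolver)
#   # '# All symbols in TensorFlow'
#   # ''
#   # '* <link to tf>'
#   # '* <link to tf.estimator>'
#   # ...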
class _Metadata(object):
"""A class for building a page's Metadata block.
Attributes:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
def __init__(self, name, version='Stable'):
"""Creates a Metadata builder.
Args:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
self.name = name
self.version = version
self._content = []
def append(self, item):
"""Adds an item from the page to the Metadata block.
Args:
item: The parsed page section to add.
"""
self._content.append(item.short_name)
def build_html(self):
"""Returns the Metadata block as an Html string."""
schema = 'http://developers.google.com/ReferenceObject'
parts = ['<div itemscope itemtype="%s">' % schema]
parts.append('<meta itemprop="name" content="%s" />' % self.name)
parts.append('<meta itemprop="path" content="%s" />' % self.version)
for item in self._content:
parts.append('<meta itemprop="property" content="%s"/>' % item)
parts.extend(['</div>', ''])
return '\n'.join(parts)
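# The function below is an editorial usage sketch and is not called anywhere
# in the doc generator; the symbol and property names are placeholders.
def _example_metadata_usage():
  """Shows the HTML block `_Metadata` builds for a page and its members."""
  meta = _Metadata('tf.math.add')
  meta.append(_PropertyInfo(
      short_name='name', full_name='tf.math.add.name', obj=None, doc=None))
  return meta.build_html()
  # Returns:
  # <div itemscope itemtype="http://developers.google.com/ReferenceObject">
  # <meta itemprop="name" content="tf.math.add" />
  # <meta itemprop="path" content="Stable" />
  # <meta itemprop="property" content="name"/>
  # </div>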
| tensorflow-master | tensorflow/tools/docs/parser.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation control decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_controls
class DocControlsTest(googletest.TestCase):
def test_do_not_generate_docs(self):
@doc_controls.do_not_generate_docs
def dummy_function():
pass
self.assertTrue(doc_controls.should_skip(dummy_function))
def test_do_not_doc_on_method(self):
"""The simple decorator is not aware of inheritance."""
class Parent(object):
@doc_controls.do_not_generate_docs
def my_method(self):
pass
class Child(Parent):
def my_method(self):
pass
class GrandChild(Child):
pass
self.assertTrue(doc_controls.should_skip(Parent.my_method))
self.assertFalse(doc_controls.should_skip(Child.my_method))
self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
self.assertFalse(doc_controls.should_skip_class_attr(Child, 'my_method'))
self.assertFalse(
doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
def test_do_not_doc_inheritable(self):
class Parent(object):
@doc_controls.do_not_doc_inheritable
def my_method(self):
pass
class Child(Parent):
def my_method(self):
pass
class GrandChild(Child):
pass
self.assertTrue(doc_controls.should_skip(Parent.my_method))
self.assertFalse(doc_controls.should_skip(Child.my_method))
self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
self.assertTrue(
doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
def test_do_not_doc_inheritable_property(self):
class Parent(object):
@property
@doc_controls.do_not_doc_inheritable
def my_method(self):
pass
class Child(Parent):
@property
def my_method(self):
pass
class GrandChild(Child):
pass
self.assertTrue(doc_controls.should_skip(Parent.my_method))
self.assertFalse(doc_controls.should_skip(Child.my_method))
self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
self.assertTrue(
doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
def test_do_not_doc_inheritable_staticmethod(self):
class GrandParent(object):
def my_method(self):
pass
class Parent(GrandParent):
@staticmethod
@doc_controls.do_not_doc_inheritable
def my_method():
pass
class Child(Parent):
@staticmethod
def my_method():
pass
class GrandChild(Child):
pass
self.assertFalse(doc_controls.should_skip(GrandParent.my_method))
self.assertTrue(doc_controls.should_skip(Parent.my_method))
self.assertFalse(doc_controls.should_skip(Child.my_method))
self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
self.assertFalse(
doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
self.assertTrue(
doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
def test_for_subclass_implementers(self):
class GrandParent(object):
def my_method(self):
pass
class Parent(GrandParent):
@doc_controls.for_subclass_implementers
def my_method(self):
pass
class Child(Parent):
pass
class GrandChild(Child):
def my_method(self):
pass
class Grand2Child(Child):
pass
self.assertFalse(
doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
self.assertFalse(doc_controls.should_skip_class_attr(Parent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
self.assertTrue(
doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
self.assertTrue(
doc_controls.should_skip_class_attr(Grand2Child, 'my_method'))
def test_for_subclass_implementers_short_circuit(self):
class GrandParent(object):
@doc_controls.for_subclass_implementers
def my_method(self):
pass
class Parent(GrandParent):
def my_method(self):
pass
class Child(Parent):
@doc_controls.do_not_doc_inheritable
def my_method(self):
pass
class GrandChild(Child):
@doc_controls.for_subclass_implementers
def my_method(self):
pass
class Grand2Child(Child):
pass
self.assertFalse(
doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
self.assertFalse(
doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
self.assertTrue(
doc_controls.should_skip_class_attr(Grand2Child, 'my_method'))
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/docs/doc_controls_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_guide_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.platform import test
from tensorflow.tools.docs import py_guide_parser
class TestPyGuideParser(py_guide_parser.PyGuideParser):
def __init__(self):
self.calls = []
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, line_number, title):
self.calls.append((line_number, 't', title))
def process_section(self, line_number, section_title, tag):
self.calls.append((line_number, 's', '%s : %s' % (section_title, tag)))
def process_in_blockquote(self, line_number, line):
self.calls.append((line_number, 'b', line))
self.replace_line(line_number, line + ' BQ')
def process_line(self, line_number, line):
self.calls.append((line_number, 'l', line))
class PyGuideParserTest(test.TestCase):
def testBasics(self):
tmp = os.path.join(test.get_temp_dir(), 'py_guide_parser_test.md')
f = open(tmp, 'w')
f.write("""# a title
a line
## a section
```shell
in a blockquote
```
out of blockquote
""")
f.close()
parser = TestPyGuideParser()
result = parser.process(tmp)
expected = """# a title
a line
## a section
```shell BQ
in a blockquote BQ
```
out of blockquote
"""
self.assertEqual(expected, result)
expected = [(0, 't', 'a title'),
(1, 'l', 'a line'),
(2, 's', 'a section : a_section'),
(3, 'b', '```shell'),
(4, 'b', 'in a blockquote'),
(5, 'l', '```'),
(6, 'l', 'out of blockquote'),
(7, 'l', '')]
self.assertEqual(expected, parser.calls)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/tools/docs/py_guide_parser_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run the python doc generator and fail if there are any broken links."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import textwrap
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.python.platform import googletest
from tensorflow.python.platform import resource_loader
from tensorflow.tools.docs import generate_lib
class Flags(object):
resource_root = resource_loader.get_root_dir_with_all_resources()
src_dir = os.path.join(googletest.GetTempDir(), 'input')
os.mkdir(src_dir)
base_dir = os.path.join(resource_root, 'tensorflow/')
output_dir = os.path.join(googletest.GetTempDir(), 'output')
os.mkdir(output_dir)
class BuildDocsTest(googletest.TestCase):
def testBuildDocs(self):
doc_generator = generate_lib.DocGenerator()
doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
try:
status = doc_generator.build(Flags())
except RuntimeError as e:
if not e.args[0].startswith('Modules nested too deep'):
raise
msg = textwrap.dedent("""\
%s
****************************************************************
If this test fails here, you have most likely introduced an
unsealed module. Make sure to use `remove_undocumented` or similar
utilities to avoid leaking symbols. See above for more information
on the exact point of failure.
****************************************************************
""" % e.args[0])
raise RuntimeError(msg)
if status:
self.fail('Found %s Errors!' % status)
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/docs/build_docs_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for doc generator traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate_lib
from tensorflow.tools.docs import parser
def test_function():
"""Docstring for test_function."""
pass
class TestClass(object):
"""Docstring for TestClass itself."""
class ChildClass(object):
"""Docstring for a child class."""
class GrandChildClass(object):
"""Docstring for a child of a child class."""
pass
class DummyVisitor(object):
def __init__(self, index, duplicate_of):
self.index = index
self.duplicate_of = duplicate_of
class GenerateTest(googletest.TestCase):
def get_test_objects(self):
# These are all mutable objects, so rebuild them for each test.
# Don't cache the objects.
module = sys.modules[__name__]
index = {
'tf': sys, # Can be any module, this test doesn't care about content.
'tf.TestModule': module,
'tf.test_function': test_function,
'tf.TestModule.test_function': test_function,
'tf.TestModule.TestClass': TestClass,
'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
'tf.TestModule.TestClass.ChildClass.GrandChildClass':
TestClass.ChildClass.GrandChildClass,
}
tree = {
'tf': ['TestModule', 'test_function'],
'tf.TestModule': ['test_function', 'TestClass'],
'tf.TestModule.TestClass': ['ChildClass'],
'tf.TestModule.TestClass.ChildClass': ['GrandChildClass'],
'tf.TestModule.TestClass.ChildClass.GrandChildClass': []
}
duplicate_of = {'tf.test_function': 'tf.TestModule.test_function'}
duplicates = {
'tf.TestModule.test_function': [
'tf.test_function', 'tf.TestModule.test_function'
]
}
base_dir = os.path.dirname(__file__)
visitor = DummyVisitor(index, duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=duplicates,
duplicate_of=duplicate_of,
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir=base_dir)
return reference_resolver, parser_config
def test_write(self):
_, parser_config = self.get_test_objects()
output_dir = googletest.GetTempDir()
generate_lib.write_docs(output_dir, parser_config, yaml_toc=True,
site_api_path='api_docs/python')
# Check redirects
redirects_file = os.path.join(output_dir, '_redirects.yaml')
self.assertTrue(os.path.exists(redirects_file))
with open(redirects_file) as f:
redirects = f.read()
self.assertEqual(redirects.split(), [
'redirects:', '-', 'from:', '/api_docs/python/tf/test_function', 'to:',
'/api_docs/python/tf/TestModule/test_function'
])
# Make sure that the right files are written to disk.
self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
self.assertTrue(os.path.exists(os.path.join(output_dir, 'tf.md')))
self.assertTrue(os.path.exists(os.path.join(output_dir, '_toc.yaml')))
self.assertTrue(
os.path.exists(os.path.join(output_dir, 'tf/TestModule.md')))
self.assertFalse(
os.path.exists(os.path.join(output_dir, 'tf/test_function.md')))
self.assertTrue(
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/TestClass.md')))
self.assertTrue(
os.path.exists(
os.path.join(output_dir,
'tf/TestModule/TestClass/ChildClass.md')))
self.assertTrue(
os.path.exists(
os.path.join(
output_dir,
'tf/TestModule/TestClass/ChildClass/GrandChildClass.md')))
# Make sure that duplicates are not written
self.assertTrue(
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/test_function.md')))
def test_update_id_tags_inplace(self):
test_dir = googletest.GetTempDir()
test_sub_dir = os.path.join(test_dir, 'a/b')
os.makedirs(test_sub_dir)
test_path1 = os.path.join(test_dir, 'file1.md')
test_path2 = os.path.join(test_sub_dir, 'file2.md')
test_path3 = os.path.join(test_sub_dir, 'file3.notmd')
with open(test_path1, 'w') as f:
f.write('## abc&123')
with open(test_path2, 'w') as f:
f.write('# A Level 1 Heading\n')
f.write('## A Level 2 Heading')
with open(test_path3, 'w') as f:
f.write("## don\'t change this")
generate_lib.update_id_tags_inplace(test_dir)
with open(test_path1) as f:
content = f.read()
self.assertEqual(content, '<h2 id="abc_123">abc&123</h2>')
with open(test_path2) as f:
content = f.read()
self.assertEqual(
content, '# A Level 1 Heading\n'
'<h2 id="A_Level_2_Heading">A Level 2 Heading</h2>')
with open(test_path3) as f:
content = f.read()
self.assertEqual(content, "## don\'t change this")
  def test_replace_refs(self):
test_dir = googletest.GetTempDir()
test_in_dir = os.path.join(test_dir, 'in')
test_in_dir_a = os.path.join(test_dir, 'in/a')
test_in_dir_b = os.path.join(test_dir, 'in/b')
os.makedirs(test_in_dir)
os.makedirs(test_in_dir_a)
os.makedirs(test_in_dir_b)
test_out_dir = os.path.join(test_dir, 'out')
os.makedirs(test_out_dir)
test_path1 = os.path.join(test_in_dir_a, 'file1.md')
test_path2 = os.path.join(test_in_dir_b, 'file2.md')
test_path3 = os.path.join(test_in_dir_b, 'file3.notmd')
test_path4 = os.path.join(test_in_dir_b, 'OWNERS')
with open(test_path1, 'w') as f:
f.write('Use `tf.test_function` to test things.')
with open(test_path2, 'w') as f:
f.write('Use @{tf.TestModule.TestClass.ChildClass} to test things.\n'
"`tf.whatever` doesn't exist")
with open(test_path3, 'w') as f:
file3_content = (
'Not a .md file. Should be copied unchanged:'
'@{tf.TestModule.TestClass.ChildClass}, `tf.test_function`')
f.write(file3_content)
with open(test_path4, 'w') as f:
f.write('')
reference_resolver, _ = self.get_test_objects()
generate_lib.replace_refs(test_in_dir, test_out_dir, reference_resolver,
'*.md')
with open(os.path.join(test_out_dir, 'a/file1.md')) as f:
content = f.read()
self.assertEqual(
content,
'Use <a href="../api_docs/python/tf/TestModule/test_function.md">'
'<code>tf.test_function</code></a> to test things.')
with open(os.path.join(test_out_dir, 'b/file2.md')) as f:
content = f.read()
self.assertEqual(
content,
'Use '
'<a href="../api_docs/python/tf/TestModule/TestClass/ChildClass.md">'
'<code>tf.TestModule.TestClass.ChildClass</code></a> '
'to test things.\n'
'`tf.whatever` doesn\'t exist')
with open(os.path.join(test_out_dir, 'b/file3.notmd')) as f:
content = f.read()
self.assertEqual(content, file3_content)
with self.assertRaises(IOError):
# This should fail. The OWNERS file should not be copied
with open(os.path.join(test_out_dir, 'b/OWNERS')) as f:
content = f.read()
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/docs/generate_lib_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smoke test for reading records from GCS to TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.python.lib.io import file_io
flags = tf.app.flags
flags.DEFINE_string("gcs_bucket_url", "",
"The URL to the GCS bucket in which the temporary "
"tfrecord file is to be written and read, e.g., "
"gs://my-gcs-bucket/test-directory")
flags.DEFINE_integer("num_examples", 10, "Number of examples to generate")
FLAGS = flags.FLAGS
def create_examples(num_examples, input_mean):
"""Create ExampleProto's containing data."""
ids = np.arange(num_examples).reshape([num_examples, 1])
inputs = np.random.randn(num_examples, 1) + input_mean
target = inputs - input_mean
examples = []
for row in range(num_examples):
ex = example_pb2.Example()
ex.features.feature["id"].bytes_list.value.append(str(ids[row, 0]))
ex.features.feature["target"].float_list.value.append(target[row, 0])
ex.features.feature["inputs"].float_list.value.append(inputs[row, 0])
examples.append(ex)
return examples
def create_dir_test():
"""Verifies file_io directory handling methods."""
# Test directory creation.
starttime_ms = int(round(time.time() * 1000))
dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
print("Creating dir %s" % dir_name)
file_io.create_dir(dir_name)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Created directory in: %d milliseconds" % elapsed_ms)
# Check that the directory exists.
dir_exists = file_io.is_directory(dir_name)
assert dir_exists
print("%s directory exists: %s" % (dir_name, dir_exists))
# Test recursive directory creation.
starttime_ms = int(round(time.time() * 1000))
recursive_dir_name = "%s/%s/%s" % (dir_name,
"nested_dir1",
"nested_dir2")
print("Creating recursive dir %s" % recursive_dir_name)
file_io.recursive_create_dir(recursive_dir_name)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Created directory recursively in: %d milliseconds" % elapsed_ms)
# Check that the directory exists.
recursive_dir_exists = file_io.is_directory(recursive_dir_name)
assert recursive_dir_exists
print("%s directory exists: %s" % (recursive_dir_name, recursive_dir_exists))
# Create some contents in the just created directory and list the contents.
num_files = 10
files_to_create = ["file_%d.txt" % n for n in range(num_files)]
for file_num in files_to_create:
file_name = "%s/%s" % (dir_name, file_num)
print("Creating file %s." % file_name)
file_io.write_string_to_file(file_name, "test file.")
print("Listing directory %s." % dir_name)
starttime_ms = int(round(time.time() * 1000))
directory_contents = file_io.list_directory(dir_name)
print(directory_contents)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Listed directory %s in %s milliseconds" % (dir_name, elapsed_ms))
assert set(directory_contents) == set(files_to_create + ["nested_dir1/"])
# Test directory renaming.
dir_to_rename = "%s/old_dir" % dir_name
new_dir_name = "%s/new_dir" % dir_name
file_io.create_dir(dir_to_rename)
assert file_io.is_directory(dir_to_rename)
assert not file_io.is_directory(new_dir_name)
starttime_ms = int(round(time.time() * 1000))
print("Will try renaming directory %s to %s" % (dir_to_rename, new_dir_name))
file_io.rename(dir_to_rename, new_dir_name)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Renamed directory %s to %s in %s milliseconds" % (
dir_to_rename, new_dir_name, elapsed_ms))
assert not file_io.is_directory(dir_to_rename)
assert file_io.is_directory(new_dir_name)
# Test Delete directory recursively.
print("Deleting directory recursively %s." % dir_name)
starttime_ms = int(round(time.time() * 1000))
file_io.delete_recursively(dir_name)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
dir_exists = file_io.is_directory(dir_name)
assert not dir_exists
print("Deleted directory recursively %s in %s milliseconds" % (
dir_name, elapsed_ms))
def create_object_test():
"""Verifies file_io's object manipulation methods ."""
starttime_ms = int(round(time.time() * 1000))
dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
print("Creating dir %s." % dir_name)
file_io.create_dir(dir_name)
num_files = 5
# Create files of 2 different patterns in this directory.
files_pattern_1 = ["%s/test_file_%d.txt" % (dir_name, n)
for n in range(num_files)]
files_pattern_2 = ["%s/testfile%d.txt" % (dir_name, n)
for n in range(num_files)]
starttime_ms = int(round(time.time() * 1000))
files_to_create = files_pattern_1 + files_pattern_2
for file_name in files_to_create:
print("Creating file %s." % file_name)
file_io.write_string_to_file(file_name, "test file creation.")
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Created %d files in %s milliseconds" % (
len(files_to_create), elapsed_ms))
# Listing files of pattern1.
list_files_pattern = "%s/test_file*.txt" % dir_name
print("Getting files matching pattern %s." % list_files_pattern)
starttime_ms = int(round(time.time() * 1000))
files_list = file_io.get_matching_files(list_files_pattern)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Listed files in %s milliseconds" % elapsed_ms)
print(files_list)
assert set(files_list) == set(files_pattern_1)
# Listing files of pattern2.
list_files_pattern = "%s/testfile*.txt" % dir_name
print("Getting files matching pattern %s." % list_files_pattern)
starttime_ms = int(round(time.time() * 1000))
files_list = file_io.get_matching_files(list_files_pattern)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("Listed files in %s milliseconds" % elapsed_ms)
print(files_list)
assert set(files_list) == set(files_pattern_2)
# Test renaming file.
file_to_rename = "%s/oldname.txt" % dir_name
file_new_name = "%s/newname.txt" % dir_name
file_io.write_string_to_file(file_to_rename, "test file.")
assert file_io.file_exists(file_to_rename)
assert not file_io.file_exists(file_new_name)
print("Will try renaming file %s to %s" % (file_to_rename, file_new_name))
starttime_ms = int(round(time.time() * 1000))
file_io.rename(file_to_rename, file_new_name)
elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
print("File %s renamed to %s in %s milliseconds" % (
file_to_rename, file_new_name, elapsed_ms))
assert not file_io.file_exists(file_to_rename)
assert file_io.file_exists(file_new_name)
# Delete directory.
print("Deleting directory %s." % dir_name)
file_io.delete_recursively(dir_name)
def main(argv):
del argv # Unused.
# Sanity check on the GCS bucket URL.
if not FLAGS.gcs_bucket_url or not FLAGS.gcs_bucket_url.startswith("gs://"):
print("ERROR: Invalid GCS bucket URL: \"%s\"" % FLAGS.gcs_bucket_url)
sys.exit(1)
# Generate random tfrecord path name.
input_path = FLAGS.gcs_bucket_url + "/"
input_path += "".join(random.choice("0123456789ABCDEF") for i in range(8))
input_path += ".tfrecord"
print("Using input path: %s" % input_path)
# Verify that writing to the records file in GCS works.
print("\n=== Testing writing and reading of GCS record file... ===")
example_data = create_examples(FLAGS.num_examples, 5)
with tf.python_io.TFRecordWriter(input_path) as hf:
for e in example_data:
hf.write(e.SerializeToString())
print("Data written to: %s" % input_path)
# Verify that reading from the tfrecord file works and that
# tf_record_iterator works.
record_iter = tf.python_io.tf_record_iterator(input_path)
read_count = 0
for _ in record_iter:
read_count += 1
print("Read %d records using tf_record_iterator" % read_count)
if read_count != FLAGS.num_examples:
print("FAIL: The number of records read from tf_record_iterator (%d) "
"differs from the expected number (%d)" % (read_count,
FLAGS.num_examples))
sys.exit(1)
# Verify that running the read op in a session works.
print("\n=== Testing TFRecordReader.read op in a session... ===")
with tf.Graph().as_default():
filename_queue = tf.train.string_input_producer([input_path], num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
index = 0
for _ in range(FLAGS.num_examples):
print("Read record: %d" % index)
sess.run(serialized_example)
index += 1
# Reading one more record should trigger an exception.
try:
sess.run(serialized_example)
print("FAIL: Failed to catch the expected OutOfRangeError while "
"reading one more record than is available")
sys.exit(1)
except tf.errors.OutOfRangeError:
print("Successfully caught the expected OutOfRangeError while "
"reading one more record than is available")
create_dir_test()
create_object_test()
if __name__ == "__main__":
tf.app.run(main)
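# Example invocation (editorial note; the bucket URL is a placeholder for any
# writable GCS location the credentials in use can access):
#
#   python gcs_smoke.py \
#       --gcs_bucket_url=gs://my-gcs-bucket/test-directory \
#       --num_examples=10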
| tensorflow-master | tensorflow/tools/gcs_test/python/gcs_smoke.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
"""Visitor to use with `traverse` to visit exactly the public TF API."""
def __init__(self, visitor):
"""Constructor.
`visitor` should be a callable suitable as a visitor for `traverse`. It will
be called only for members of the public TensorFlow API.
Args:
visitor: A visitor to call for the public API.
"""
self._visitor = visitor
self._root_name = 'tf'
# Modules/classes we want to suppress entirely.
self._private_map = {
# Some implementations have this internal module that we shouldn't
# expose.
'tf.flags': ['cpp_flags'],
}
# Modules/classes we do not want to descend into if we hit them. Usually,
# system modules exposed through platforms for compatibility reasons.
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
'compiler',
'core',
'examples',
'flags', # Don't add flags
# TODO(drpng): This can be removed once sealed off.
'platform',
# TODO(drpng): This can be removed once sealed.
'pywrap_tensorflow',
# TODO(drpng): This can be removed once sealed.
'user_ops',
'python',
'tools',
'tensorboard',
],
## Everything below here is legitimate.
# It'll stay, but it's not officially part of the API.
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
# Externalized modules of the Keras API.
'tf.keras': ['applications', 'preprocessing']
}
@property
def private_map(self):
"""A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
"""
return self._private_map
@property
def do_not_descend_map(self):
"""A map from parents to symbols that should not be descended into.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not explore.
"""
return self._do_not_descend_map
def set_root_name(self, root_name):
"""Override the default root name of 'tf'."""
self._root_name = root_name
def _is_private(self, path, name, obj=None):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
del obj # Unused.
return ((path in self._private_map and
name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
name in ['__base__', '__class__']))
def _do_not_descend(self, path, name):
"""Safely queries if a specific fully qualified name should be excluded."""
return (path in self._do_not_descend_map and
name in self._do_not_descend_map[path])
def __call__(self, path, parent, children):
"""Visitor interface, see `traverse` for details."""
# Avoid long waits in cases of pretty unambiguous failure.
if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
'problem with an accidental public import.' %
(self._root_name, path))
# Includes self._root_name
full_path = '.'.join([self._root_name, path]) if path else self._root_name
# Remove things that are not visible.
for name, child in list(children):
if self._is_private(full_path, name, child):
children.remove((name, child))
self._visitor(path, parent, children)
# Remove things that are visible, but which should not be descended into.
for name, child in list(children):
if self._do_not_descend(full_path, name):
children.remove((name, child))
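# A minimal usage sketch (editorial, not part of the module): wrap an ordinary
# visitor callable so that traversal of the `tf` namespace skips private and
# do-not-descend symbols. `_print_visitor` is a made-up example callable; the
# sibling `traverse` module provides the actual walk.
#
#   import tensorflow as tf
#   from tensorflow.tools.common import traverse
#
#   def _print_visitor(path, parent, children):
#     print(path or '<root>', len(children))
#
#   traverse.traverse(tf, PublicAPIVisitor(_print_visitor))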
| tensorflow-master | tensorflow/tools/common/public_api.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python module traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tools.common import test_module1
from tensorflow.tools.common import test_module2
from tensorflow.tools.common import traverse
class TestVisitor(object):
def __init__(self):
self.call_log = []
def __call__(self, path, parent, children):
self.call_log += [(path, parent, children)]
class TraverseTest(googletest.TestCase):
def test_cycle(self):
class Cyclist(object):
pass
Cyclist.cycle = Cyclist
visitor = TestVisitor()
traverse.traverse(Cyclist, visitor)
# We simply want to make sure we terminate.
def test_module(self):
visitor = TestVisitor()
traverse.traverse(test_module1, visitor)
called = [parent for _, parent, _ in visitor.call_log]
self.assertIn(test_module1.ModuleClass1, called)
self.assertIn(test_module2.ModuleClass2, called)
def test_class(self):
visitor = TestVisitor()
traverse.traverse(TestVisitor, visitor)
self.assertEqual(TestVisitor,
visitor.call_log[0][1])
# There are a bunch of other members, but make sure that the ones we know
# about are there.
self.assertIn('__init__', [name for name, _ in visitor.call_log[0][2]])
self.assertIn('__call__', [name for name, _ in visitor.call_log[0][2]])
# There are more classes descended into, at least __class__ and
# __class__.__base__, neither of which are interesting to us, and which may
# change as part of Python version etc., so we don't test for them.
def test_non_class(self):
integer = 5
visitor = TestVisitor()
traverse.traverse(integer, visitor)
self.assertEqual([], visitor.call_log)
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/common/traverse_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module target for TraverseTest.test_module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.common import test_module2
class ModuleClass1(object):
def __init__(self):
self._m2 = test_module2.ModuleClass2()
def __model_class1_method__(self):
pass
| tensorflow-master | tensorflow/tools/common/test_module1.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Traversing Python modules and classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import sys
from tensorflow.python.util import tf_inspect
__all__ = ['traverse']
def _traverse_internal(root, visit, stack, path):
"""Internal helper for traverse."""
# Only traverse modules and classes
if not tf_inspect.isclass(root) and not tf_inspect.ismodule(root):
return
try:
children = tf_inspect.getmembers(root)
# Add labels for duplicate values in Enum.
if tf_inspect.isclass(root) and issubclass(root, enum.Enum):
for enum_member in root.__members__.items():
if enum_member not in children:
children.append(enum_member)
children = sorted(children)
except ImportError:
# On some Python installations, some modules do not support enumerating
# members (six in particular), leading to import errors.
children = []
new_stack = stack + [root]
visit(path, root, children)
for name, child in children:
# Do not descend into built-in modules
if tf_inspect.ismodule(
child) and child.__name__ in sys.builtin_module_names:
continue
# Break cycles
if any(child is item for item in new_stack): # `in`, but using `is`
continue
child_path = path + '.' + name if path else name
_traverse_internal(child, visit, new_stack, child_path)
def traverse(root, visit):
"""Recursively enumerate all members of `root`.
Similar to the Python library function `os.path.walk`.
Traverses the tree of Python objects starting with `root`, depth first.
Parent-child relationships in the tree are defined by membership in modules or
classes. The function `visit` is called with arguments
`(path, parent, children)` for each module or class `parent` found in the tree
of python objects starting with `root`. `path` is a string containing the name
with which `parent` is reachable from the current context. For example, if
`root` is a local class called `X` which contains a class `Y`, `visit` will be
  called with `('Y', X.Y, children)`.
If `root` is not a module or class, `visit` is never called. `traverse`
never descends into built-in modules.
  `children`, a list of `(name, object)` pairs, is determined by
`tf_inspect.getmembers`. To avoid visiting parts of the tree, `children` can
be modified in place, using `del` or slice assignment.
Cycles (determined by reference equality, `is`) stop the traversal. A stack of
objects is kept to find cycles. Objects forming cycles may appear in
`children`, but `visit` will not be called with any object as `parent` which
is already in the stack.
  Traversing system modules can take a long time; it is advisable to pass a
  `visit` callable that blacklists such modules.
Args:
root: A python object with which to start the traversal.
visit: A function taking arguments `(path, parent, children)`. Will be
called for each object found in the traversal.
"""
_traverse_internal(root, visit, [], '')
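# A minimal usage sketch of `traverse`: collect the dotted path of every
# module or class reachable from a root object. The helper name below is
# illustrative only.
def _example_collect_paths(root):
  """Returns the paths passed to the visitor during traversal."""
  visited = []
  def _visit(path, parent, children):
    del parent, children  # Unused; only the path is recorded.
    visited.append(path)
  traverse(root, _visit)
  return visited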
| tensorflow-master | tensorflow/tools/common/traverse.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module target for TraverseTest.test_module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ModuleClass2(object):
def __init__(self):
pass
def __model_class1_method__(self):
pass
| tensorflow-master | tensorflow/tools/common/test_module2.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.tools.common.public_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tools.common import public_api
class PublicApiTest(googletest.TestCase):
class TestVisitor(object):
def __init__(self):
self.symbols = set()
self.last_parent = None
self.last_children = None
def __call__(self, path, parent, children):
self.symbols.add(path)
self.last_parent = parent
self.last_children = list(children) # Make a copy to preserve state.
def test_call_forward(self):
visitor = self.TestVisitor()
children = [('name1', 'thing1'), ('name2', 'thing2')]
public_api.PublicAPIVisitor(visitor)('test', 'dummy', children)
self.assertEqual(set(['test']), visitor.symbols)
self.assertEqual('dummy', visitor.last_parent)
self.assertEqual([('name1', 'thing1'), ('name2', 'thing2')],
visitor.last_children)
def test_private_child_removal(self):
visitor = self.TestVisitor()
children = [('name1', 'thing1'), ('_name2', 'thing2')]
public_api.PublicAPIVisitor(visitor)('test', 'dummy', children)
# Make sure the private symbols are removed before the visitor is called.
self.assertEqual([('name1', 'thing1')], visitor.last_children)
self.assertEqual([('name1', 'thing1')], children)
def test_no_descent_child_removal(self):
visitor = self.TestVisitor()
children = [('name1', 'thing1'), ('mock', 'thing2')]
public_api.PublicAPIVisitor(visitor)('test', 'dummy', children)
# Make sure not-to-be-descended-into symbols are removed after the visitor
# is called.
self.assertEqual([('name1', 'thing1'), ('mock', 'thing2')],
visitor.last_children)
self.assertEqual([('name1', 'thing1')], children)
if __name__ == '__main__':
googletest.main()
| tensorflow-master | tensorflow/tools/common/public_api_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a Python module containing information about the build."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
def write_build_info(filename, build_config, key_value_list):
"""Writes a Python that describes the build.
Args:
filename: filename to write to.
build_config: A string that represents the config used in this build (e.g.
"cuda").
key_value_list: A list of "key=value" strings that will be added to the
module as additional fields.
Raises:
ValueError: If `key_value_list` includes the key "is_cuda_build", which
would clash with one of the default fields.
"""
module_docstring = "\"\"\"Generates a Python module containing information "
module_docstring += "about the build.\"\"\""
if build_config == "cuda":
build_config_bool = "True"
else:
build_config_bool = "False"
key_value_pair_stmts = []
if key_value_list:
for arg in key_value_list:
key, value = arg.split("=")
if key == "is_cuda_build":
raise ValueError("The key \"is_cuda_build\" cannot be passed as one of "
"the --key_value arguments.")
key_value_pair_stmts.append("%s = %r" % (key, value))
key_value_pair_content = "\n".join(key_value_pair_stmts)
contents = """
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
%s
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
is_cuda_build = %s
%s
""" % (module_docstring, build_config_bool, key_value_pair_content)
  with open(filename, "w") as f:
    f.write(contents)
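# A minimal usage sketch (the output path and extra key are illustrative):
# generating a build-info module for a CPU build. The resulting module defines
# `is_cuda_build = False` and `build_time = 'unknown'`.
def _example_write_build_info():
  write_build_info(
      filename="/tmp/example_build_info.py",
      build_config="cpu",
      key_value_list=["build_time=unknown"])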
parser = argparse.ArgumentParser(
description="""Build info injection into the PIP package.""")
parser.add_argument(
"--build_config",
type=str,
help="Either 'cuda' for GPU builds or 'cpu' for CPU builds.")
parser.add_argument("--raw_generate", type=str, help="Generate build_info.py")
parser.add_argument("--key_value", type=str, nargs="*",
help="List of key=value pairs.")
args = parser.parse_args()
if args.raw_generate is not None and args.build_config is not None:
write_build_info(args.raw_generate, args.build_config, args.key_value)
else:
raise RuntimeError("--raw_generate and --build_config must be used")
| tensorflow-master | tensorflow/tools/build_info/gen_build_info.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests deprecation warnings in a few special cases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunction(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.initializers.tables_initializer()
self.assertEqual(0, mock_warning.call_count)
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"tables_initializer")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.tables_initializer")
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClass(self, mock_warning):
value = np.array([1, 2, 3])
row_splits = np.array([1])
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(0, mock_warning.call_count)
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"ragged.RaggedTensorValue")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.ragged.RaggedTensorValue")
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunctionEndpoint(self, mock_warning):
array = tf.IndexedSlices(
tf.compat.v1.convert_to_tensor(np.array([1, 2])),
tf.compat.v1.convert_to_tensor(np.array([0, 2])))
mask_indices = tf.compat.v1.convert_to_tensor(np.array([2]))
self.assertEqual(0, mock_warning.call_count)
tf.sparse.mask(array, mask_indices)
self.assertEqual(0, mock_warning.call_count)
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"sparse_mask")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
"sparse.mask")
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClassEndpoint(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.io.VarLenFeature(tf.dtypes.int32)
self.assertEqual(0, mock_warning.call_count)
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"VarLenFeature")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"io.VarLenFeature")
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedConstantEndpoint(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY # pylint: disable=pointless-statement
self.assertEqual(0, mock_warning.call_count)
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY # pylint: disable=pointless-statement
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2],
r"saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY")
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY # pylint: disable=pointless-statement
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testKerasDeprecationNoWarning(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.keras.layers.GRUCell(20)
self.assertLessEqual(mock_warning.call_count, 1)
if mock_warning.call_count == 1:
# The only message printed should be due to referencing init op.
self.assertRegexpMatches(
mock_warning.call_args[0][-1],
"Call initializer instance with the dtype argument instead of "
"passing it to the constructor")
@test.mock.patch.object(logging, "warning", autospec=True)
def testKerasDeprecation(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.keras.backend.get_session()
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][-1],
"tf.compat.v1.keras.backend.get_session")
tf.keras.backend.get_session()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testKerasEndpointDeprecation(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.keras.metrics.cosine_proximity([0.5], [0.5])
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][-1],
"tf.keras.losses.cosine_similarity")
tf.keras.metrics.cosine_proximity([0.5], [0.5])
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testEstimatorDeprecation(self, mock_warning):
if "KMeans" in tf.estimator.experimental.__dict__:
self.assertEqual(0, mock_warning.call_count)
tf.estimator.experimental.KMeans(2)
self.assertEqual(2, mock_warning.call_count)
# First message is not a deprecation warning.
self.assertRegexpMatches(
mock_warning.call_args_list[1][0][0],
"Using temporary folder as model directory:")
# Second message is a deprecation warning.
self.assertRegexpMatches(
mock_warning.call_args_list[0][0][-1],
"tf.compat.v1.estimator.experimental.KMeans")
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/tools/api/tests/deprecation_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with the
"--update_goldens" flag set to "True" to update the goldens when making
changes to the public TF Python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import six
import tensorflow as tf
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, only_test_core_api, default False:
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no guarantee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
_API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
_NON_CORE_PACKAGES = ['estimator']
# TODO(annarev): remove this once we test with newer version of
# estimator that actually has compat v1 version.
if not hasattr(tf.compat.v1, 'estimator'):
tf.compat.v1.estimator = tf.estimator
tf.compat.v2.estimator = tf.estimator
def _KeyToFilePath(key, api_version):
"""From a given key, construct a filepath.
Filepath will be inside golden folder for api_version.
Args:
key: a string used to determine the file path
api_version: a number indicating the tensorflow API version, e.g. 1 or 2.
Returns:
A string of file path to the pbtxt file which describes the public API
"""
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
api_folder = (
_API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1)
return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,
base_filename_without_ext)
return api_object_key
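# Illustrative round trip between API keys and golden file paths (the key used
# here is only an example):
#   _KeyToFilePath('tensorflow.SparseTensor', 2)
#       -> 'tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor.pbtxt'
#   _FileNameToKey('tensorflow.-sparse-tensor.pbtxt')
#       -> 'tensorflow.SparseTensor'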
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
"""A Visitor that crashes on subclasses of generated proto classes."""
# If the traversed object is a proto Message class
if not (isinstance(parent, type) and issubclass(parent, message.Message)):
return
if parent is message.Message:
return
# Check that it is a direct subclass of Message.
if message.Message not in parent.__bases__:
raise NotImplementedError(
'Object tf.%s is a subclass of a generated proto Message. '
'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
"""Filter out non-core API pbtxt files."""
filtered_file_list = []
filtered_package_prefixes = ['tensorflow.%s.' % p for p in _NON_CORE_PACKAGES]
for f in golden_file_list:
if any(
f.rsplit('/')[-1].startswith(pre) for pre in filtered_package_prefixes
):
continue
filtered_file_list.append(f)
return filtered_file_list
def _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):
"""Filter out golden proto dict symbols that should be omitted."""
if not omit_golden_symbols_map:
return golden_proto_dict
filtered_proto_dict = dict(golden_proto_dict)
for key, symbol_list in six.iteritems(omit_golden_symbols_map):
api_object = api_objects_pb2.TFAPIObject()
api_object.CopyFrom(filtered_proto_dict[key])
filtered_proto_dict[key] = api_object
module_or_class = None
if api_object.HasField('tf_module'):
module_or_class = api_object.tf_module
elif api_object.HasField('tf_class'):
module_or_class = api_object.tf_class
if module_or_class is not None:
for members in (module_or_class.member, module_or_class.member_method):
filtered_members = [m for m in members if m.name not in symbol_list]
# Two steps because protobuf repeated fields disallow slice assignment.
del members[:]
members.extend(filtered_members)
return filtered_proto_dict
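# Illustrative example (the key and symbol names are hypothetical): with
#   omit_golden_symbols_map = {'tensorflow.summary': ['audio', 'image']}
# the members named 'audio' and 'image' are dropped from the golden proto
# stored under 'tensorflow.summary' before the comparison runs.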
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False,
additional_missing_object_message='',
api_version=2):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed from golden files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
update_goldens: Whether to update goldens when there are diffs found.
additional_missing_object_message: Message to print when a symbol is
missing.
api_version: TensorFlow API version to test.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed). %s' % (
key, additional_missing_object_message)
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Do not truncate diff
self.maxDiff = None # pylint: disable=invalid-name
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
messages = verbose_diffs if verbose else diffs
for i in range(diff_count):
print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key, api_version)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key, api_version)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Fail if we cannot fix the test by updating goldens.
self.fail('%d differences found between API and golden.' % diff_count)
else:
logging.info('No differences found between API and golden.')
def testNoSubclassOfMessage(self):
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
# Skip compat.v1 and compat.v2 since they are validated in separate tests.
visitor.private_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
def testNoSubclassOfMessageV1(self):
if not hasattr(tf.compat, 'v1'):
return
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
if FLAGS.only_test_core_api:
visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
traverse.traverse(tf.compat.v1, visitor)
def testNoSubclassOfMessageV2(self):
if not hasattr(tf.compat, 'v2'):
return
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
if FLAGS.only_test_core_api:
visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
visitor.private_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf.compat.v2, visitor)
def _checkBackwardsCompatibility(self,
root,
golden_file_pattern,
api_version,
additional_private_map=None,
omit_golden_symbols_map=None):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.private_map['tf'] = ['contrib']
if api_version == 2:
public_api_visitor.private_map['tf'].append('enable_v2_behavior')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
if FLAGS.only_test_core_api:
public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
if additional_private_map:
public_api_visitor.private_map.update(additional_private_map)
traverse.traverse(root, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
golden_file_list = file_io.get_matching_files(golden_file_pattern)
if FLAGS.only_test_core_api:
golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,
omit_golden_symbols_map)
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens,
api_version=api_version)
@test_util.run_v1_only('b/120545219')
def testAPIBackwardsCompatibility(self):
api_version = 2 if '_api.v2' in tf.bitwise.__name__ else 1
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
omit_golden_symbols_map = {}
if api_version == 2 and FLAGS.only_test_core_api:
# In TF 2.0 these summary symbols are imported from TensorBoard.
omit_golden_symbols_map['tensorflow.summary'] = [
'audio', 'histogram', 'image', 'scalar', 'text']
self._checkBackwardsCompatibility(
tf,
golden_file_pattern,
api_version,
# Skip compat.v1 and compat.v2 since they are validated
# in separate tests.
additional_private_map={'tf.compat': ['v1', 'v2']},
omit_golden_symbols_map=omit_golden_symbols_map)
# Also check that V1 API has contrib
self.assertTrue(
'tensorflow.python.util.lazy_loader.LazyLoader'
in str(type(tf.contrib)))
@test_util.run_v1_only('b/120545219')
def testAPIBackwardsCompatibilityV1(self):
api_version = 1
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
self._checkBackwardsCompatibility(
tf.compat.v1, golden_file_pattern, api_version,
additional_private_map={'tf': ['pywrap_tensorflow']},
omit_golden_symbols_map={'tensorflow': ['pywrap_tensorflow']})
def testAPIBackwardsCompatibilityV2(self):
api_version = 2
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
omit_golden_symbols_map = {}
if FLAGS.only_test_core_api:
# In TF 2.0 these summary symbols are imported from TensorBoard.
omit_golden_symbols_map['tensorflow.summary'] = [
'audio', 'histogram', 'image', 'scalar', 'text']
self._checkBackwardsCompatibility(
tf.compat.v2,
golden_file_pattern,
api_version,
additional_private_map={'tf.compat': ['v1', 'v2']},
omit_golden_symbols_map=omit_golden_symbols_map)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
# TODO(mikecase): Create Estimator's own API compatibility test or
# a more general API compatibility test for use for TF components.
parser.add_argument(
'--only_test_core_api',
type=bool,
default=True, # only_test_core_api default value
help=_ONLY_TEST_CORE_API_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
| tensorflow-master | tensorflow/tools/api/tests/api_compatibility_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""A visitor class that generates protobufs for each python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import enum
from google.protobuf import message
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.tools.api.lib import api_objects_pb2
# The following objects need to be handled individually.
_CORNER_CASES = {
'': {
'tools': {}
},
'test.TestCase': {},
'test.TestCase.failureException': {},
'train.NanLossDuringTrainingError': {
'message': {}
},
'estimator.NanLossDuringTrainingError': {
'message': {}
},
'train.LooperThread': {
'join': {}
}
}
# Python 2 vs. 3 differences
if sys.version_info.major == 3:
_NORMALIZE_TYPE = {}
for t in ('property', 'object', 'getset_descriptor', 'int', 'str', 'type',
'tuple', 'module', 'collections.defaultdict', 'set', 'dict',
'NoneType', 'frozenset'):
_NORMALIZE_TYPE["<class '%s'>" % t] = "<type '%s'>" % t
for e in 'Exception', 'RuntimeError':
_NORMALIZE_TYPE["<class '%s'>" % e] = "<type 'exceptions.%s'>" % e
_NORMALIZE_TYPE["<class 'abc.ABCMeta'>"] = "<type 'type'>"
_NORMALIZE_ISINSTANCE = {
"<class "
"'tensorflow.lite.python.op_hint.OpHint.OpHintArgumentTracker'>": # pylint: disable=line-too-long
"<class "
"'tensorflow.lite.python.op_hint.OpHintArgumentTracker'>",
"<class "
"'tensorflow.python.training.monitored_session._MonitoredSession.StepContext'>": # pylint: disable=line-too-long
"<class "
"'tensorflow.python.training.monitored_session.StepContext'>",
"<class "
"'tensorflow.python.ops.variables.Variable.SaveSliceInfo'>":
"<class "
"'tensorflow.python.ops.variables.SaveSliceInfo'>"
}
def _SkipMember(cls, member):
return (member == 'with_traceback' or member in ('name', 'value') and
isinstance(cls, type) and issubclass(cls, enum.Enum))
else:
_NORMALIZE_TYPE = {"<class 'abc.ABCMeta'>": "<type 'type'>"}
_NORMALIZE_ISINSTANCE = {}
def _SkipMember(cls, member): # pylint: disable=unused-argument
return False
def _NormalizeType(ty):
return _NORMALIZE_TYPE.get(ty, ty)
def _NormalizeIsInstance(ty):
return _NORMALIZE_ISINSTANCE.get(ty, ty)
def _SanitizedArgSpec(obj):
"""Get an ArgSpec string that is free of addresses.
We have callables as function arg defaults. This results in addresses in
  getargspec output. This function returns a sanitized string representation
  of the argspec.
  Args:
    obj: A python routine to create the sanitized argspec of.
Returns:
string, a string representation of the argspec.
"""
output_string = ''
unsanitized_arg_spec = tf_inspect.getargspec(obj)
for clean_attr in ('args', 'varargs', 'keywords'):
output_string += '%s=%s, ' % (clean_attr,
getattr(unsanitized_arg_spec, clean_attr))
if unsanitized_arg_spec.defaults:
sanitized_defaults = []
for val in unsanitized_arg_spec.defaults:
str_val = str(val)
# Sanitize argspecs that have hex code in them.
if ' at 0x' in str_val:
sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])
else:
sanitized_defaults.append(str_val)
output_string += 'defaults=%s, ' % sanitized_defaults
else:
output_string += 'defaults=None'
return output_string
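# Illustrative output (the function below is hypothetical): for
#   def f(a, b=1, *args, **kwargs): ...
# the sanitized argspec string is roughly
#   "args=['a', 'b'], varargs=args, keywords=kwargs, defaults=['1'], "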
def _SanitizedMRO(obj):
"""Get a list of superclasses with minimal amount of non-TF classes.
  Depending on many parameters, such as the Python version, OS, protobuf
  implementation, or changes in google core libraries, the list of superclasses
  of a class can change. We only return the first non-TF class, to be robust to
  non-API-affecting changes. The Method Resolution Order returned by
  `tf_inspect.getmro`
is still maintained in the return value.
Args:
    obj: A python class to compute the sanitized MRO of.
Returns:
list of strings, string representation of the class names.
"""
return_list = []
for cls in tf_inspect.getmro(obj):
if cls.__name__ == '_NewClass':
# Ignore class created by @deprecated_alias decorator.
continue
str_repr = _NormalizeType(str(cls))
return_list.append(str_repr)
if 'tensorflow' not in str_repr:
break
# Hack - tensorflow.test.StubOutForTesting may or may not be type <object>
# depending on the environment. To avoid inconsistency, break after we add
# StubOutForTesting to the return_list.
if 'StubOutForTesting' in str_repr:
break
return return_list
def _IsProtoClass(obj):
"""Returns whether the passed obj is a Protocol Buffer class."""
return isinstance(obj, type) and issubclass(obj, message.Message)
class PythonObjectToProtoVisitor(object):
"""A visitor that summarizes given python objects as protobufs."""
def __init__(self):
# A dict to store all protocol buffers.
# Keyed by "path" to the object.
self._protos = {}
def GetProtos(self):
"""Return the list of protos stored."""
return self._protos
def __call__(self, path, parent, children):
# The path to the object.
lib_path = 'tensorflow.%s' % path if path else 'tensorflow'
_, parent = tf_decorator.unwrap(parent)
# A small helper method to construct members(children) protos.
def _AddMember(member_name, member_obj, proto):
"""Add the child object to the object being constructed."""
_, member_obj = tf_decorator.unwrap(member_obj)
if (_SkipMember(parent, member_name) or
isinstance(member_obj, deprecation.HiddenTfApiAttribute)):
return
if member_name == '__init__' or not member_name.startswith('_'):
if tf_inspect.isroutine(member_obj):
new_method = proto.member_method.add()
new_method.name = member_name
# If member_obj is a python builtin, there is no way to get its
# argspec, because it is implemented on the C side. It also has no
# func_code.
if hasattr(member_obj, '__code__'):
new_method.argspec = _SanitizedArgSpec(member_obj)
else:
new_member = proto.member.add()
new_member.name = member_name
if tf_inspect.ismodule(member_obj):
new_member.mtype = "<type \'module\'>"
else:
new_member.mtype = _NormalizeType(str(type(member_obj)))
parent_corner_cases = _CORNER_CASES.get(path, {})
if path not in _CORNER_CASES or parent_corner_cases:
# Decide if we have a module or a class.
if tf_inspect.ismodule(parent):
# Create a module object.
module_obj = api_objects_pb2.TFAPIModule()
for name, child in children:
if name in parent_corner_cases:
# If we have an empty entry, skip this object.
if parent_corner_cases[name]:
module_obj.member.add(**(parent_corner_cases[name]))
else:
_AddMember(name, child, module_obj)
# Store the constructed module object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_module=module_obj)
elif _IsProtoClass(parent):
proto_obj = api_objects_pb2.TFAPIProto()
parent.DESCRIPTOR.CopyToProto(proto_obj.descriptor)
# Store the constructed proto object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_proto=proto_obj)
elif tf_inspect.isclass(parent):
# Construct a class.
class_obj = api_objects_pb2.TFAPIClass()
class_obj.is_instance.extend(
_NormalizeIsInstance(i) for i in _SanitizedMRO(parent))
for name, child in children:
if name in parent_corner_cases:
# If we have an empty entry, skip this object.
if parent_corner_cases[name]:
class_obj.member.add(**(parent_corner_cases[name]))
else:
_AddMember(name, child, class_obj)
# Store the constructed class object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_class=class_obj)
else:
        logging.error('Illegal call to ApiProtoDump::_py_obj_to_proto. '
                      'Object is neither a module nor a class: %s', path)
| tensorflow-master | tensorflow/tools/api/lib/python_object_to_proto_visitor.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exposes the Python wrapper for graph transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.pywrap_tensorflow import TransformGraphWithStringInputs
from tensorflow.python.util import compat
def TransformGraph(input_graph_def, inputs, outputs, transforms):
"""Python wrapper for the Graph Transform Tool.
Gives access to all graph transforms available through the command line tool.
See documentation at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md
for full details of the options available.
Args:
input_graph_def: GraphDef object containing a model to be transformed.
inputs: List of node names for the model inputs.
outputs: List of node names for the model outputs.
transforms: List of strings containing transform names and parameters.
Returns:
New GraphDef with transforms applied.
"""
input_graph_def_string = input_graph_def.SerializeToString()
inputs_string = compat.as_bytes(",".join(inputs))
outputs_string = compat.as_bytes(",".join(outputs))
transforms_string = compat.as_bytes(" ".join(transforms))
with errors.raise_exception_on_not_ok_status() as status:
output_graph_def_string = TransformGraphWithStringInputs(
input_graph_def_string, inputs_string, outputs_string,
transforms_string, status)
output_graph_def = graph_pb2.GraphDef()
output_graph_def.ParseFromString(output_graph_def_string)
return output_graph_def
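# A minimal usage sketch, assuming `graph_def` already holds a GraphDef whose
# graph has nodes named "input" and "output" (both names are illustrative):
#   stripped_graph_def = TransformGraph(
#       graph_def, ["input"], ["output"], ["strip_unused_nodes"])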
| tensorflow-master | tensorflow/tools/graph_transforms/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.tools.graph_transforms import TransformGraph
class TransformGraphTest(test.TestCase):
# This test constructs a graph with a relu op that's not used by the normal
# inference path, and then tests that the strip_unused transform removes it as
# expected.
def testTransformGraph(self):
input_graph_def = graph_pb2.GraphDef()
const_op1 = input_graph_def.node.add()
const_op1.op = "Const"
const_op1.name = "const_op1"
const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op1.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[1, 2], dtypes.float32, [1, 2])))
const_op2 = input_graph_def.node.add()
const_op2.op = "Const"
const_op2.name = "const_op2"
const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op2.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[3, 4], dtypes.float32, [1, 2])))
# Create an add that has two constants as inputs.
add_op = input_graph_def.node.add()
add_op.op = "Add"
add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
add_op.name = "add_op"
add_op.input.extend(["const_op1", "const_op2"])
# Create a relu that reads from the add.
relu_op = input_graph_def.node.add()
relu_op.op = "Relu"
relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
relu_op.name = "relu_op"
relu_op.input.extend(["add_op"])
# We're specifying that add_op is the final output, and so the relu isn't
# needed.
input_names = []
output_names = ["add_op"]
transforms = ["strip_unused_nodes"]
transformed_graph_def = TransformGraph(input_graph_def, input_names,
output_names, transforms)
# We expect that the relu is no longer present after running the transform.
for node in transformed_graph_def.node:
self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/tools/graph_transforms/python/transform_graph_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Multipurpose TensorFlow Docker Helper.
- Assembles Dockerfiles
- Builds images (and optionally runs image tests)
- Pushes images to Docker Hub (provided with credentials)
Logs are written to stderr; the list of successfully built images is
written to stdout.
Read README.md (in this directory) for instructions!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import errno
import itertools
import multiprocessing
import os
import platform
import re
import shutil
import sys
import json
from absl import app
from absl import flags
import cerberus
import docker
import yaml
FLAGS = flags.FLAGS
flags.DEFINE_string('hub_username', None,
'Dockerhub username, only used with --upload_to_hub')
flags.DEFINE_string(
'hub_password', None,
('Dockerhub password, only used with --upload_to_hub. Use from an env param'
' so your password isn\'t in your history.'))
flags.DEFINE_integer('hub_timeout', 3600,
'Abort Hub upload if it takes longer than this.')
flags.DEFINE_string(
'repository', 'tensorflow',
'Tag local images as {repository}:tag (in addition to the '
'hub_repository, if uploading to hub)')
flags.DEFINE_string(
'hub_repository', None,
'Push tags to this Docker Hub repository, e.g. tensorflow/tensorflow')
flags.DEFINE_boolean(
'upload_to_hub',
False,
('Push built images to Docker Hub (you must also provide --hub_username, '
'--hub_password, and --hub_repository)'),
short_name='u',
)
flags.DEFINE_boolean(
    'construct_dockerfiles', False,
    'Generate Dockerfiles and write them to --dockerfile_dir.',
    short_name='d')
flags.DEFINE_boolean(
'keep_temp_dockerfiles',
False,
'Retain .temp.Dockerfiles created while building images.',
short_name='k')
flags.DEFINE_boolean(
    'build_images', False, 'Build images from the generated Dockerfiles.',
    short_name='b')
flags.DEFINE_string(
'run_tests_path', None,
('Execute test scripts on generated Dockerfiles before pushing them. '
'Flag value must be a full path to the "tests" directory, which is usually'
    ' $(realpath ./tests). A failed test counts the same as a failed build.'))
flags.DEFINE_boolean(
'stop_on_failure', False,
('Stop processing tags if any one build fails. If False or not specified, '
'failures are reported but do not affect the other images.'))
flags.DEFINE_boolean(
'dry_run',
False,
'Do not build or deploy anything at all.',
short_name='n',
)
flags.DEFINE_string(
'exclude_tags_matching',
None,
('Regular expression that skips processing on any tag it matches. Must '
'match entire string, e.g. ".*gpu.*" ignores all GPU tags.'),
short_name='x')
flags.DEFINE_string(
'only_tags_matching',
None,
('Regular expression that skips processing on any tag it does not match. '
'Must match entire string, e.g. ".*gpu.*" includes only GPU tags.'),
short_name='i')
flags.DEFINE_string(
'dockerfile_dir',
'./dockerfiles', 'Path to an output directory for Dockerfiles.'
' Will be created if it doesn\'t exist.'
' Existing files in this directory will be deleted when new Dockerfiles'
' are made.',
short_name='o')
flags.DEFINE_string(
'partial_dir',
'./partials',
'Path to a directory containing foo.partial.Dockerfile partial files.'
    ' It can have subdirectories, e.g. "bar/baz.partial.Dockerfile".',
short_name='p')
flags.DEFINE_multi_string(
'release', [],
'Set of releases to build and tag. Defaults to every release type.',
short_name='r')
flags.DEFINE_multi_string(
'arg', [],
('Extra build arguments. These are used for expanding tag names if needed '
'(e.g. --arg _TAG_PREFIX=foo) and for using as build arguments (unused '
'args will print a warning).'),
short_name='a')
flags.DEFINE_boolean(
'nocache', False,
'Disable the Docker build cache; identical to "docker build --no-cache"')
flags.DEFINE_string(
'spec_file',
'./spec.yml',
'Path to the YAML specification file',
short_name='s')
# Schema to verify the contents of tag-spec.yml with Cerberus.
# Must be converted to a dict from yaml to work.
# Note: can add python references with e.g.
# !!python/name:builtins.str
# !!python/name:__main__.funcname
SCHEMA_TEXT = """
header:
type: string
slice_sets:
type: dict
keyschema:
type: string
valueschema:
type: list
schema:
type: dict
schema:
add_to_name:
type: string
dockerfile_exclusive_name:
type: string
dockerfile_subdirectory:
type: string
partials:
type: list
schema:
type: string
ispartial: true
test_runtime:
type: string
required: false
tests:
type: list
default: []
schema:
type: string
args:
type: list
default: []
schema:
type: string
isfullarg: true
releases:
type: dict
keyschema:
type: string
valueschema:
type: dict
schema:
is_dockerfiles:
type: boolean
required: false
default: false
upload_images:
type: boolean
required: false
default: true
tag_specs:
type: list
required: true
schema:
type: string
"""
class TfDockerTagValidator(cerberus.Validator):
"""Custom Cerberus validator for TF tag spec.
Note: Each _validate_foo function's docstring must end with a segment
describing its own validation schema, e.g. "The rule's arguments are...". If
you add a new validator, you can copy/paste that section.
"""
def __init__(self, *args, **kwargs):
# See http://docs.python-cerberus.org/en/stable/customize.html
if 'partials' in kwargs:
self.partials = kwargs['partials']
super(cerberus.Validator, self).__init__(*args, **kwargs)
def _validate_ispartial(self, ispartial, field, value):
"""Validate that a partial references an existing partial spec.
Args:
ispartial: Value of the rule, a bool
field: The field being validated
value: The field's value
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if ispartial and value not in self.partials:
self._error(field,
'{} is not present in the partials directory.'.format(value))
def _validate_isfullarg(self, isfullarg, field, value):
"""Validate that a string is either a FULL=arg or NOT.
Args:
isfullarg: Value of the rule, a bool
field: The field being validated
value: The field's value
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if isfullarg and '=' not in value:
self._error(field, '{} should be of the form ARG=VALUE.'.format(value))
if not isfullarg and '=' in value:
self._error(field, '{} should be of the form ARG (no =).'.format(value))
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, flush=True, **kwargs)
def aggregate_all_slice_combinations(spec, slice_set_names):
"""Figure out all of the possible slice groupings for a tag spec."""
slice_sets = copy.deepcopy(spec['slice_sets'])
for name in slice_set_names:
for slice_set in slice_sets[name]:
slice_set['set_name'] = name
slices_grouped_but_not_keyed = [slice_sets[name] for name in slice_set_names]
all_slice_combos = list(itertools.product(*slices_grouped_but_not_keyed))
return all_slice_combos
def build_name_from_slices(format_string, slices, args, is_dockerfile=False):
"""Build the tag name (cpu-devel...) from a list of slices."""
name_formatter = copy.deepcopy(args)
name_formatter.update({s['set_name']: s['add_to_name'] for s in slices})
name_formatter.update({
s['set_name']: s['dockerfile_exclusive_name']
for s in slices
if is_dockerfile and 'dockerfile_exclusive_name' in s
})
name = format_string.format(**name_formatter)
return name
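# Illustrative example (the slice-set and arg names are hypothetical): with
#   format_string = '{_TAG_PREFIX}{ubuntu_version}'
#   slices = [{'set_name': 'ubuntu_version', 'add_to_name': '-16.04'}]
#   args = {'_TAG_PREFIX': 'latest'}
# the resulting tag name is 'latest-16.04'.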
def update_args_dict(args_dict, updater):
"""Update a dict of arg values with more values from a list or dict."""
if isinstance(updater, list):
for arg in updater:
key, sep, value = arg.partition('=')
if sep == '=':
args_dict[key] = value
if isinstance(updater, dict):
for key, value in updater.items():
args_dict[key] = value
return args_dict
def get_slice_sets_and_required_args(slice_sets, tag_spec):
"""Extract used-slice-sets and required CLI arguments from a spec string.
For example, {FOO}{bar}{bat} finds FOO, bar, and bat. Assuming bar and bat
are both named slice sets, FOO must be specified on the command line.
Args:
slice_sets: Dict of named slice sets
tag_spec: The tag spec string, e.g. {_FOO}{blep}
Returns:
(used_slice_sets, required_args), a tuple of lists
"""
required_args = []
used_slice_sets = []
extract_bracketed_words = re.compile(r'\{([^}]+)\}')
possible_args_or_slice_set_names = extract_bracketed_words.findall(tag_spec)
for name in possible_args_or_slice_set_names:
if name in slice_sets:
used_slice_sets.append(name)
else:
required_args.append(name)
return (used_slice_sets, required_args)
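# Illustrative example (the names are hypothetical): for the tag spec
# '{_TAG_PREFIX}{ubuntu_version}{python_version}', where 'ubuntu_version' and
# 'python_version' are defined slice sets, the result is
#   (['ubuntu_version', 'python_version'], ['_TAG_PREFIX'])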
def gather_tag_args(slices, cli_input_args, required_args):
"""Build a dictionary of all the CLI and slice-specified args for a tag."""
args = {}
for s in slices:
args = update_args_dict(args, s['args'])
args = update_args_dict(args, cli_input_args)
for arg in required_args:
if arg not in args:
eprint(('> Error: {} is not a valid slice_set, and also isn\'t an arg '
'provided on the command line. If it is an arg, please specify '
'it with --arg. If not, check the slice_sets list.'.format(arg)))
exit(1)
return args
def gather_slice_list_items(slices, key):
"""For a list of slices, get the flattened list of all of a certain key."""
return list(itertools.chain(*[s[key] for s in slices if key in s]))
def find_first_slice_value(slices, key):
"""For a list of slices, get the first value for a certain key."""
for s in slices:
if key in s and s[key] is not None:
return s[key]
return None
def assemble_tags(spec, cli_args, enabled_releases, all_partials):
"""Gather all the tags based on our spec.
Args:
spec: Nested dict containing full Tag spec
cli_args: List of ARG=foo arguments to pass along to Docker build
enabled_releases: List of releases to parse. Empty list = all
all_partials: Dict of every partial, for reference
Returns:
Dict of tags and how to build them
"""
tag_data = collections.defaultdict(list)
for name, release in spec['releases'].items():
for tag_spec in release['tag_specs']:
if enabled_releases and name not in enabled_releases:
eprint('> Skipping release {}'.format(name))
continue
used_slice_sets, required_cli_args = get_slice_sets_and_required_args(
spec['slice_sets'], tag_spec)
slice_combos = aggregate_all_slice_combinations(spec, used_slice_sets)
for slices in slice_combos:
tag_args = gather_tag_args(slices, cli_args, required_cli_args)
tag_name = build_name_from_slices(tag_spec, slices, tag_args,
release['is_dockerfiles'])
used_partials = gather_slice_list_items(slices, 'partials')
used_tests = gather_slice_list_items(slices, 'tests')
test_runtime = find_first_slice_value(slices, 'test_runtime')
dockerfile_subdirectory = find_first_slice_value(
slices, 'dockerfile_subdirectory')
dockerfile_contents = merge_partials(spec['header'], used_partials,
all_partials)
tag_data[tag_name].append({
'release': name,
'tag_spec': tag_spec,
'is_dockerfiles': release['is_dockerfiles'],
'upload_images': release['upload_images'],
'cli_args': tag_args,
'dockerfile_subdirectory': dockerfile_subdirectory or '',
'partials': used_partials,
'tests': used_tests,
'test_runtime': test_runtime,
'dockerfile_contents': dockerfile_contents,
})
return tag_data
def merge_partials(header, used_partials, all_partials):
"""Merge all partial contents with their header."""
used_partials = list(used_partials)
return '\n'.join([header] + [all_partials[u] for u in used_partials])
def upload_in_background(hub_repository, dock, image, tag):
"""Upload a docker image (to be used by multiprocessing)."""
image.tag(hub_repository, tag=tag)
print(dock.images.push(hub_repository, tag=tag))
def mkdir_p(path):
"""Create a directory and its parents, even if it already exists."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def gather_existing_partials(partial_path):
"""Find and read all available partials.
Args:
partial_path (string): read partials from this directory.
Returns:
Dict[string, string] of partial short names (like "ubuntu/python" or
"bazel") to the full contents of that partial.
"""
partials = {}
for path, _, files in os.walk(partial_path):
for name in files:
fullpath = os.path.join(path, name)
if '.partial.Dockerfile' not in fullpath:
eprint(('> Probably not a problem: skipping {}, which is not a '
'partial.').format(fullpath))
continue
# partial_dir/foo/bar.partial.Dockerfile -> foo/bar
      simple_name = fullpath[len(partial_path) + 1:-len('.partial.Dockerfile')]
with open(fullpath, 'r') as f:
partial_contents = f.read()
partials[simple_name] = partial_contents
return partials
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Read the full spec file, used for everything
with open(FLAGS.spec_file, 'r') as spec_file:
tag_spec = yaml.load(spec_file)
# Get existing partial contents
partials = gather_existing_partials(FLAGS.partial_dir)
# Abort if spec.yaml is invalid
schema = yaml.load(SCHEMA_TEXT)
v = TfDockerTagValidator(schema, partials=partials)
if not v.validate(tag_spec):
eprint('> Error: {} is an invalid spec! The errors are:'.format(
FLAGS.spec_file))
eprint(yaml.dump(v.errors, indent=2))
exit(1)
tag_spec = v.normalized(tag_spec)
# Assemble tags and images used to build them
all_tags = assemble_tags(tag_spec, FLAGS.arg, FLAGS.release, partials)
# Empty Dockerfile directory if building new Dockerfiles
if FLAGS.construct_dockerfiles:
eprint('> Emptying Dockerfile dir "{}"'.format(FLAGS.dockerfile_dir))
shutil.rmtree(FLAGS.dockerfile_dir, ignore_errors=True)
mkdir_p(FLAGS.dockerfile_dir)
# Set up Docker helper
dock = docker.from_env()
# Login to Docker if uploading images
if FLAGS.upload_to_hub:
if not FLAGS.hub_username:
eprint('> Error: please set --hub_username when uploading to Dockerhub.')
exit(1)
if not FLAGS.hub_repository:
eprint(
'> Error: please set --hub_repository when uploading to Dockerhub.')
exit(1)
if not FLAGS.hub_password:
eprint('> Error: please set --hub_password when uploading to Dockerhub.')
exit(1)
dock.login(
username=FLAGS.hub_username,
password=FLAGS.hub_password,
)
# Each tag has a name ('tag') and a definition consisting of the contents
# of its Dockerfile, its build arg list, etc.
failed_tags = []
succeeded_tags = []
for tag, tag_defs in all_tags.items():
for tag_def in tag_defs:
eprint('> Working on {}'.format(tag))
if FLAGS.exclude_tags_matching and re.match(FLAGS.exclude_tags_matching,
tag):
eprint('>> Excluded due to match against "{}".'.format(
FLAGS.exclude_tags_matching))
continue
if FLAGS.only_tags_matching and not re.match(FLAGS.only_tags_matching,
tag):
eprint('>> Excluded due to failure to match against "{}".'.format(
FLAGS.only_tags_matching))
continue
# Write releases marked "is_dockerfiles" into the Dockerfile directory
if FLAGS.construct_dockerfiles and tag_def['is_dockerfiles']:
path = os.path.join(FLAGS.dockerfile_dir,
tag_def['dockerfile_subdirectory'],
tag + '.Dockerfile')
eprint('>> Writing {}...'.format(path))
if not FLAGS.dry_run:
mkdir_p(os.path.dirname(path))
with open(path, 'w') as f:
f.write(tag_def['dockerfile_contents'])
# Don't build any images for dockerfile-only releases
if not FLAGS.build_images:
continue
# Only build images for host architecture
proc_arch = platform.processor()
is_x86 = proc_arch.startswith('x86')
if (is_x86 and any([arch in tag for arch in ['ppc64le']]) or
not is_x86 and proc_arch not in tag):
continue
# Generate a temporary Dockerfile to use to build, since docker-py
# needs a filepath relative to the build context (i.e. the current
# directory)
dockerfile = os.path.join(FLAGS.dockerfile_dir, tag + '.temp.Dockerfile')
if not FLAGS.dry_run:
with open(dockerfile, 'w') as f:
f.write(tag_def['dockerfile_contents'])
eprint('>> (Temporary) writing {}...'.format(dockerfile))
repo_tag = '{}:{}'.format(FLAGS.repository, tag)
eprint('>> Building {} using build args:'.format(repo_tag))
for arg, value in tag_def['cli_args'].items():
eprint('>>> {}={}'.format(arg, value))
# Note that we are NOT using cache_from, which appears to limit
# available cache layers to those from explicitly specified layers. Many
# of our layers are similar between local builds, so we want to use the
# implied local build cache.
tag_failed = False
image, logs = None, []
if not FLAGS.dry_run:
try:
# Use low level APIClient in order to stream log output
resp = dock.api.build(
timeout=FLAGS.hub_timeout,
path='.',
nocache=FLAGS.nocache,
dockerfile=dockerfile,
buildargs=tag_def['cli_args'],
tag=repo_tag)
last_event = None
image_id = None
# Manually process log output extracting build success and image id
# in order to get built image
while True:
try:
output = next(resp).decode('utf-8')
json_output = json.loads(output.strip('\r\n'))
if 'stream' in json_output:
eprint(json_output['stream'], end='')
match = re.search(r'(^Successfully built |sha256:)([0-9a-f]+)$',
json_output['stream'])
if match:
image_id = match.group(2)
last_event = json_output['stream']
# collect all log lines into the logs object
logs.append(json_output)
except StopIteration:
eprint('Docker image build complete.')
break
except ValueError:
eprint('Error parsing from docker image build: {}'.format(output))
          # If Image ID is not set, the image failed to build properly. Raise
# an error in this case with the last log line and all logs
if image_id:
image = dock.images.get(image_id)
else:
raise docker.errors.BuildError(last_event or 'Unknown', logs)
# Run tests if requested, and dump output
# Could be improved by backgrounding, but would need better
# multiprocessing support to track failures properly.
if FLAGS.run_tests_path:
if not tag_def['tests']:
eprint('>>> No tests to run.')
for test in tag_def['tests']:
eprint('>> Testing {}...'.format(test))
              container = dock.containers.run(
image,
'/tests/' + test,
working_dir='/',
log_config={'type': 'journald'},
detach=True,
stderr=True,
stdout=True,
volumes={
FLAGS.run_tests_path: {
'bind': '/tests',
'mode': 'ro'
}
},
                  runtime=tag_def['test_runtime'])
ret = container.wait()
code = ret['StatusCode']
out = container.logs(stdout=True, stderr=False)
err = container.logs(stdout=False, stderr=True)
container.remove()
if out:
eprint('>>> Output stdout:')
eprint(out.decode('utf-8'))
else:
eprint('>>> No test standard out.')
if err:
eprint('>>> Output stderr:')
                eprint(err.decode('utf-8'))
else:
eprint('>>> No test standard err.')
if code != 0:
eprint('>> {} failed tests with status: "{}"'.format(
repo_tag, code))
failed_tags.append(tag)
tag_failed = True
if FLAGS.stop_on_failure:
eprint('>> ABORTING due to --stop_on_failure!')
exit(1)
else:
eprint('>> Tests look good!')
except docker.errors.BuildError as e:
eprint('>> {} failed to build with message: "{}"'.format(
repo_tag, e.msg))
eprint('>> Build logs follow:')
log_lines = [l.get('stream', '') for l in e.build_log]
eprint(''.join(log_lines))
failed_tags.append(tag)
tag_failed = True
if FLAGS.stop_on_failure:
eprint('>> ABORTING due to --stop_on_failure!')
exit(1)
# Clean temporary dockerfiles if they were created earlier
if not FLAGS.keep_temp_dockerfiles:
os.remove(dockerfile)
# Upload new images to DockerHub as long as they built + passed tests
if FLAGS.upload_to_hub:
if not tag_def['upload_images']:
continue
if tag_failed:
continue
eprint('>> Uploading to {}:{}'.format(FLAGS.hub_repository, tag))
if not FLAGS.dry_run:
p = multiprocessing.Process(
target=upload_in_background,
args=(FLAGS.hub_repository, dock, image, tag))
p.start()
if not tag_failed:
succeeded_tags.append(tag)
if failed_tags:
eprint(
'> Some tags failed to build or failed testing, check scrollback for '
'errors: {}'.format(','.join(failed_tags)))
exit(1)
eprint('> Writing built{} tags to standard out.'.format(
' and tested' if FLAGS.run_tests_path else ''))
for tag in succeeded_tags:
print('{}:{}'.format(FLAGS.repository, tag))
if __name__ == '__main__':
app.run(main)
| tensorflow-master | tensorflow/tools/dockerfiles/assembler.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import bytes # pylint: disable=redefined-builtin
import argparse
import json
import os
import shutil
import subprocess
def parse_branch_ref(filename):
"""Given a filename of a .git/HEAD file return ref path.
In particular, if git is in detached head state, this will
return None. If git is in attached head, it will return
the branch reference. E.g. if on 'master', the HEAD will
contain 'ref: refs/heads/master' so 'refs/heads/master'
will be returned.
Example: parse_branch_ref(".git/HEAD")
Args:
filename: file to treat as a git HEAD file
Returns:
None if detached head, otherwise ref subpath
Raises:
RuntimeError: if the HEAD file is unparseable.
"""
data = open(filename).read().strip()
items = data.split(" ")
if len(items) == 1:
return None
elif len(items) == 2 and items[0] == "ref:":
return items[1].strip()
else:
raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
"""Configure `src_base_path` to embed git hashes if available."""
# TODO(aselle): No files generated or symlinked here are deleted by
# the build system. I don't know of a way to do it in bazel. It
# should only be a problem if somebody moves a sandbox directory
# without running ./configure again.
git_path = os.path.join(src_base_path, ".git")
# Remove and recreate the path
if os.path.exists(gen_path):
if os.path.isdir(gen_path):
try:
shutil.rmtree(gen_path)
except OSError:
raise RuntimeError("Cannot delete directory %s due to permission "
"error, inspect and remove manually" % gen_path)
else:
raise RuntimeError("Cannot delete non-directory %s, inspect ",
"and remove manually" % gen_path)
os.makedirs(gen_path)
if not os.path.isdir(gen_path):
raise RuntimeError("gen_git_source.py: Failed to create dir")
# file that specifies what the state of the git repo is
spec = {}
# value file names will be mapped to the keys
link_map = {"head": None, "branch_ref": None}
if not os.path.isdir(git_path):
# No git directory
spec["git"] = False
open(os.path.join(gen_path, "head"), "w").write("")
open(os.path.join(gen_path, "branch_ref"), "w").write("")
else:
# Git directory, possibly detached or attached
spec["git"] = True
spec["path"] = src_base_path
git_head_path = os.path.join(git_path, "HEAD")
spec["branch"] = parse_branch_ref(git_head_path)
link_map["head"] = git_head_path
if spec["branch"] is not None:
# attached method
link_map["branch_ref"] = os.path.join(git_path, *
os.path.split(spec["branch"]))
# Create symlinks or dummy files
for target, src in link_map.items():
if src is None:
open(os.path.join(gen_path, target), "w").write("")
elif not os.path.exists(src):
# Git repo is configured in a way we don't support such as having
# packed refs. Even though in a git repo, tf.__git_version__ will not
# be accurate.
# TODO(mikecase): Support grabbing git info when using packed refs.
open(os.path.join(gen_path, target), "w").write("")
spec["git"] = False
else:
try:
# In python 3.5, symlink function exists even on Windows. But requires
# Windows Admin privileges, otherwise an OSError will be thrown.
if hasattr(os, "symlink"):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
except OSError:
shutil.copy2(src, os.path.join(gen_path, target))
json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
if debug:
print("gen_git_source.py: list %s" % gen_path)
print("gen_git_source.py: %s" + repr(os.listdir(gen_path)))
print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path, git_tag_override):
"""Get the git version from the repository.
This function runs `git describe ...` in the path given as `git_base_path`.
This will return a string of the form:
<base-tag>-<number of commits since tag>-<shortened sha hash>
  For example, 'v0.10.0-1585-gbb717a6' means that v0.10.0 was the most recent
  tag at compile time, 1585 commits were made after that tag, and 'bb717a6' is
  the abbreviated hash of the current commit, so `git checkout bb717a6` returns
  to this version.
Args:
git_base_path: where the .git directory is located
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
Returns:
A bytestring representing the git version
"""
unknown_label = b"unknown"
try:
# Force to bytes so this works on python 2 and python 3
val = bytes(subprocess.check_output([
"git", str("--git-dir=%s/.git" % git_base_path),
str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
]).strip())
version_separator = b"-"
if git_tag_override and val:
split_val = val.split(version_separator)
if len(split_val) < 3:
raise Exception(
("Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' "
"but got '%s'") % val)
# There might be "-" in the tag name. But we can be sure that the final
# two "-" are those inserted by the git describe command.
abbrev_commit = split_val[-1]
val = version_separator.join(
[bytes(git_tag_override, "utf-8"), b"0", abbrev_commit])
return val if val else unknown_label
except (subprocess.CalledProcessError, OSError):
return unknown_label
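# Sketch of the override handling above, assuming `git describe` printed
# b"v1.12.0-1234-gabc1234": the value is split on "-", only the abbreviated
# hash is kept, and with git_tag_override="v2.0.0" the result becomes
# b"v2.0.0-0-gabc1234".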
def write_version_info(filename, git_version):
"""Write a c file that defines the version functions.
Args:
filename: filename to write to.
git_version: the result of a git describe.
"""
if b"\"" in git_version or b"\\" in git_version:
git_version = b"git_version_is_invalid" # do not cause build to fail!
contents = """/* Generated by gen_git_source.py */
#include <string>
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {
#ifdef _MSC_VER
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
return "MSVC " TOSTRING(_MSC_FULL_VER);
#else
return __VERSION__;
#endif
}
int tf_cxx11_abi_flag() {
#ifdef _GLIBCXX_USE_CXX11_ABI
return _GLIBCXX_USE_CXX11_ABI;
#else
return 0;
#endif
}
int tf_monolithic_build() {
#ifdef TENSORFLOW_MONOLITHIC_BUILD
return 1;
#else
return 0;
#endif
}
""" % git_version.decode("utf-8")
open(filename, "w").write(contents)
def generate(arglist, git_tag_override=None):
"""Generate version_info.cc as given `destination_file`.
Args:
arglist: should be a sequence that contains
spec, head_symlink, ref_symlink, destination_file.
`destination_file` is the filename where version_info.cc will be written
`spec` is a filename where the file contains a JSON dictionary
'git' bool that is true if the source is in a git repo
'path' base path of the source code
'branch' the name of the ref specification of the current branch/tag
`head_symlink` is a filename to HEAD that is cross-referenced against
what is contained in the json branch designation.
`ref_symlink` is unused in this script but passed, because the build
system uses that file to detect when commits happen.
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
Raises:
RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
"""
# unused ref_symlink arg
spec, head_symlink, _, dest_file = arglist
data = json.load(open(spec))
git_version = None
if not data["git"]:
git_version = b"unknown"
else:
old_branch = data["branch"]
new_branch = parse_branch_ref(head_symlink)
if new_branch != old_branch:
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
git_version = get_git_version(data["path"], git_tag_override)
write_version_info(dest_file, git_version)
def raw_generate(output_file, source_dir, git_tag_override=None):
"""Simple generator used for cmake/make build systems.
This does not create any symlinks. It requires the build system
to build unconditionally.
Args:
output_file: Output filename for the version info cc
source_dir: Base path of the source code
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
"""
git_version = get_git_version(source_dir, git_tag_override)
write_version_info(output_file, git_version)
parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")
parser.add_argument(
"--debug",
type=bool,
help="print debugging information about paths",
default=False)
parser.add_argument(
"--configure", type=str,
help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
"--gen_root_path", type=str,
help="Root path to place generated git files (created by --configure).")
parser.add_argument(
"--git_tag_override", type=str,
help="Override git tag value in the __git_version__ string. Useful when "
"creating release builds before the release tag is created.")
parser.add_argument(
"--generate",
type=str,
help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
nargs="+")
parser.add_argument(
"--raw_generate",
type=str,
help="Generate version_info.cc (simpler version used for cmake/make)")
parser.add_argument(
"--source_dir",
type=str,
help="Base path of the source code (used for cmake/make)")
args = parser.parse_args()
if args.configure is not None:
if args.gen_root_path is None:
raise RuntimeError("Must pass --gen_root_path arg when running --configure")
configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
generate(args.generate, args.git_tag_override)
elif args.raw_generate is not None:
source_path = "."
if args.source_dir is not None:
source_path = args.source_dir
raw_generate(args.raw_generate, source_path, args.git_tag_override)
else:
raise RuntimeError("--configure or --generate or --raw_generate "
"must be used")
| tensorflow-master | tensorflow/tools/git/gen_git_source.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to help with the TensorFlow 2.0 transition.
This module is meant for TensorFlow internal implementation, not for users of
the TensorFlow library. For that see tf.compat instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
_force_enable = None
def enable():
"""Enables v2 behaviors."""
global _force_enable
_force_enable = True
def disable():
"""Disables v2 behaviors."""
global _force_enable
_force_enable = False
def enabled():
"""Returns True iff TensorFlow 2.0 behavior should be enabled."""
if _force_enable is None:
return os.getenv("TF2_BEHAVIOR", "0") != "0"
else:
return _force_enable
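# A minimal sketch of the resulting precedence (explicit enable()/disable()
# calls win over the TF2_BEHAVIOR environment variable):
#   os.environ["TF2_BEHAVIOR"] = "1"; enabled()   # -> True
#   disable(); enabled()                          # -> False
#   enable(); enabled()                           # -> True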
| tensorflow-master | tensorflow/python/tf2.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for enabling and disabling TF2 behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python import tf2
from tensorflow.python.distribute import combinations
from tensorflow.python.platform import test
def set_environ():
os.environ['TF2_BEHAVIOR'] = '1'
def unset_environ():
os.environ['TF2_BEHAVIOR'] = '0'
class EnablingTF2Behavior(test.TestCase, parameterized.TestCase):
def setUp(self):
super(EnablingTF2Behavior, self).setUp()
tf2._force_enable = None
if 'TF2_BEHAVIOR' in os.environ:
del os.environ['TF2_BEHAVIOR']
actions = [tf2.enable, tf2.disable, set_environ, unset_environ]
@combinations.generate(
combinations.combine(
action_0=actions, action_1=actions,
action_2=actions, action_3=actions))
def test_scenarios(self, action_0, action_1, action_2, action_3):
def state(action, enabled, disabled):
"""Returns bool tuple (tf2_enabled, force_enabled, force_disabled)."""
if action is tf2.enable:
return True, True, False
elif action is tf2.disable:
return False, False, True
elif action is set_environ:
return not disabled, enabled, disabled
elif action is unset_environ:
return enabled, enabled, disabled
else:
raise ValueError('Unexpected action {}. {} are supported'.format(
action, EnablingTF2Behavior.actions))
action_0()
expected, enabled, disabled = state(action_0, False, False)
self.assertEqual(tf2.enabled(), expected)
action_1()
expected, enabled, disabled = state(action_1, enabled, disabled)
self.assertEqual(tf2.enabled(), expected)
action_2()
expected, enabled, disabled = state(action_2, enabled, disabled)
self.assertEqual(tf2.enabled(), expected)
action_3()
expected, enabled, disabled = state(action_3, enabled, disabled)
self.assertEqual(tf2.enabled(), expected)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/tf2_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import config
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python import data
from tensorflow.python import distribute
from tensorflow.python import keras
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.module import module
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import manip_ops as manip
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import ragged
from tensorflow.python.ops import sets
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.signal import signal
from tensorflow.python.profiler import profiler
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
from tensorflow.python.tpu import bfloat16 as _
from tensorflow.python.tpu import tpu as _
from tensorflow.python.tpu import tpu_optimizer as _
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
# Import audio ops to make sure the ops are registered.
from tensorflow.python.ops import gen_audio_ops as _
# Import boosted trees ops to make sure the ops are registered (but unused).
from tensorflow.python.ops import gen_boosted_trees_ops as _gen_boosted_trees_ops
# Import cudnn rnn ops to make sure their ops are registered.
from tensorflow.python.ops import gen_cudnn_rnn_ops as _
# Import rnn_ops to make sure their ops are registered.
from tensorflow.python.ops import gen_rnn_ops as _
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.compat import v2_compat
from tensorflow.python.util.all_util import make_all
from tensorflow.python.util.tf_export import tf_export
# Eager execution
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework.ops import enable_eager_execution
# Necessary for the symbols in this module to be taken into account by
# the namespace management system (API decorators).
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
# XLA JIT compiler APIs.
from tensorflow.python.compiler.xla import jit
from tensorflow.python.compiler.xla import xla
# Required due to `rnn` and `rnn_cell` not being imported in `nn` directly
# (due to a circular dependency issue: rnn depends on layers).
nn.dynamic_rnn = rnn.dynamic_rnn
nn.static_rnn = rnn.static_rnn
nn.raw_rnn = rnn.raw_rnn
nn.bidirectional_dynamic_rnn = rnn.bidirectional_dynamic_rnn
nn.static_state_saving_rnn = rnn.static_state_saving_rnn
nn.rnn_cell = rnn_cell
# Export protos
# pylint: disable=undefined-variable
tf_export(v1=['AttrValue'])(AttrValue)
tf_export(v1=['ConfigProto'])(ConfigProto)
tf_export(v1=['Event', 'summary.Event'])(Event)
tf_export(v1=['GPUOptions'])(GPUOptions)
tf_export(v1=['GraphDef'])(GraphDef)
tf_export(v1=['GraphOptions'])(GraphOptions)
tf_export(v1=['HistogramProto'])(HistogramProto)
tf_export(v1=['LogMessage'])(LogMessage)
tf_export(v1=['MetaGraphDef'])(MetaGraphDef)
tf_export(v1=['NameAttrList'])(NameAttrList)
tf_export(v1=['NodeDef'])(NodeDef)
tf_export(v1=['OptimizerOptions'])(OptimizerOptions)
tf_export(v1=['RunMetadata'])(RunMetadata)
tf_export(v1=['RunOptions'])(RunOptions)
tf_export(v1=['SessionLog', 'summary.SessionLog'])(SessionLog)
tf_export(v1=['Summary', 'summary.Summary'])(Summary)
tf_export(v1=['summary.SummaryDescription'])(SummaryDescription)
tf_export(v1=['SummaryMetadata'])(SummaryMetadata)
tf_export(v1=['summary.TaggedRunMetadata'])(TaggedRunMetadata)
tf_export(v1=['TensorInfo'])(TensorInfo)
# pylint: enable=undefined-variable
# Special dunders that we choose to export:
_exported_dunders = set([
'__version__',
'__git_version__',
'__compiler_version__',
'__cxx11_abi_flag__',
'__monolithic_build__',
])
# Expose symbols minus dunders, unless they are whitelisted above.
# This is necessary to export our dunders.
__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]
| tensorflow-master | tensorflow/python/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""If possible, exports all symbols with RTLD_GLOBAL.
Note that this file is only imported by pywrap_tensorflow.py if this is a static
build (meaning there is no explicit framework cc_binary shared object dependency
of _pywrap_tensorflow_internal.so). For regular (non-static) builds, RTLD_GLOBAL
is not necessary, since the dynamic dependencies of custom/contrib ops are
explicit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import sys
# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated python library
# that dynamically loads _pywrap_tensorflow.so. The default mode for loading
# keeps all the symbol private and not visible to other libraries that may be
# loaded. Setting the mode to RTLD_GLOBAL to make the symbols visible, so that
# custom op libraries imported using `tf.load_op_library()` can access symbols
# defined in _pywrap_tensorflow.so.
_use_rtld_global = (hasattr(sys, 'getdlopenflags')
and hasattr(sys, 'setdlopenflags'))
if _use_rtld_global:
_default_dlopen_flags = sys.getdlopenflags()
def set_dlopen_flags():
if _use_rtld_global:
sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)
def reset_dlopen_flags():
if _use_rtld_global:
sys.setdlopenflags(_default_dlopen_flags)
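# Intended usage (mirrored by pywrap_tensorflow.py): bracket only the SWIG
# module import with RTLD_GLOBAL, e.g.
#   set_dlopen_flags()
#   from tensorflow.python.pywrap_tensorflow_internal import *  # noqa
#   reset_dlopen_flags()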
| tensorflow-master | tensorflow/python/pywrap_dlopen_global_flags.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A wrapper for TensorFlow SWIG-generated bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import sys
import traceback
from tensorflow.python.platform import self_check
# Perform pre-load sanity checks in order to produce a more actionable error
# than we get from an error during SWIG import.
self_check.preload_check()
# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
try:
# This import is expected to fail if there is an explicit shared object
# dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.
from tensorflow.python import pywrap_dlopen_global_flags
_use_dlopen_global_flags = True
except ImportError:
_use_dlopen_global_flags = False
# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated
# python library that dynamically loads _pywrap_tensorflow.so.
_can_set_rtld_local = (hasattr(sys, 'getdlopenflags')
and hasattr(sys, 'setdlopenflags'))
if _can_set_rtld_local:
_default_dlopen_flags = sys.getdlopenflags()
try:
if _use_dlopen_global_flags:
pywrap_dlopen_global_flags.set_dlopen_flags()
elif _can_set_rtld_local:
# Ensure RTLD_LOCAL behavior for platforms where it isn't the default
# (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not
# override an RTLD_GLOBAL in _default_dlopen_flags).
sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)
from tensorflow.python.pywrap_tensorflow_internal import *
from tensorflow.python.pywrap_tensorflow_internal import __version__
from tensorflow.python.pywrap_tensorflow_internal import __git_version__
from tensorflow.python.pywrap_tensorflow_internal import __compiler_version__
from tensorflow.python.pywrap_tensorflow_internal import __cxx11_abi_flag__
from tensorflow.python.pywrap_tensorflow_internal import __monolithic_build__
if _use_dlopen_global_flags:
pywrap_dlopen_global_flags.reset_dlopen_flags()
elif _can_set_rtld_local:
sys.setdlopenflags(_default_dlopen_flags)
except ImportError:
msg = """%s\n\nFailed to load the native TensorFlow runtime.\n
See https://www.tensorflow.org/install/errors\n
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.""" % traceback.format_exc()
raise ImportError(msg)
# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
| tensorflow-master | tensorflow/python/pywrap_tensorflow.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion of plain Python into TensorFlow graph code.
NOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using
`tf.function`. This module contains lower-level APIs for advanced use.
For more information, see the
[AutoGraph guide](https://www.tensorflow.org/guide/autograph).
By equivalent graph code we mean code that generates a TensorFlow graph when
run. The generated graph has the same effects as the original code when executed
(for example with `tf.function` or `tf.compat.v1.Session.run`). In other words,
using AutoGraph can be thought of as running Python in TensorFlow.
"""
# TODO(b/119833526): Link to the new tf.function + autograph tutorial.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Bring only the relevant symbols to the top level.
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core.converter import ConversionOptions
from tensorflow.python.autograph.core.converter import Feature
from tensorflow.python.autograph.impl.api import AutoGraphError
from tensorflow.python.autograph.impl.api import convert
from tensorflow.python.autograph.impl.api import converted_call
from tensorflow.python.autograph.impl.api import do_not_convert
from tensorflow.python.autograph.impl.api import RunMode
from tensorflow.python.autograph.impl.api import StackTraceMapper
from tensorflow.python.autograph.impl.api import to_code
from tensorflow.python.autograph.impl.api import to_graph
from tensorflow.python.autograph.lang.directives import set_element_type
from tensorflow.python.autograph.lang.directives import set_loop_options
from tensorflow.python.autograph.lang.special_functions import stack
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.util.all_util import remove_undocumented
# TODO(mdan): Revisit this list once we finalize the generated code mechanism.
_allowed_symbols = [
# Main API
'AutoGraphError',
'ConversionOptions',
'Feature',
'RunMode',
'StackTraceMapper',
'convert',
'converted_call',
'do_not_convert',
'to_code',
'to_graph',
# Overloaded operators
'operators',
# Python language "extensions"
'set_element_type',
'set_loop_options',
'stack',
'tensor_list',
# Utilities: to be removed
'utils',
]
remove_undocumented(__name__, _allowed_symbols)
| tensorflow-master | tensorflow/python/autograph/__init__.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import imp
import sys
import threading
import types
import unittest
import weakref
import gast
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import side_effect_guards
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.core import unsupported_features_checker
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.util import tf_inspect
class _ConvertedEntityFactoryInfo(
collections.namedtuple(
'_ConvertedEntityFactoryInfo',
('module_name', 'converted_name', 'factory_factory_name', 'source_map'))
):
"""Holds metadata about a converted entity stored as a dynamic factory.
  The dynamic factory is assumed to be created by _wrap_into_dynamic_factory,
  to be named `factory_factory_name`, and to be located inside the module
  named `module_name`.
Attributes:
module_name: Text, the name of the module containing the entity.
converted_name: Text, the name of the converted entity.
factory_factory_name: Text, the name of the dynamic factory.
source_map: Dict.
"""
def __str__(self):
return '_ConvertedEntityFactoryInfo({} in {})'.format(
self.converted_name, self.module_name)
def get_module(self):
return sys.modules[self.module_name]
def get_factory(self):
assert self.module_name in sys.modules
factory_factory = getattr(sys.modules[self.module_name],
self.factory_factory_name)
return factory_factory()
# TODO(mdan): Add a garbage collection hook for cleaning up modules.
class _ConversionCache(object):
"""A hierarchical cache that uses the converted entity as weak key.
  The keys are soft references (i.e. a cache entry is dropped when its key
  object is destroyed). The subkeys are normal hashable values.
This class is generic - see the call site for how the keys and values are
defined.
"""
def __init__(self):
self._cache = weakref.WeakKeyDictionary()
def has(self, key, subkey):
if key not in self._cache:
return False
return subkey in self._cache[key]
def __getitem__(self, key):
if key not in self._cache:
# The bucket needs to be initialized to support this usage:
# cache[key][subkey] = value
self._cache[key] = {}
return self._cache[key]
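# Rough usage sketch for the cache above (keys are weakly referenced, so an
# entry disappears once its key, e.g. a function's code object, is collected):
#   cache = _ConversionCache()
#   cache[fn.__code__][options_subkey] = converted_info
#   cache.has(fn.__code__, options_subkey)  # -> True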
# Using a re-entrant lock to guard against the unlikely possibility that the
# conversion process triggers additional code execution.
_CACHE_LOCK = threading.RLock()
_CACHE = _ConversionCache()
# Note: strictly speaking, a simple factory might have been sufficient for
# functions. But the double factory approach allows us to control the closure
# and globals of the converted code in a cleaner fashion.
# TODO(mdan): A simple factory may be sufficient.
def _wrap_into_dynamic_factory(nodes, entity_name, factory_factory_name,
factory_name, closure_vars, future_features):
"""Wraps an AST into the body of a dynamic factory.
This uses the dynamic factory (factory of factory) pattern to achieve the
following:
1. The inner factory, dynamically creates the entity represented by nodes.
2. The entity is parametrized by `ag__`, the internal AutoGraph module.
3. The outer factory creates the inner factory with a lexical scope
in which `closure_vars` are bound local variables. This in turn allows the
caller to control the exact closure (i.e. non-global free variables) for
the inner factory.
The AST is expected to define some symbol named by `entity_name`.
Args:
nodes: ast.AST
entity_name: Union[Text, ast.AST]
factory_factory_name: Text
factory_name: Text
closure_vars: Iterable[Text]
future_features: Iterable[Text], see EntityInfo.future_features.
Returns:
ast.AST
"""
if not isinstance(nodes, (list, tuple)):
nodes = (nodes,)
dummy_closure_defs = []
for var_name in closure_vars:
template = """
var_name = None
"""
dummy_closure_defs.extend(templates.replace(template, var_name=var_name))
if future_features:
future_imports = gast.ImportFrom(
module='__future__',
names=[gast.alias(name=name, asname=None) for name in future_features],
level=0)
else:
future_imports = []
  # These dummy symbol declarations create local variables in a function scope,
# so that the Python parser correctly marks them as free non-global variables
# upon load (that is, it creates cell slots for each symbol). Their values are
# not used, as the cells are swapped with the original entity's cells after
# the code has been loaded.
template = """
future_imports
def factory_factory_name():
dummy_closure_defs
def factory_name(ag__, ag_source_map__, ag_module__):
entity_defs
entity_name.ag_source_map = ag_source_map__
entity_name.ag_module = ag_module__
entity_name.autograph_info__ = {}
return entity_name
return factory_name
"""
return templates.replace(
template,
future_imports=future_imports,
factory_factory_name=factory_factory_name,
factory_name=factory_name,
dummy_closure_defs=dummy_closure_defs,
entity_defs=nodes,
entity_name=entity_name)
def _convert_with_cache(entity, program_ctx, free_nonglobal_var_names):
"""Returns a (possibly cached) factory for the converted result of entity."""
# The cache key is the entity's code object if it defined one, otherwise it's
# the entity itself. Keying by the code object allows caching of functions
# that are dynamically created e.g. in a loop.
if hasattr(entity, '__code__'):
key = entity.__code__
else:
key = entity
  # The cache subkey encompasses any conversion options on which the generated
# code may depend.
# The cached factory includes the necessary definitions to distinguish
# between the global and non-global free variables. For this reason, the
# cache subkey includes the names of the free non-globals.
subkey = (program_ctx.options, frozenset(free_nonglobal_var_names))
with _CACHE_LOCK:
# The cache values are _ConvertedEntityFactoryInfo objects.
if _CACHE.has(key, subkey):
# TODO(mdan): Check whether the module is still loaded.
converted_entity_info = _CACHE[key][subkey]
logging.log(3, 'Cache hit for entity %s key %s subkey %s: %s', entity,
key, subkey, converted_entity_info)
return converted_entity_info
logging.log(1, 'Entity %s is not cached for key %s subkey %s', entity, key,
subkey)
nodes, converted_name, entity_info = convert_entity_to_ast(
entity, program_ctx)
namer = naming.Namer(entity_info.namespace)
factory_factory_name = namer.new_symbol('create_converted_entity_factory',
())
factory_name = namer.new_symbol('create_converted_entity', ())
nodes = _wrap_into_dynamic_factory(nodes, converted_name,
factory_factory_name, factory_name,
free_nonglobal_var_names,
entity_info.future_features)
module, _, source_map = compiler.ast_to_object(
nodes, include_source_map=True)
module_name = module.__name__
converted_entity_info = _ConvertedEntityFactoryInfo(
module_name=module_name,
converted_name=converted_name,
factory_factory_name=factory_factory_name,
source_map=source_map)
_CACHE[key][subkey] = converted_entity_info
return converted_entity_info
def _instantiate(entity, converted_entity_info, free_nonglobal_var_names):
"""Creates a converted instance and binds it to match original entity."""
factory = converted_entity_info.get_factory()
# `factory` is currently bound to the empty module it was loaded from.
# It must instead be bound to the globals and closure from the original
# entity.
if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
entity_globals = entity.__globals__
entity_closure = entity.__closure__ or ()
elif hasattr(entity, '__module__'):
entity_globals = sys.modules[entity.__module__].__dict__
entity_closure = ()
assert len(entity_closure) == len(free_nonglobal_var_names)
# Fit the original entity's cells to match the order of factory's cells.
original_names_and_cells = dict(zip(free_nonglobal_var_names, entity_closure))
new_factory_cells = tuple(
original_names_and_cells[name] for name in factory.__code__.co_freevars)
bound_factory = types.FunctionType(
code=factory.__code__,
globals=entity_globals,
name=factory.__name__,
argdefs=(),
closure=new_factory_cells)
# Two other free vars: the internal "ag__" module and the source
# map. These are wired via the parameters of the factory.
converted_entity = bound_factory( # pylint:disable=not-callable
ag_internal, converted_entity_info.source_map,
converted_entity_info.get_module())
if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
# Attach the default argument to the converted function.
converted_entity.__defaults__ = entity.__defaults__
if hasattr(entity, '__kwdefaults__'):
converted_entity.__kwdefaults__ = entity.__kwdefaults__
return converted_entity
def convert(entity, program_ctx):
"""Converts an entity into an equivalent entity."""
if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
free_nonglobal_var_names = entity.__code__.co_freevars
else:
free_nonglobal_var_names = ()
for i, name in enumerate(free_nonglobal_var_names):
if (name == 'ag__' and
entity.__closure__[i].cell_contents is not ag_internal):
raise ValueError('entity {} uses the reserved symbol "{}"'.format(
entity, name))
# TODO(mdan): In extreme cases, other ag__ symbols may also be clobbered.
converted_entity_info = _convert_with_cache(entity, program_ctx,
free_nonglobal_var_names)
return _instantiate(entity, converted_entity_info, free_nonglobal_var_names)
def is_whitelisted_for_graph(o, check_call_override=True):
"""Checks whether an entity is whitelisted for use in graph mode.
Examples of whitelisted entities include all members of the tensorflow
package.
Args:
o: A Python entity.
check_call_override: Reserved for internal use. When set to `False`, it
disables the rule according to which classes are whitelisted if their
__call__ method is whitelisted.
Returns:
Boolean
"""
# TODO(b/120224672): Fix this.
if isinstance(o, functools.partial):
# tf_inspect.getmodule(functools.partial(...)) otherwise returns None since
# functools.partial objects do not have a __module__ attribute.
m = functools
else:
m = tf_inspect.getmodule(o)
# Examples of callables that lack a __module__ property include builtins.
if hasattr(m, '__name__'):
for rule in config.CONVERSION_RULES:
action = rule.get_action(m)
if action == config.Action.CONVERT:
logging.log(2, 'Not whitelisted: %s: %s', o, rule)
return False
elif action == config.Action.DO_NOT_CONVERT:
logging.log(2, 'Whitelisted: %s: %s', o, rule)
return True
if tf_inspect.isgeneratorfunction(o):
logging.warn(
'Entity %s appears to be a generator function. It will not be converted'
' by AutoGraph.', o)
logging.log(2, 'Whitelisted: %s: generator functions are not converted', o)
return True
if (check_call_override and not tf_inspect.isclass(o) and
hasattr(o, '__call__')):
# Callable objects: whitelisted if their __call__ method is.
# The type check avoids infinite recursion around the __call__ method
# of function objects.
if (type(o) != type(o.__call__)) and is_whitelisted_for_graph(o.__call__): # pylint: disable=unidiomatic-typecheck
logging.log(2, 'Whitelisted: %s: object __call__ whitelisted', o)
return True
owner_class = None
if tf_inspect.ismethod(o):
# Methods of whitelisted classes are also whitelisted, even if they are
# bound via user subclasses.
#
# For example, suppose `tf.Foo` has a method called `bar`, and `baz` is
# defined as below. `tf.Foo` is whitelisted. Then `baz.bar` is also
# whitelisted.
#
# class Custom(tf.Foo):
# pass
#
# baz = Custom()
#
# For the example above, if `Custom` did overload `bar`, then it would no
# longer be whitelisted.
owner_class = inspect_utils.getmethodclass(o)
if owner_class is not None:
if issubclass(owner_class, unittest.TestCase):
logging.log(2, 'Whitelisted: %s: method of TestCase subclass', o)
return True
owner_class = inspect_utils.getdefiningclass(o, owner_class)
if is_whitelisted_for_graph(owner_class, check_call_override=False):
logging.log(2, 'Whitelisted: %s: owner is whitelisted %s', o,
owner_class)
return True
if inspect_utils.isnamedtuple(o):
# Due to the way they're constructed, namedtuple types cannot be converted
# because they don't expose source code. But we assume they are safe for
# graph mode since they are just containers.
logging.log(2, 'Whitelisted: %s: named tuple', o)
return True
logging.log(2, 'Not whitelisted: %s: default rule', o)
return False
# TODO(mdan): Rename to convert_*_node to avoid confusion with convert.
def convert_entity_to_ast(o, program_ctx):
"""Compile a Python entity into equivalent TensorFlow.
Args:
o: A Python entity.
program_ctx: A ProgramContext object.
Returns:
A tuple (ast, new_name, namespace):
* ast: An AST representing an entity with interface equivalent to `o`,
but which when executed it creates TF a graph.
* new_name: The symbol name under which the new entity can be found.
* namespace: A dict mapping all symbols visible to the converted entity,
keyed by their symbol name.
Raises:
ValueError: if the entity type is not supported.
"""
logging.log(1, 'Converting %s', o)
if tf_inspect.isclass(o):
nodes, name, entity_info = convert_class_to_ast(o, program_ctx)
elif tf_inspect.isfunction(o):
nodes, name, entity_info = convert_func_to_ast(o, program_ctx)
elif tf_inspect.ismethod(o):
nodes, name, entity_info = convert_func_to_ast(o, program_ctx)
elif hasattr(o, '__class__'):
# Note: this should only be raised when attempting to convert the object
# directly. converted_call should still support it.
raise NotImplementedError(
'cannot convert entity "{}": object conversion is not yet'
' supported.'.format(o))
else:
raise ValueError(
'Entity "%s" has unsupported type "%s". Only functions and classes are '
'supported for now.' % (o, type(o)))
if logging.has_verbosity(2):
logging.log(2, 'Compiled output of %s:\n\n%s\n', o,
compiler.ast_to_source(nodes))
if logging.has_verbosity(4):
for n in nodes:
logging.log(4, 'Compiled AST of %s:\n\n%s\n\n', o,
pretty_printer.fmt(n, color=False))
return nodes, name, entity_info
def convert_class_to_ast(c, program_ctx):
"""Specialization of `convert_entity_to_ast` for classes."""
# TODO(mdan): Revisit this altogether. Not sure we still need it.
converted_members = {}
method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
members = tf_inspect.getmembers(c, predicate=method_filter)
if not members:
raise ValueError('cannot convert %s: no member methods' % c)
# TODO(mdan): Don't clobber namespaces for each method in one class namespace.
# The assumption that one namespace suffices for all methods only holds if
# all methods were defined in the same module.
# If, instead, functions are imported from multiple modules and then spliced
# into the class, then each function has its own globals and __future__
# imports that need to stay separate.
# For example, C's methods could both have `global x` statements referring to
# mod1.x and mod2.x, but using one namespace for C would cause a conflict.
# from mod1 import f1
# from mod2 import f2
# class C(object):
# method1 = f1
# method2 = f2
class_namespace = {}
future_features = None
for _, m in members:
# Only convert the members that are directly defined by the class.
if inspect_utils.getdefiningclass(m, c) is not c:
continue
(node,), _, entity_info = convert_func_to_ast(
m, program_ctx=program_ctx, do_rename=False)
class_namespace.update(entity_info.namespace)
converted_members[m] = node
# TODO(mdan): Similarly check the globals.
if future_features is None:
future_features = entity_info.future_features
elif frozenset(future_features) ^ frozenset(entity_info.future_features):
# Note: we can support this case if ever needed.
raise ValueError(
          'cannot convert {}: it has methods built with mismatched future'
' features: {} and {}'.format(c, future_features,
entity_info.future_features))
namer = naming.Namer(class_namespace)
class_name = namer.class_name(c.__name__)
  # Process any base classes: if the superclass is of a whitelisted type, an
# absolute import line is generated.
output_nodes = []
renames = {}
base_names = []
for base in c.__bases__:
if isinstance(object, base):
base_names.append('object')
continue
if is_whitelisted_for_graph(base):
alias = namer.new_symbol(base.__name__, ())
output_nodes.append(
gast.ImportFrom(
module=base.__module__,
names=[gast.alias(name=base.__name__, asname=alias)],
level=0))
else:
raise NotImplementedError(
'Conversion of classes that do not directly extend classes from'
' whitelisted modules is temporarily suspended. If this breaks'
' existing code please notify the AutoGraph team immediately.')
base_names.append(alias)
renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)
# Generate the definition of the converted class.
bases = [gast.Name(n, gast.Load(), None) for n in base_names]
class_def = gast.ClassDef(
class_name,
bases=bases,
keywords=[],
body=list(converted_members.values()),
decorator_list=[])
# Make a final pass to replace references to the class or its base classes.
# Most commonly, this occurs when making super().__init__() calls.
# TODO(mdan): Making direct references to superclass' superclass will fail.
class_def = qual_names.resolve(class_def)
renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)
class_def = ast_util.rename_symbols(class_def, renames)
output_nodes.append(class_def)
# TODO(mdan): Find a way better than forging this object.
entity_info = transformer.EntityInfo(
source_code=None,
source_file=None,
future_features=future_features,
namespace=class_namespace)
return output_nodes, class_name, entity_info
def _add_reserved_symbol(namespace, name, entity):
if name not in namespace:
namespace[name] = entity
elif namespace[name] != entity:
raise ValueError('The name "%s" is reserved and may not be used.' % name)
ag_internal = None
# TODO(mdan): Move into core or replace with an actual importable module.
def _add_self_references(namespace, autograph_module):
"""Adds namespace references to the module that exposes the api itself."""
global ag_internal
if ag_internal is None:
# Craft a module that exposes parts of the external API as well as certain
# internal modules.
ag_internal = imp.new_module('autograph')
ag_internal.__dict__.update(autograph_module.__dict__)
ag_internal.ConversionOptions = converter.ConversionOptions
ag_internal.STD = converter.STANDARD_OPTIONS
ag_internal.Feature = converter.Feature
ag_internal.utils = utils
ag_internal.function_scope = function_wrapping.function_scope
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
ag_internal.__dict__.update(special_functions.__dict__)
ag_internal.__dict__.update(operators.__dict__)
_add_reserved_symbol(namespace, 'ag__', ag_internal)
def convert_func_to_ast(f, program_ctx, do_rename=True):
"""Specialization of `convert_entity_to_ast` for callable functions."""
future_features = inspect_utils.getfutureimports(f)
node, source = parser.parse_entity(f, future_features=future_features)
logging.log(3, 'Source code of %s:\n\n%s\n', f, source)
# Parsed AST should contain future imports and one function def node.
# In general, the output of inspect.getsource is inexact for lambdas because
# it uses regex matching to adjust the exact location around the line number
# that CPython records. Then, the entire containing line is returned, which
# we may have trouble disambiguating. For example:
# x, y = lambda: 1, lambda: 2
if f.__name__ == '<lambda>':
nodes = ast_util.find_matching_definitions(node, f)
if len(nodes) != 1:
raise ValueError(
'Unable to identify source code of lambda function {}. It was'
' defined on this line: {}, which must contain a single lambda with'
' matching signature. To avoid ambiguity, define each lambda'
' in a separate expression.'.format(f, source))
node, = nodes
# TODO(znado): Place inside standard_analysis.
origin_info.resolve_entity(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = naming.Namer(namespace)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
future_features=future_features,
namespace=namespace)
context = converter.EntityContext(namer, entity_info, program_ctx)
node = node_to_graph(node, context)
if isinstance(node, gast.Lambda):
new_name = namer.new_symbol('tf__lambda', ())
node = gast.Assign(
targets=[gast.Name(new_name, gast.Store(), None)], value=node)
elif do_rename:
new_name = namer.function_name(f.__name__)
node.name = new_name
else:
new_name = f.__name__
assert node.name == new_name
return (node,), new_name, entity_info
def node_to_graph(node, context):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
Returns:
    A Python ast node, representing the converted code.
"""
# TODO(mdan): Insert list_comprehensions somewhere.
unsupported_features_checker.verify(node)
node = converter.standard_analysis(node, context, is_initial=True)
node = converter.apply_(node, context, arg_defaults)
node = converter.apply_(node, context, directives)
node = converter.apply_(node, context, break_statements)
if context.program.options.uses(converter.Feature.ASSERT_STATEMENTS):
node = converter.apply_(node, context, asserts)
# Note: sequencing continue canonicalization before for loop one avoids
# dealing with the extra loop increment operation that the for
# canonicalization creates.
node = converter.apply_(node, context, continue_statements)
node = converter.apply_(node, context, return_statements)
if context.program.options.uses(converter.Feature.LISTS):
node = converter.apply_(node, context, lists)
node = converter.apply_(node, context, slices)
node = converter.apply_(node, context, call_trees)
node = converter.apply_(node, context, control_flow)
node = converter.apply_(node, context, conditional_expressions)
node = converter.apply_(node, context, logical_expressions)
if context.program.options.uses(converter.Feature.AUTO_CONTROL_DEPS):
node = converter.apply_(node, context, side_effect_guards)
# TODO(mdan): If function scopes ever does more, the toggle will need moving.
if context.program.options.uses(converter.Feature.NAME_SCOPES):
node = converter.apply_(node, context, function_scopes)
return node
| tensorflow-master | tensorflow/python/autograph/impl/conversion.py |
# python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class ApiTest(test.TestCase):
def test_converted_call_kwonly_args(self):
def test_fn(*, a):
return a
x = api.converted_call(test_fn, None,
converter.ConversionOptions(recursive=True),
(), {'a': constant_op.constant(-1)})
self.assertEqual(-1, self.evaluate(x))
if __name__ == '__main__':
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
test.main()
| tensorflow-master | tensorflow/python/autograph/impl/api_py3_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains the user-facing API for AutoGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import inspect
import os
import pdb
import re
import sys
import textwrap
import traceback
from enum import Enum
# pylint:disable=g-bad-import-order
import six
# pylint:enable=g-bad-import-order
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.framework import errors_impl
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.python.util.tf_export import tf_export
def is_autograph_strict_conversion_mode():
return int(os.environ.get('AUTOGRAPH_STRICT_CONVERSION', '0')) > 0
# TODO(mdan): Export this symbol.
class AutoGraphError(Exception):
"""Base class for all AutoGraph exceptions."""
pass
class ConversionError(AutoGraphError):
"""Raised during the conversion process."""
pass
class StagingError(AutoGraphError):
"""Raised during the staging (i.e. Python execution) of converted code."""
pass
class _ErrorMetadata(errors.ErrorMetadataBase):
"""AutoGraph-specific error metadata. See base class."""
def create_exception(self, preferred_type):
if preferred_type == errors_impl.OpError:
# Best-effort unpacking of OpError exceptions.
# TODO(mdan): Use a mechanism that is more future-proof.
t = type(self.cause)
init_argspec = tf_inspect.getfullargspec(t.__init__)
message = self.get_message()
      init_args = tuple(init_argspec.args)
# At the time of this writing, TF errors either take 3 or 4 arguments,
# with the fourth being error_code.
if init_args == ('self', 'node_def', 'op', 'message', 'error_code'):
return t(
node_def=self.cause.node_def,
op=self.cause.op,
message=message,
error_code=self.error_code)
elif init_args == ('self', 'node_def', 'op', 'message'):
if 'error_code' in init_argspec.kwonlyargs:
return t(
node_def=self.cause.node_def,
op=self.cause.op,
message=message,
              error_code=self.error_code)
else:
return t(
node_def=self.cause.node_def, op=self.cause.op, message=message)
elif preferred_type in (AutoGraphError, ConversionError, StagingError):
return preferred_type(self.get_message())
exc = super(_ErrorMetadata, self).create_exception(preferred_type)
if exc is not None:
return exc
# Note: While changing an error's message property to change the message it
# displays will probably work a lot of times, there is no standard way in
# Python to do that. The safest way is therefore to create a new exception.
# For user defined exceptions, we could define an interface that allowed
# them to work under this mechanism.
return StagingError(self.get_message())
class StackTraceMapper(tf_stack.StackTraceMapper):
"""Remaps generated code to code it originated from."""
def __init__(self, converted_fn):
self._source_map = converted_fn.ag_source_map
def map(self, filename, lineno, name):
loc = origin_info.LineLocation(filename=filename, lineno=lineno)
if loc not in self._source_map:
return filename, lineno, name
origin = self._source_map[loc]
return origin.loc.filename, origin.loc.lineno, origin.function_name
def tf_convert(f, ctx, convert_by_default=True, force_conversion=False):
"""Decorator that applies AutoGraph to a function.
Use in internal APIs.
  This API is suitable for higher-order functions internal to the TensorFlow
  API, and more generally any function to which AutoGraph is not applied.
  Guidance: `convert` was a decorator meant for use directly by developers, and
  will soon be deprecated in favor of `tf.function`. `tf_convert` is to be
  called from higher-order functions internal to TF.
Args:
f: Callable.
ctx: ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used.
convert_by_default: bool, whether to use AutoGraph when the context doesn't
specify.
force_conversion: bool, whether to ignore the conversion whitelist. See
ConversionOptions.force_conversion.
Returns:
    Either `f` or the converted version of `f`.
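  _Example Usage_ (illustrative sketch; `wrap_and_call` is a hypothetical
  internal helper, not an actual TensorFlow API)
  ```python
  def wrap_and_call(user_fn, x):
    user_fn = tf_convert(user_fn, ag_ctx.control_status_ctx())
    return user_fn(x)
  ```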
"""
if hasattr(f, '__ag_compiled'):
return f
f_wrapper = f
decorators, f = tf_decorator.unwrap(f)
apply_autograph = ((ctx.status == ag_ctx.Status.ENABLED) or
(convert_by_default and
ctx.status == ag_ctx.Status.UNSPECIFIED))
if apply_autograph:
# TODO(mdan): Grab features from context.
wrapper = convert(recursive=True, force_conversion=force_conversion)(f)
else:
wrapper = do_not_convert(f)
if decorators:
wrapper = tf_decorator.rewrap(f_wrapper, f, wrapper)
setattr(wrapper, '__ag_compiled', True)
return wrapper
# TODO(mdan): Make private.
def convert(recursive=False, optional_features=None, force_conversion=True):
"""Decorator that compiles a function to use TensorFlow ops.
The decorator is dynamic - it recompiles the target whenever the decorated
function is called. This means the parameter values are known at conversion.
It also means that repeated calls with different types of parameters will be
correctly processed.
Args:
recursive: bool, whether to recursively convert any functions or classes
that the converted function may use.
    optional_features: converter.Feature, allows toggling optional or
experimental features. When set to None, only the core features are
enabled.
force_conversion: bool, whether to ignore the conversion whitelist. See
ConversionOptions.force_conversion.
Returns:
Callable, a decorator that converts the given function into an equivalent
function that uses TensorFlow ops.
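  _Example Usage_ (illustrative sketch; `tf` is the TensorFlow module)
  ```python
  @convert(recursive=True)
  def f(x, s):
    while tf.reduce_sum(x) > s:
      x //= 2
    return x
  ```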
"""
def decorator(f):
"""Decorator implementation."""
def wrapper(*args, **kwargs):
"""Wrapper that calls the converted version of f."""
with ag_ctx.ControlStatusCtx(
status=ag_ctx.Status.ENABLED, options=optional_features):
try:
return converted_call(
f, None,
converter.ConversionOptions(
recursive=recursive,
force_conversion=force_conversion,
optional_features=optional_features,
), args, kwargs)
except Exception as e: # pylint:disable=broad-except
if hasattr(e, 'ag_error_metadata'):
raise e.ag_error_metadata.to_exception(type(e))
else:
raise
if inspect.isfunction(f) or inspect.ismethod(f):
wrapper = functools.update_wrapper(wrapper, f)
decorated_wrapper = tf_decorator.make_decorator(f, wrapper)
# Sometimes the decorator is just desugared, making it impossible to detect.
# This attribute makes detection easier.
setattr(decorated_wrapper, '__ag_compiled', True)
return decorated_wrapper
return decorator
class RunMode(Enum):
"""Specifies the way a converted function or method should be executed in TF.
Attributes:
* GRAPH: Call this function directly, as-is. This is suitable for functions
that were already designed for TF graphs and contain ops.
* PY_FUNC: Wrap this function into a py_func op. This is suitable for code
that will only run correctly in Python, for example code that renders to
the display, reads keyboard input, etc.
"""
GRAPH = 1
PY_FUNC = 2
def do_not_convert_internal(f):
"""Decorator that marks internal functions which do not need conversion."""
setattr(f, '__ag_compiled', True)
return f
@tf_export('autograph.experimental.do_not_convert')
def do_not_convert(func=None, run_as=RunMode.GRAPH, return_dtypes=None):
"""Decorator that suppresses the conversion of a function.
See also: docs/pyfunc_dtypes.md
Args:
func: function to decorate.
run_as: RunMode, specifies how to use the function in TensorFlow.
return_dtypes: Optional[Iterable[ Union[tf.DType,
utils.py_func.MatchDType]]], the return data types of the converted
function, if run_as is RunMode.PY_FUNC. Ignored otherwise. May be set to
None if the function has no return values.
Returns:
If `func` is not None, returns a `Callable` which is equivalent to
`func`, but is not converted by AutoGraph.
If `func` is None, returns a decorator that, when invoked with a
single `func` argument, returns a `Callable` equivalent to the
above case.
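  _Example Usage_ (illustrative sketch; `tf` is the TensorFlow module)
  ```python
  @do_not_convert(run_as=RunMode.GRAPH)
  def already_graph_code(x):
    return tf.negative(x)
  ```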
"""
if func is None:
return functools.partial(
do_not_convert,
run_as=run_as,
return_dtypes=return_dtypes)
@functools.wraps(func)
def graph_wrapper(*args, **kwargs):
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
return func(*args, **kwargs)
@functools.wraps(func)
def py_func_wrapper(*args, **kwargs):
if kwargs:
raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
# TODO(mdan): Add support for kwargs.
return py_func.wrap_py_func(
func, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
if run_as == RunMode.GRAPH:
wrapper = graph_wrapper
elif run_as == RunMode.PY_FUNC:
wrapper = py_func_wrapper
else:
raise ValueError('unknown value for run_as: %s' % run_as)
setattr(wrapper, '__ag_compiled', True)
return wrapper
def _attach_metadata(e, f, converted):
"""Augments an error with the metadata necessary for rewrite."""
if hasattr(e, 'ag_pass_through'):
return
metadata = getattr(e, 'ag_error_metadata', None)
source_map = f.ag_source_map if converted else {}
if metadata is None:
logging.log(
1, 'Caught error in %s (converted=%s)', f, converted, exc_info=True)
message = '{}: {}'.format(e.__class__.__name__, e)
else:
message = None
cause_tb = traceback.extract_tb(sys.exc_info()[2])[1:]
e.ag_error_metadata = _ErrorMetadata(cause_tb, metadata, message, source_map)
def _call_unconverted(f, args, kwargs):
"""Calls the original function without converting with AutoGraph."""
if inspect_utils.istfmethodtarget(f):
return f.__self__.call(args, kwargs)
try:
if kwargs is not None:
return f(*args, **kwargs)
else:
return f(*args)
except Exception as e: # pylint:disable=broad-except
_attach_metadata(e, f, False)
raise
def _is_known_loaded_type(f, module_name, entity_name):
"""Tests whether the function or method is an instance of a known type."""
if (module_name not in sys.modules or
not hasattr(sys.modules[module_name], entity_name)):
return False
type_entity = getattr(sys.modules[module_name], entity_name)
if isinstance(f, type_entity):
    # The method is of this type. Example:
#
# o = ClassType()
# function(o.method)()
return True
# Note: inspect is required here, to avoid unpacking tf.function decorators.
if inspect.ismethod(f):
    # The unbound method is of this type. Example:
#
# class ClassType:
# @function
# def method(self):
# ...
# o = ClassType()
# o.method()
if isinstance(f.__func__, type_entity):
return True
return False
def converted_call(f, owner, options, args, kwargs):
"""Compiles a function call inline. For internal use only."""
if owner is not None:
if not isinstance(f, str):
raise ValueError(
'When owner is specified, the function name must be specified as'
' a string: {}'.format(f))
owner_attr = f
# Special case when the owner is a 'super' object. In that case lookups of
# dynamic attributes won't work. See
# inspect_utils.SuperWrapperForDynamicAttrs.
if isinstance(owner, super):
owner = inspect_utils.SuperWrapperForDynamicAttrs(owner)
f = getattr(owner, f)
if logging.has_verbosity(1):
if owner is not None:
composite_desc = '("{}" attr of {})'.format(owner_attr, owner)
else:
composite_desc = ''
logging.log(1, 'Converted call: %s %s\n args: %s\n kwargs: %s\n', f,
composite_desc, args, kwargs)
if inspect_utils.isbuiltin(f):
if f is eval:
return py_builtins.eval_in_original_context(f, args, 1)
if kwargs:
return py_builtins.overload_of(f)(*args, **kwargs)
else:
return py_builtins.overload_of(f)(*args)
# TODO(mdan): Clean up the naming inconsistency.
if hasattr(f, 'autograph_info__') or hasattr(f, '__ag_compiled'):
logging.log(2, 'Permanently whitelisted: %s: already converted', f)
return _call_unconverted(f, args, kwargs)
# TODO(b/122265385): Remove this bypass.
if (_is_known_loaded_type(f, 'wrapt', 'FunctionWrapper') or
_is_known_loaded_type(f, 'wrapt', 'BoundFunctionWrapper')):
logging.warn(
'Entity {} appears to be decorated by wrapt, which is not yet supported'
' by AutoGraph. The function will be called without transformation.'
' You may however apply AutoGraph before the decorator.'.format(f))
logging.log(2, 'Permanently whitelisted: %s: wrapt decorated', f)
return _call_unconverted(f, args, kwargs)
if _is_known_loaded_type(f, 'functools', '_lru_cache_wrapper'):
logging.log(2, 'Permanently whitelisted: %s: lru_cache', f)
return _call_unconverted(f, args, kwargs)
# Constructors are permanently whitelisted.
# TODO(mdan): Toggle as experimental feature instead.
# TODO(b/124016764): Remove this limitation.
if tf_inspect.isclass(f):
logging.log(2, 'Permanently whitelisted: %s: constructor', f)
return _call_unconverted(f, args, kwargs)
# Other built-in modules are permanently whitelisted.
# TODO(mdan): Figure out how to do this consistently for all stdlib modules.
if any(
f in m.__dict__.values() for m in (collections, pdb, copy, inspect, re)):
logging.log(2, 'Permanently whitelisted: %s: part of builtin module', f)
return _call_unconverted(f, args, kwargs)
# Custom ops and kernels are also permanently whitelisted.
# See tensorflow.framework.load_library.
if (hasattr(f, '__module__') and
hasattr(f.__module__, '_IS_TENSORFLOW_PLUGIN')):
logging.log(2, 'Permanently whitelisted: %s: TensorFlow plugin', f)
return _call_unconverted(f, args, kwargs)
if not options.force_conversion and conversion.is_whitelisted_for_graph(f):
return _call_unconverted(f, args, kwargs)
# internal_convert_user_code is for example turned off when issuing a dynamic
# call conversion from generated code while in nonrecursive mode. In that
# case we evidently don't want to recurse, but we still have to convert
# things like builtins.
if not options.internal_convert_user_code:
return _call_unconverted(f, args, kwargs)
# TODO(mdan): Move this entire block inside to_graph.
try: # Begin of transformation error guards
# Unwrap functools.partial objects
# TODO(mdan): Consider sharing unwrapping logic with tf_inspect.
while isinstance(f, functools.partial):
args = f.args + args
new_kwargs = {}
if f.keywords is not None:
new_kwargs.update(f.keywords)
if kwargs is not None:
new_kwargs.update(kwargs)
kwargs = new_kwargs
f = f.func
if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
# Regular functions
target_entity = f
f_self = inspect_utils.getmethodself(f)
# TODO(b/119246461): This may be more elegantly handled using __get__?
if f_self is not None:
effective_args = (f_self,) + args
else:
effective_args = args
elif hasattr(f, '__call__') and hasattr(f, '__class__'):
# Callable objects
target_entity = f.__call__
effective_args = (f,) + args
elif tf_inspect.isclass(f):
# Constructors
      # Note: Until we support class constructors, and enable whole-class
# conversion with an experimental flag, this branch is dead code.
# TODO(mdan): Consider removing unless there is a compelling use case.
target_entity = f
effective_args = args
else:
target_entity = f
raise NotImplementedError('unknown callable type "%s"' % type(f))
if not tf_inspect.isclass(target_entity):
if not hasattr(target_entity, '__code__'):
logging.log(2, 'Permanently whitelisted: %s: native binding',
target_entity)
return _call_unconverted(f, args, kwargs)
elif (hasattr(target_entity.__code__, 'co_filename') and
target_entity.__code__.co_filename == '<string>'):
# TODO(mdan): __globals__['txt'] might work in Py3.
logging.log(2, 'Permanently whitelisted: %s: dynamic code (exec?)',
target_entity)
return _call_unconverted(f, args, kwargs)
converted_f = to_graph(
target_entity,
recursive=options.recursive,
experimental_optional_features=options.optional_features)
if logging.has_verbosity(2):
logging.log(2, 'Defaults of %s : %s', converted_f,
converted_f.__defaults__)
if six.PY3:
logging.log(2, 'KW defaults of %s : %s',
converted_f, converted_f.__kwdefaults__)
if kwargs is not None:
callargs = tf_inspect.getcallargs(converted_f, *effective_args,
**kwargs)
else:
callargs = tf_inspect.getcallargs(converted_f, *effective_args)
formatted_callargs = '\n'.join(
' {}: {}'.format(k, v) for k, v in callargs.items())
logging.log(2, 'Calling %s with\n%s\n', converted_f, formatted_callargs)
except Exception as e: # pylint:disable=broad-except
logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
if is_autograph_strict_conversion_mode():
raise
logging.warn(
'Entity %s could not be transformed and will be executed as-is.'
' Please report this to the AutoGraph team. When filing the bug, set'
' the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and'
' attach the full output. Cause: %s', target_entity, e)
return _call_unconverted(f, args, kwargs)
with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
try:
if kwargs is not None:
result = converted_f(*effective_args, **kwargs)
else:
result = converted_f(*effective_args)
except Exception as e:
_attach_metadata(e, converted_f, True)
raise
return result
@tf_export('autograph.to_graph', v1=[])
def to_graph(entity, recursive=True, experimental_optional_features=None):
"""Converts a Python entity into a TensorFlow graph.
Also see: `tf.autograph.to_code`, `tf.function`.
Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
Python code to TensorFlow graph code. It does not implement any caching,
variable management or create any actual ops, and is best used where greater
control over the generated TensorFlow graph is desired. Another difference
from `tf.function` is that `to_graph` will not wrap the graph into a
TensorFlow function or a Python callable. Internally, `tf.function` uses
`to_graph`.
_Example Usage_
```python
def foo(x):
if x > 0:
y = x * x
else:
y = -x
return y
converted_foo = to_graph(foo)
x = tf.constant(1)
y = converted_foo(x) # converted_foo is a TensorFlow Op-like.
assert is_tensor(y)
```
Supported Python entities include:
* functions
* classes
* object methods
Functions are converted into new functions with converted code.
Classes are converted by generating a new class whose methods use converted
code.
  Methods are converted into unbound functions that have an additional first
argument called `self`.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
Same as `entity`, the converted Python function or class.
Raises:
ValueError: If the entity could not be converted.
"""
try:
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(
recursive=recursive,
optional_features=experimental_optional_features),
autograph_module=tf_inspect.getmodule(to_graph))
return conversion.convert(entity, program_ctx)
except (ValueError, AttributeError, KeyError, NameError, AssertionError) as e:
logging.error(1, 'Error converting %s', entity, exc_info=True)
raise ConversionError('converting {}: {}: {}'.format(
entity, e.__class__.__name__, str(e)))
@tf_export(v1=['autograph.to_graph'])
def to_graph_v1(entity,
recursive=True,
arg_values=None,
arg_types=None,
experimental_optional_features=None):
"""Converts a Python entity into a TensorFlow graph.
Also see: `tf.autograph.to_code`, `tf.function`.
Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
Python code to TensorFlow graph code. It does not implement any caching,
variable management or create any actual ops, and is best used where greater
control over the generated TensorFlow graph is desired. Another difference
from `tf.function` is that `to_graph` will not wrap the graph into a
TensorFlow function or a Python callable. Internally, `tf.function` uses
`to_graph`.
_Example Usage_
```python
def foo(x):
if x > 0:
y = x * x
else:
y = -x
return y
converted_foo = to_graph(foo)
x = tf.constant(1)
y = converted_foo(x) # converted_foo is a TensorFlow Op-like.
assert is_tensor(y)
```
Supported Python entities include:
* functions
* classes
* object methods
Functions are converted into new functions with converted code.
Classes are converted by generating a new class whose methods use converted
code.
  Methods are converted into unbound functions that have an additional first
argument called `self`.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
arg_values: Deprecated.
arg_types: Deprecated.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
Same as `entity`, the converted Python function or class.
Raises:
ValueError: If the entity could not be converted.
"""
del arg_types
del arg_values
return to_graph(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features)
@tf_export(v1=['autograph.to_code'])
def to_code_v1(entity,
recursive=True,
arg_values=None,
arg_types=None,
indentation=' ',
experimental_optional_features=None):
"""Similar to `to_graph`, but returns Python source code as a string.
Also see: `tf.autograph.to_graph`.
  `to_code` returns the Python source code that can be used to generate a
TensorFlow graph that is functionally identical to the input Python code.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
arg_values: Deprecated.
arg_types: Deprecated.
indentation: Deprecated.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
The converted code as string.
"""
del arg_values
del arg_types
del indentation
return to_code(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features)
@tf_export('autograph.to_code', v1=[])
def to_code(entity, recursive=True, experimental_optional_features=None):
"""Similar to `to_graph`, but returns Python source code as a string.
Also see: `tf.autograph.to_graph`.
  `to_code` returns the Python source code that can be used to generate a
TensorFlow graph that is functionally identical to the input Python code.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value. Controls the use of optional
features in the conversion process.
Returns:
The converted code as string.
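  _Example Usage_ (illustrative sketch)
  ```python
  def f(x):
    if x < 0:
      x = -x
    return x
  print(to_code(f))  # Prints the generated Python source as a string.
  ```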
"""
source = tf_inspect.getsource(
to_graph(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features))
return textwrap.dedent(source)
| tensorflow-master | tensorflow/python/autograph/impl/api.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
import threading
import gast
import six
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_whitelisted_for_graph(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_whitelisted_for_graph(test_fn))
self.assertTrue(conversion.is_whitelisted_for_graph(utils))
self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant))
def test_is_whitelisted_for_graph_tensorflow_like(self):
tf_like = imp.new_module('tensorflow_foo')
def test_fn():
pass
tf_like.test_fn = test_fn
test_fn.__module__ = tf_like
self.assertFalse(conversion.is_whitelisted_for_graph(tf_like.test_fn))
def test_is_whitelisted_for_graph_callable_whitelisted_call(self):
whitelisted_mod = imp.new_module('test_whitelisted_call')
sys.modules['test_whitelisted_call'] = whitelisted_mod
config.CONVERSION_RULES = ((config.DoNotConvert('test_whitelisted_call'),) +
config.CONVERSION_RULES)
class TestClass(object):
def __call__(self):
pass
def whitelisted_method(self):
pass
TestClass.__module__ = 'test_whitelisted_call'
if six.PY2:
TestClass.__call__.__func__.__module__ = 'test_whitelisted_call'
else:
TestClass.__call__.__module__ = 'test_whitelisted_call'
class Subclass(TestClass):
def converted_method(self):
pass
tc = Subclass()
self.assertTrue(conversion.is_whitelisted_for_graph(TestClass.__call__))
self.assertTrue(conversion.is_whitelisted_for_graph(tc))
self.assertTrue(conversion.is_whitelisted_for_graph(tc.__call__))
self.assertTrue(conversion.is_whitelisted_for_graph(tc.whitelisted_method))
self.assertFalse(conversion.is_whitelisted_for_graph(Subclass))
self.assertFalse(conversion.is_whitelisted_for_graph(tc.converted_method))
def test_convert_entity_to_ast_unsupported_types(self):
with self.assertRaises(NotImplementedError):
program_ctx = self._simple_program_ctx()
conversion.convert_entity_to_ast('dummy', program_ctx)
def test_convert_entity_to_ast_callable(self):
b = 2
def f(a):
return a + b
program_ctx = self._simple_program_ctx()
nodes, name, info = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertIs(info.namespace['b'], b)
def test_convert_entity_to_ast_function_with_defaults(self):
b = 2
c = 1
def f(a, d=c + 1):
return a + b + d
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertEqual(
compiler.ast_to_source(fn_node.args.defaults[0]).strip(), 'None')
def test_convert_entity_to_ast_call_tree(self):
def g(a):
return a
def f(a):
return g(a)
program_ctx = self._simple_program_ctx()
nodes, _, _ = conversion.convert_entity_to_ast(f, program_ctx)
f_node, = nodes
self.assertEqual('tf__f', f_node.name)
def test_convert_entity_to_ast_class_hierarchy(self):
class TestBase(object):
def __init__(self, x='base'):
self.x = x
def foo(self):
return self.x
def bar(self):
return self.x
class TestSubclass(TestBase):
def __init__(self, y):
super(TestSubclass, self).__init__('sub')
self.y = y
def foo(self):
return self.y
def baz(self):
return self.y
program_ctx = self._simple_program_ctx()
with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'):
conversion.convert_entity_to_ast(TestSubclass, program_ctx)
def test_convert_entity_to_ast_class_hierarchy_whitelisted(self):
class TestSubclass(training.Model):
def __init__(self, y):
super(TestSubclass, self).__init__()
self.built = False
def call(self, x):
return 3 * x
program_ctx = self._simple_program_ctx()
(import_node, class_node), name, _ = conversion.convert_entity_to_ast(
TestSubclass, program_ctx)
self.assertEqual(import_node.names[0].name, 'Model')
self.assertEqual(name, 'TfTestSubclass')
self.assertEqual(class_node.name, 'TfTestSubclass')
def test_convert_entity_to_ast_lambda(self):
b = 2
f = lambda x: b * x if x > 0 else -x
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_entity_to_ast_multiple_lambdas(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda y: b * y)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['a'], a)
def test_convert_entity_to_ast_multiple_lambdas_ambiguous_definitions(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda x: b * x)
program_ctx = self._simple_program_ctx()
with self.assertRaises(ValueError):
conversion.convert_entity_to_ast(f, program_ctx)
def test_convert_entity_to_ast_lambda_code_with_garbage(self):
# pylint:disable=g-long-lambda
f = ( # intentional wrap
lambda x: (
x # intentional wrap
+ 1),)[0]
# pylint:enable=g-long-lambda
program_ctx = self._simple_program_ctx()
(fn_node,), name, _ = conversion.convert_entity_to_ast(f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
def test_convert_entity_to_ast_nested_functions(self):
b = 2
def f(x):
def g(x):
return b * x
return g(x)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual(fn_node.name, 'tf__f')
self.assertEqual('tf__f', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_concurrency(self):
def test_fn():
pass
generated_file_names = []
def conversion_thread():
new_f = conversion.convert(test_fn, self._simple_program_ctx())
generated_file_names.append(new_f.__code__.co_filename)
threads = tuple(
threading.Thread(target=conversion_thread) for _ in range(10))
for t in threads:
t.start()
for t in threads:
t.join()
# Races would potentially create multiple files (non-deterministically,
# but with high likelihood).
self.assertEqual(len(set(generated_file_names)), 1)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/impl/conversion_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import gc
import imp
import os
import re
import textwrap
import types
import numpy as np
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
tf = utils.fake_tf()
global_n = 2
class TestResource(object):
def __init__(self):
self.x = 3
class ApiTest(test.TestCase):
@test_util.run_deprecated_v1
def test_decorator_recursive(self):
class TestClass(object):
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
with self.cached_session() as sess:
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
@test_util.run_deprecated_v1
def test_decorator_not_recursive(self):
class TestClass(object):
def called_member(self, a):
return tf.negative(a)
@api.convert(recursive=False)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
with self.cached_session() as sess:
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
@test_util.run_deprecated_v1
def test_convert_then_do_not_convert_graph(self):
class TestClass(object):
@api.do_not_convert(run_as=api.RunMode.GRAPH)
def called_member(self, a):
return tf.negative(a)
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant((2, 4)), constant_op.constant(1),
constant_op.constant(-2))
self.assertAllEqual((0, 1), self.evaluate(x))
@test_util.run_deprecated_v1
def test_convert_then_do_not_convert_py_func(self):
class TestClass(object):
@api.do_not_convert(
run_as=api.RunMode.PY_FUNC, return_dtypes=py_func.MatchDType(1))
def called_member(self, a):
return np.negative(a)
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
y = self.called_member(a)
# set_shape works around while_loop's limitations.
# TODO(mdan): Allow specifying shapes (or ShapeLike) instead.
y.set_shape(a.shape)
x //= y
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant((2, 4)), constant_op.constant(1),
constant_op.constant(-2))
self.assertAllEqual((0, 1), self.evaluate(x))
@test_util.run_deprecated_v1
def test_decorator_calls_decorated(self):
class TestClass(object):
@api.convert()
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
with self.cached_session() as sess:
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_decorator_preserves_argspec(self):
class TestClass(object):
def test_method(self, a):
if a < 0:
a = -a
return a
test_method_converted = api.convert()(test_method)
tc = TestClass()
self.assertListEqual(
list(tf_inspect.getfullargspec(tc.test_method)),
list(tf_inspect.getfullargspec(tc.test_method_converted)))
def test_do_not_convert_argspec(self):
class TestClass(object):
def test_method(self, x, y):
z = x + y
return z
test_method_whitelisted = api.do_not_convert(test_method)
tc = TestClass()
self.assertTrue(tf_inspect.ismethod(tc.test_method_whitelisted))
# Because the wrapped function is not generated, we can't preserve its
# arg spec.
self.assertEqual((),
tuple(function_utils.fn_args(tc.test_method_whitelisted)))
@test_util.run_deprecated_v1
def test_convert_call_site_decorator(self):
class TestClass(object):
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while tf.reduce_sum(x) > s:
x //= api.converted_call(self.called_member, None,
converter.ConversionOptions(recursive=True),
(a,), {})
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_converted_call_builtin(self):
x = api.converted_call(range, None,
converter.ConversionOptions(recursive=True), (3,),
{})
self.assertEqual((0, 1, 2), tuple(x))
x = api.converted_call('compile', re,
converter.ConversionOptions(recursive=True),
('mnas_v4_a.*\\/.*(weights|kernel):0$',), {})
self.assertIsNotNone(x.match('mnas_v4_a/weights:0'))
def test_converted_call_function(self):
def test_fn(x):
if x < 0:
return -x
return x
x = api.converted_call(test_fn, None,
converter.ConversionOptions(recursive=True),
(constant_op.constant(-1),), {})
self.assertEqual(1, self.evaluate(x))
@test_util.run_v1_only('b/120545219')
def test_converted_call_functools_partial(self):
def test_fn(x, y, z):
if x < 0:
return -x, -y, -z
return x, y, z
x = api.converted_call(
functools.partial(test_fn, constant_op.constant(-1), z=-3), None,
converter.ConversionOptions(recursive=True),
(constant_op.constant(-2),), {})
self.assertEqual((1, 2, 3), self.evaluate(x))
x = api.converted_call(
functools.partial(
functools.partial(test_fn, constant_op.constant(-1)), z=-3), None,
converter.ConversionOptions(recursive=True),
(constant_op.constant(-2),), {})
self.assertEqual((1, 2, 3), self.evaluate(x))
def test_converted_call_method_explicit_owner(self):
# TODO(mdan): Implement.
pass
def test_converted_call_method_explicit_super_owner(self):
# TODO(mdan): Implement.
pass
def test_converted_call_method(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc.test_method, None,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_synthetic_method(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_function(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
test_method = types.MethodType(test_function, tc)
x = api.converted_call(test_method, None,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_method_wrapper(self):
class TestClass(object):
def foo(self):
pass
tc = TestClass()
# `method.__get__()` returns a so-called method-wrapper.
wrapper = api.converted_call('__get__', tc.foo,
converter.ConversionOptions(recursive=True),
(tc,), {})
self.assertEqual(wrapper, tc.foo)
def test_converted_call_method_as_object_attribute(self):
class AnotherClass(object):
def __init__(self):
self.another_class_attr = constant_op.constant(1)
def method(self):
if self.another_class_attr > 0:
return self.another_class_attr + 1
return self.another_class_attr + 10
class TestClass(object):
def __init__(self, another_obj_method):
self.another_obj_method = another_obj_method
obj = AnotherClass()
tc = TestClass(obj.method)
x = api.converted_call('another_obj_method', tc,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(self.evaluate(x), 2)
def test_converted_call_method_converts_recursively(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def other_method(self):
if self.x < 0:
return -self.x
return self.x
def test_method(self):
return self.other_method()
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc.test_method, None,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_method_by_class(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(TestClass.test_method, None,
converter.ConversionOptions(recursive=True), (tc,),
{})
self.assertEqual(1, self.evaluate(x))
def test_converted_call_callable_object(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def __call__(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc, None,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(1, self.evaluate(x))
@test_util.run_deprecated_v1
def test_converted_call_constructor(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = api.converted_call(TestClass, None,
converter.ConversionOptions(recursive=True),
(constant_op.constant(-1),), {})
# tc is still a TestClass - constructors are whitelisted.
# TODO(b/124016764): Support this use case.
# The error below is specific to the `if` statement not being converted.
with self.assertRaisesRegex(
TypeError, 'Using a `tf.Tensor` as a Python `bool`'):
tc.test_method()
def test_converted_call_mangled_properties(self):
class TestClass(object):
def __init__(self, x):
self.__private = x
def test_method(self):
if self.__private < 0:
return self.__private
return self.__private
tc = TestClass(constant_op.constant(-1))
# The error below is specific to the `if` statement not being converted.
with self.assertRaisesRegex(NotImplementedError, 'Mangled names'):
api.converted_call('test_method', tc,
converter.ConversionOptions(recursive=True), (), {})
tc.test_method()
def test_converted_call_already_converted(self):
def f(x):
return x == 0
x = api.converted_call(f, None, converter.ConversionOptions(recursive=True),
(constant_op.constant(0),), {})
self.assertTrue(self.evaluate(x))
converted_f = api.to_graph(
f, experimental_optional_features=converter.Feature.ALL)
x = api.converted_call(converted_f, None,
converter.ConversionOptions(recursive=True),
(constant_op.constant(0),), {})
self.assertTrue(self.evaluate(x))
def test_converted_call_then_already_converted_dynamic(self):
@api.convert()
def g(x):
if x > 0:
return x
else:
return -x
def f(g, x):
return g(x)
x = api.converted_call(f, None, converter.ConversionOptions(recursive=True),
(g, constant_op.constant(1)), {})
self.assertEqual(self.evaluate(x), 1)
def test_converted_call_forced_when_explicitly_whitelisted(self):
@api.do_not_convert()
def f(x):
return x + 1
x = api.converted_call(
f, None,
converter.ConversionOptions(recursive=True, force_conversion=True),
(constant_op.constant(0),), {})
self.assertTrue(self.evaluate(x))
converted_f = api.to_graph(
f, experimental_optional_features=converter.Feature.ALL)
x = api.converted_call(converted_f, None,
converter.ConversionOptions(recursive=True), (0,),
{})
self.assertEqual(x, 1)
@test_util.run_deprecated_v1
def test_converted_call_no_user_code(self):
def f(x):
return len(x)
opts = converter.ConversionOptions(internal_convert_user_code=False)
# f should not be converted, causing len to error out.
with self.assertRaisesRegexp(Exception,
'object of type \'Tensor\' has no len()'):
api.converted_call(f, None, opts, (constant_op.constant([0]),), {})
# len on the other hand should work fine.
x = api.converted_call(len, None, opts, (constant_op.constant([0]),), {})
# The constant has static shape so the result is a primitive not a Tensor.
self.assertEqual(x, 1)
def test_converted_call_no_kwargs_allowed(self):
def f(*args):
# Note: np.broadcast rejects any **kwargs, even *{}
return np.broadcast(args[:1])
opts = converter.ConversionOptions(internal_convert_user_code=False)
self.assertIsNotNone(api.converted_call(f, None, opts, (1, 2, 3, 4), None))
def test_converted_call_whitelisted_method(self):
opts = converter.ConversionOptions(recursive=True)
model = sequential.Sequential([
core.Dense(2)
])
x = api.converted_call(model.call, None, opts,
(constant_op.constant([[0.0]]),), {'training': True})
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))
def test_converted_call_whitelisted_method_via_owner(self):
opts = converter.ConversionOptions(recursive=True)
model = sequential.Sequential([
core.Dense(2)
])
x = api.converted_call('call', model, opts,
(constant_op.constant([[0.0]]),), {'training': True})
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))
def test_converted_call_numpy(self):
opts = converter.ConversionOptions(recursive=True)
x = api.converted_call(np.arange, None, opts, (5,), {})
self.assertAllEqual(x, list(range(5)))
def test_converted_call_tf_op_forced(self):
# TODO(mdan): Add the missing level of support to LOGICAL_EXPRESSIONS.
opts = converter.ConversionOptions(
force_conversion=True, optional_features=None)
x = api.converted_call(gen_math_ops.add, None, opts, (1, 1), {})
self.assertAllEqual(self.evaluate(x), 2)
def test_converted_call_exec_generated_code(self):
temp_mod = imp.new_module('test_module')
dynamic_code = '''
def foo(x):
return x + 1
'''
exec(textwrap.dedent(dynamic_code), temp_mod.__dict__) # pylint:disable=exec-used
opts = converter.ConversionOptions(optional_features=None)
x = api.converted_call(temp_mod.foo, None, opts, (1,), {})
self.assertAllEqual(x, 2)
def test_converted_call_namedtuple(self):
opts = converter.ConversionOptions(recursive=True)
x = api.converted_call(collections.namedtuple, None, opts,
('TestNamedtuple', ('a', 'b')), {})
self.assertTrue(inspect_utils.isnamedtuple(x))
def test_converted_call_namedtuple_via_collections(self):
opts = converter.ConversionOptions(recursive=True)
x = api.converted_call('namedtuple', collections, opts, ('TestNamedtuple',
('a', 'b')), {})
self.assertTrue(inspect_utils.isnamedtuple(x))
def test_converted_call_lambda(self):
opts = converter.ConversionOptions(recursive=True)
l = lambda x: x == 0
x = api.converted_call(l, None, opts, (constant_op.constant(0),), {})
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(True, self.evaluate(x))
def test_converted_call_defun_object_method(self):
opts = converter.ConversionOptions(recursive=True)
# pylint:disable=method-hidden
class TestClass(object):
def method(self):
return 1
def prepare(self):
self.method = function.defun(self.method)
# pylint:enable=method-hidden
tc = TestClass()
tc.prepare()
x = api.converted_call(tc.method, None, opts, (), {})
self.assertAllEqual(1, self.evaluate(x))
def test_converted_call_through_tf_dataset(self):
def other_fn(x):
if x > 0:
return x
return -x
def f():
return dataset_ops.Dataset.range(-3, 3).map(other_fn)
# Dataset iteration only works inside tf.function.
@def_function.function
def graph_fn():
opts = converter.ConversionOptions(recursive=True)
ds = api.converted_call(f, None, opts, (), {})
itr = iter(ds)
return next(itr), next(itr), next(itr)
self.assertAllEqual(self.evaluate(graph_fn()), (3, 2, 1))
def assertNoMemoryLeaks(self, f):
object_ids_before = {id(o) for o in gc.get_objects()}
f()
gc.collect()
objects_after = tuple(
o for o in gc.get_objects() if id(o) not in object_ids_before)
self.assertEmpty(
tuple(o for o in objects_after if isinstance(o, TestResource)))
def test_converted_call_no_leaks_via_closure(self):
def test_fn():
res = TestResource()
def f(y):
return res.x + y
opts = converter.ConversionOptions(recursive=True)
api.converted_call(f, None, opts, (1,), {})
self.assertNoMemoryLeaks(test_fn)
def test_converted_call_no_leaks_via_inner_function_closure(self):
def test_fn():
res = TestResource()
def f(y):
def inner_f():
return res.x + y
return inner_f
opts = converter.ConversionOptions(recursive=True)
api.converted_call(f, None, opts, (1,), {})()
self.assertNoMemoryLeaks(test_fn)
def test_context_tracking_direct_calls(self):
@api.do_not_convert()
def unconverted_fn():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.DISABLED)
@api.convert()
def converted_fn():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED)
unconverted_fn()
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED)
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.UNSPECIFIED)
converted_fn()
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.UNSPECIFIED)
def test_to_graph_basic(self):
def test_fn(x, s):
while tf.reduce_sum(x) > s:
x //= 2
return x
compiled_fn = api.to_graph(test_fn)
with tf.Graph().as_default():
x = compiled_fn(constant_op.constant((4, 8)), 4)
self.assertAllEqual(self.evaluate(x), (1, 2))
@test_util.run_deprecated_v1
def test_to_graph_with_defaults(self):
foo = 4
def test_fn(x, s=foo):
while tf.reduce_sum(x) > s:
x //= 2
return x
compiled_fn = api.to_graph(test_fn)
with self.cached_session() as sess:
x = compiled_fn(constant_op.constant([4, 8]))
self.assertListEqual([1, 2], self.evaluate(x).tolist())
def test_to_graph_with_globals(self):
def test_fn(x):
global global_n
global_n = x + global_n
return global_n
converted_fn = api.to_graph(test_fn)
prev_val = global_n
converted_fn(10)
self.assertGreater(global_n, prev_val)
def test_to_graph_with_kwargs_clashing_converted_call(self):
def called_fn(**kwargs):
return kwargs['f'] + kwargs['owner']
def test_fn():
# These arg names intentionally match converted_call's
return called_fn(f=1, owner=2)
compiled_fn = api.to_graph(test_fn)
self.assertEqual(compiled_fn(), 3)
def test_to_graph_with_kwargs_clashing_unconverted_call(self):
@api.do_not_convert
def called_fn(**kwargs):
return kwargs['f'] + kwargs['owner']
def test_fn():
# These arg names intentionally match _call_unconverted's
return called_fn(f=1, owner=2)
compiled_fn = api.to_graph(test_fn)
self.assertEqual(compiled_fn(), 3)
def test_to_graph_caching(self):
def test_fn(x):
if x > 0:
return x
else:
return -x
converted_functions = tuple(api.to_graph(test_fn) for _ in (-1, 0, 1))
# All outputs are from the same module. We can't use __module__ because
# that's reset when we instantiate the function (see conversion.py).
# TODO(mdan): Can and should we overwrite __module__ instead?
module_names = frozenset(f.ag_module for f in converted_functions)
self.assertEqual(len(module_names), 1)
self.assertNotIn('__main__', module_names)
self.assertEqual(len(frozenset(id(f) for f in converted_functions)), 3)
def test_to_graph_caching_different_options(self):
def called_fn():
pass
def test_fn():
return called_fn()
converted_recursive = api.to_graph(test_fn, recursive=True)
converted_non_recursive = api.to_graph(test_fn, recursive=False)
self.assertNotEqual(converted_recursive.ag_module,
converted_non_recursive.ag_module)
self.assertIn('ag__.STD', tf_inspect.getsource(converted_recursive))
self.assertNotIn('internal_convert_user_code=False',
tf_inspect.getsource(converted_recursive))
self.assertIn('internal_convert_user_code=False',
tf_inspect.getsource(converted_non_recursive))
self.assertNotIn('internal_convert_user_code=True',
tf_inspect.getsource(converted_non_recursive))
def test_to_graph_preserves_bindings(self):
y = 3
def test_fn():
return y
converted = api.to_graph(test_fn)
self.assertEqual(converted(), 3)
y = 7
self.assertEqual(converted(), 7)
def test_to_graph_source_map(self):
def test_fn(y):
return y**2
self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
def test_to_code_basic(self):
def test_fn(x, s):
while tf.reduce_sum(x) > s:
x /= 2
return x
# Just check that the output is parseable Python code.
self.assertIsNotNone(parser.parse_str(api.to_code(test_fn)))
def test_tf_convert_direct(self):
def f():
if tf.reduce_sum([1, 2]) > 0:
return -1
return 1
# Note: the autograph setting of tf.function has nothing to do with the
# test case. We just disable it to avoid confusion.
@def_function.function(autograph=False)
def test_fn(ctx):
return api.tf_convert(f, ctx)()
self.assertEqual(
self.evaluate(
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))), -1)
with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
# The code in `f` is only valid with AutoGraph.
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
def test_tf_convert_whitelisted_method(self):
model = sequential.Sequential([
core.Dense(2)
])
converted_call = api.tf_convert(
model.call, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))
_, converted_target = tf_decorator.unwrap(converted_call)
self.assertIs(converted_target.__func__, model.call.__func__)
def test_tf_convert_wrapped(self):
def f():
if tf.reduce_sum([1, 2]) > 0:
return -1
return 1
@functools.wraps(f)
def wrapper(*args, **kwargs):
return wrapper.__wrapped__(*args, **kwargs)
decorated_f = tf_decorator.make_decorator(f, wrapper)
# Note: the autograph setting of tf.function has nothing to do with the
# test case. We just disable it to avoid confusion.
@def_function.function(autograph=False)
def test_fn(ctx):
return api.tf_convert(decorated_f, ctx)()
self.assertEqual(
self.evaluate(
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))), -1)
# tf_convert mutates the decorator, so we need to create a new one for
# another test.
decorated_f = tf_decorator.make_decorator(f, wrapper)
with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
# The code in `f` is only valid with AutoGraph.
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
if __name__ == '__main__':
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
test.main()
| tensorflow-master | tensorflow/python/autograph/impl/api_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkers for detecting unsupported Python features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
class UnsupportedFeaturesChecker(gast.NodeVisitor):
"""Quick check for Python features we know we don't support.
Any features detected will cause AutoGraph to not compile a function.
"""
def visit_Attribute(self, node):
if (node.attr is not None
and node.attr.startswith('__') and not node.attr.endswith('__')):
raise NotImplementedError(
'Mangled names are not yet supported by AutoGraph')
# These checks could potentially be replaced with inspect.isgeneratorfunction
# to avoid a getsource/parse/ast-walk round trip.
def visit_Yield(self, node):
raise NotImplementedError('Generators are not supported by AutoGraph')
def visit_YieldFrom(self, node):
raise NotImplementedError('Generators are not supported by AutoGraph')
def verify(node):
UnsupportedFeaturesChecker().visit(node)
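# Illustrative usage sketch, added for clarity and not part of the original
# module: running `verify` on a parsed generator function should raise
# NotImplementedError via visit_Yield. The sample source below is hypothetical.
if __name__ == '__main__':
  _sample_source = 'def gen():\n  yield 1\n'
  try:
    verify(gast.parse(_sample_source))
  except NotImplementedError as e:
    print('Rejected as expected:', e)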
| tensorflow-master | tensorflow/python/autograph/core/unsupported_features_checker.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for wrapping converted functions bodies with auxiliary logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
@contextlib.contextmanager
def function_scope(function_name):
"""Returns a context manager for the converted body of a function."""
with ops.name_scope(function_name):
yield
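# Illustrative usage sketch, added for clarity and not part of the original
# module: ops created inside `function_scope` pick up the given function name
# as a name-scope prefix. Uses graph mode so that op names are observable.
if __name__ == '__main__':
  with ops.Graph().as_default():
    with function_scope('my_converted_fn'):
      t = ops.convert_to_tensor(1.0)
    print(t.op.name)  # Expected to start with 'my_converted_fn'.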
| tensorflow-master | tensorflow/python/autograph/core/function_wrapping.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import config_lib
Action = config_lib.Action
Convert = config_lib.Convert
DoNotConvert = config_lib.DoNotConvert
# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
CONVERSION_RULES = (
DoNotConvert('tensorflow'),
# TODO(b/133417201): Remove.
DoNotConvert('tensorflow_probability'),
# TODO(b/133842282): Remove.
DoNotConvert('tensorflow_datasets.core'),
DoNotConvert('collections'),
DoNotConvert('copy'),
DoNotConvert('inspect'),
DoNotConvert('ipdb'),
DoNotConvert('linecache'),
DoNotConvert('mock'),
DoNotConvert('numpy'),
DoNotConvert('pathlib'),
DoNotConvert('pdb'),
DoNotConvert('posixpath'),
DoNotConvert('re'),
DoNotConvert('threading'),
)
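# Illustrative sketch, added for clarity and not part of the original module:
# one way the rules above can be consulted for a module object. The helper
# name `_module_action` is an assumption for illustration; AutoGraph's real
# lookup lives in the conversion machinery, not here.
def _module_action(module):
  """Returns the first non-NONE action that a rule reports for `module`."""
  for rule in CONVERSION_RULES:
    action = rule.get_action(module)
    if action is not Action.NONE:
      return action
  return Action.NONE
if __name__ == '__main__':
  import collections
  print(_module_action(collections))  # Expected: Action.DO_NOT_CONVERT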
| tensorflow-master | tensorflow/python/autograph/core/config.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
class Rule(object):
"""Base class for conversion rules."""
def __init__(self, module_prefix):
self._prefix = module_prefix
def matches(self, module_name):
return (module_name.startswith(self._prefix + '.') or
module_name == self._prefix)
class Action(enum.Enum):
NONE = 0
CONVERT = 1
DO_NOT_CONVERT = 2
class DoNotConvert(Rule):
"""Indicates that this module should be not converted."""
def __str__(self):
return 'DoNotConvert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.DO_NOT_CONVERT
return Action.NONE
class Convert(Rule):
"""Indicates that this module should be converted."""
def __str__(self):
return 'Convert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.CONVERT
return Action.NONE
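# Illustrative sketch, added for clarity and not part of the original module:
# Rule.matches does dotted-prefix matching, so a rule covers the named module
# and its submodules, but not modules that merely share a textual prefix.
if __name__ == '__main__':
  rule = DoNotConvert('collections')
  assert rule.matches('collections')
  assert rule.matches('collections.abc')
  assert not rule.matches('collections_extended')
  print('prefix matching behaves as described')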
| tensorflow-master | tensorflow/python/autograph/core/config_lib.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Symbol naming utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.utils import misc
class _NamingStyle(enum.Enum):
SNAKE = 1
CAMEL = 2
class Namer(object):
"""Symbol name generartor."""
def __init__(self, global_namespace):
self.global_namespace = global_namespace
self.generated_names = set()
def _as_symbol_name(self, fqn, style=_NamingStyle.SNAKE):
"""Returns a symbol name that matches a fully-qualified name.
The returned name is safe to use for Python symbols. Any special characters
present in fqn are replaced according to the style argument.
Examples:
self._as_symbol_name('foo.bar', style=_NamingStyle.CAMEL) == 'FooBar'
self._as_symbol_name('foo.bar', style=_NamingStyle.SNAKE) == 'foo_bar'
See the unit tests for more examples.
Args:
fqn: Union[Text, Tuple[Text]] a fully-qualified symbol name. The qualifier
may include module, class names, attributes, etc.
style: _NamingStyle
Returns:
Text
"""
assert style in _NamingStyle
if isinstance(fqn, tuple):
cn = '.'.join(fqn)
else:
cn = fqn
# Until we clean up the whole FQN mechanism, `fqn` may not be
# canonical, that is, it can appear as ('foo.bar', 'baz')
# This replaces any characters that might remain because of that.
pieces = cn.split('.')
if style == _NamingStyle.CAMEL:
pieces = tuple(misc.capitalize_initial(p) for p in pieces)
return ''.join(pieces)
elif style == _NamingStyle.SNAKE:
return '_'.join(pieces)
def class_name(self, original_fqn):
"""Returns the name of a converted class."""
canonical_name = self._as_symbol_name(
original_fqn, style=_NamingStyle.CAMEL)
new_name_root = 'Tf%s' % canonical_name
new_name = new_name_root
n = 0
while new_name in self.global_namespace:
n += 1
new_name = '%s_%d' % (new_name_root, n)
self.generated_names.add(new_name)
return new_name
def function_name(self, original_fqn):
"""Returns the name of a converted function."""
canonical_name = self._as_symbol_name(
original_fqn, style=_NamingStyle.SNAKE)
new_name_root = 'tf__%s' % canonical_name
new_name = new_name_root
n = 0
while new_name in self.global_namespace:
n += 1
new_name = '%s_%d' % (new_name_root, n)
self.generated_names.add(new_name)
return new_name
def new_symbol(self, name_root, reserved_locals):
"""See control_flow.SymbolNamer.new_symbol."""
# reserved_locals may contain QNs.
all_reserved_locals = set()
for s in reserved_locals:
if isinstance(s, qual_names.QN):
all_reserved_locals.update(s.qn)
elif isinstance(s, str):
all_reserved_locals.add(s)
else:
raise ValueError('Unexpected symbol type "%s"' % type(s))
pieces = name_root.split('_')
if pieces[-1].isdigit():
name_root = '_'.join(pieces[:-1])
n = int(pieces[-1])
else:
n = 0
new_name = name_root
while (new_name in self.global_namespace or
new_name in all_reserved_locals or new_name in self.generated_names):
n += 1
new_name = '%s_%d' % (name_root, n)
self.generated_names.add(new_name)
return new_name
| tensorflow-master | tensorflow/python/autograph/core/naming.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for naming module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import naming
from tensorflow.python.platform import test
class NamerTest(test.TestCase):
def test_function_name_tracks_names(self):
namer = naming.Namer({})
self.assertEqual('tf__foo', namer.function_name('foo'))
self.assertEqual('tf__bar', namer.function_name('bar'))
self.assertItemsEqual(('tf__bar', 'tf__foo'), namer.generated_names)
def test_function_name_consistent(self):
namer = naming.Namer({})
self.assertEqual('tf__foo', namer.function_name('foo'))
self.assertEqual('tf__foo', namer.function_name('foo'))
def test_function_name_unsanitized_fqn(self):
namer = naming.Namer({})
self.assertEqual('tf__foo_bar', namer.function_name('foo.bar'))
self.assertEqual('tf__foo_bar_baz', namer.function_name(('foo.bar', 'baz')))
def test_class_name_basic(self):
namer = naming.Namer({})
self.assertEqual('TfFooBar', namer.class_name(('foo', 'Bar')))
def test_class_name_unsanitized_fqn(self):
namer = naming.Namer({})
self.assertEqual('TfFooBarBaz', namer.class_name(('foo.bar', 'Baz')))
def test_function_name_avoids_global_conflicts(self):
namer = naming.Namer({'tf__foo': 1})
self.assertEqual('tf__foo_1', namer.function_name('foo'))
def test_new_symbol_tracks_names(self):
namer = naming.Namer({})
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp',), namer.generated_names)
def test_new_symbol_avoids_duplicates(self):
namer = naming.Namer({})
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp', 'temp_1'), namer.generated_names)
def test_new_symbol_avoids_conflicts(self):
namer = naming.Namer({'temp': 1})
# temp is reserved in the global namespace
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
# temp_2 is reserved in the local namespace
self.assertEqual('temp_3', namer.new_symbol('temp', set(('temp_2',))))
self.assertItemsEqual(('temp_1', 'temp_3'), namer.generated_names)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/core/naming_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.NodeTransformer
[uses] transformer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
AUTO_CONTROL_DEPS: Insert control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the comparison operators, like
equality. This is soon to be deprecated as support is being added to the
Tensor class.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
force_conversion: bool, whether to force converting the target entity. When
force_conversion is turned off, the converter may decide to return the
function as-is.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
force_conversion=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.force_conversion = force_conversion
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.force_conversion,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
def __str__(self):
return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def to_ast(self, internal_convert_user_code=None):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Args:
internal_convert_user_code: Optional[bool], allows overriding the
corresponding value.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
force_conversion=force_conversion_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
if internal_convert_user_code is None:
internal_convert_user_code = self.internal_convert_user_code
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
force_conversion_val=parser.parse_expression(
str(self.force_conversion)),
internal_convert_user_code_val=parser.parse_expression(
str(internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
force_conversion=False,
internal_convert_user_code=True,
optional_features=None)
class ProgramContext(
collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
"""
pass
class EntityContext(transformer.Context):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext
"""
def __init__(self, namer, entity_info, program_ctx):
super(EntityContext, self).__init__(entity_info)
self.namer = namer
self.program = program_ctx
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg,
compiler.ast_to_source(other_value).strip(),
compiler.ast_to_source(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
class AgAnno(enum.Enum):
"""Annotation labels specific to AutoGraph. See anno.py."""
DIRECTIVES = 'User directives associated with the annotated statement.'
def __repr__(self):
return self.name
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context, None)
node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
node = liveness.resolve(node, context, graphs)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
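# Illustrative sketch, added for clarity and not part of the original module:
# how `optional_features` interacts with `ConversionOptions.uses`. Feature.ALL
# (the default) enables every feature; otherwise membership is checked directly.
if __name__ == '__main__':
  _opts = ConversionOptions(optional_features=Feature.EQUALITY_OPERATORS)
  print(_opts.uses(Feature.EQUALITY_OPERATORS))   # True
  print(_opts.uses(Feature.LISTS))                # False
  print(ConversionOptions().uses(Feature.LISTS))  # True: default is Feature.ALL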
| tensorflow-master | tensorflow/python/autograph/core/converter.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thread-local context managers for AutoGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import enum
stacks = threading.local()
def _control_ctx():
if not hasattr(stacks, 'control_status'):
stacks.control_status = [_default_control_status_ctx()]
return stacks.control_status
def control_status_ctx():
ret = _control_ctx()[-1]
return ret
class Status(enum.Enum):
UNSPECIFIED = 0
ENABLED = 1
DISABLED = 2
class ControlStatusCtx(object):
"""A context that tracks whether autograph is enabled by the user."""
def __init__(self, status, options=None):
self.status = status
self.options = options
def __enter__(self):
_control_ctx().append(self)
return self
def __repr__(self):
return '{}[status={}, options={}]'.format(
self.__class__.__name__, self.status, self.options)
def __exit__(self, unused_type, unused_value, unused_traceback):
assert _control_ctx()[-1] is self
_control_ctx().pop()
def _default_control_status_ctx():
return ControlStatusCtx(status=Status.UNSPECIFIED)
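# Illustrative usage sketch, added for clarity and not part of the original
# module: entering a ControlStatusCtx pushes it onto the thread-local stack and
# control_status_ctx() always reports the innermost one.
if __name__ == '__main__':
  assert control_status_ctx().status is Status.UNSPECIFIED
  with ControlStatusCtx(status=Status.ENABLED):
    assert control_status_ctx().status is Status.ENABLED
  assert control_status_ctx().status is Status.UNSPECIFIED
  print('control status stack behaves as described')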
| tensorflow-master | tensorflow/python/autograph/core/ag_ctx.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for function_wrapping module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FunctionWrappingTest(test.TestCase):
@test_util.run_deprecated_v1
def test_function_scope_name(self):
with function_wrapping.function_scope('test_name'):
t = constant_op.constant(1)
self.assertIn('test_name', t.name)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/core/function_wrapping_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for tests in this module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import imp
import sys
import six
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
RESULT_OF_MOCK_CONVERTED_CALL = 7
class TestCase(test.TestCase):
"""Base class for unit tests in this module. Contains relevant utilities."""
@contextlib.contextmanager
def assertPrints(self, expected_result):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
yield
self.assertEqual(out_capturer.getvalue(), expected_result)
finally:
sys.stdout = sys.__stdout__
@contextlib.contextmanager
def compiled(self, node, namespace, *symbols):
source = None
self.dynamic_calls = []
def converted_call(*args):
"""Mock version of api.converted_call."""
self.dynamic_calls.append(args[3:]) # args only; see api.converted_call
return RESULT_OF_MOCK_CONVERTED_CALL
try:
result, source, source_map = compiler.ast_to_object(
node, include_source_map=True)
# TODO(mdan): Move the unparsing from converter into pyct and reuse here.
# TODO(mdan): Move this into self.prepare()
result.tf = self.make_fake_mod('fake_tf', *symbols)
fake_ag = self.make_fake_mod('fake_ag', converted_call,
converter.ConversionOptions)
fake_ag.__dict__.update(operators.__dict__)
fake_ag.__dict__.update(special_functions.__dict__)
fake_ag.ConversionOptions = converter.ConversionOptions
fake_ag.Feature = converter.Feature
fake_ag.utils = utils
fake_ag.function_scope = function_wrapping.function_scope
result.ag__ = fake_ag
result.ag_source_map__ = source_map
for k, v in namespace.items():
result.__dict__[k] = v
yield result
except Exception: # pylint:disable=broad-except
if source is None:
print('Offending AST:\n%s' % pretty_printer.fmt(node, color=False))
else:
print('Offending compiled code:\n%s' % source)
raise
@contextlib.contextmanager
def converted(self, entity, converter_module, namespace, *tf_symbols):
node, ctx = self.prepare(entity, namespace)
if not isinstance(converter_module, (list, tuple)):
converter_module = (converter_module,)
for i, m in enumerate(converter_module):
node = converter.standard_analysis(node, ctx, is_initial=not i)
node = m.transform(node, ctx)
with self.compiled(node, namespace, *tf_symbols) as result:
yield result
def make_fake_mod(self, name, *symbols):
fake_mod = imp.new_module(name)
for s in symbols:
if hasattr(s, '__name__'):
setattr(fake_mod, s.__name__, s)
elif hasattr(s, 'name'):
# This is a bit of a hack, but works for things like tf.int32
setattr(fake_mod, s.name, s)
else:
raise ValueError('can not attach %s - what should be its name?' % s)
return fake_mod
def attach_namespace(self, module, **ns):
for k, v in ns.items():
setattr(module, k, v)
def prepare(self, test_fn, namespace, recursive=True):
namespace['ConversionOptions'] = converter.ConversionOptions
future_features = ('print_function', 'division')
node, source = parser.parse_entity(test_fn, future_features=future_features)
namer = naming.Namer(namespace)
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(recursive=recursive),
autograph_module=None)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
future_features=future_features,
namespace=namespace)
ctx = converter.EntityContext(namer, entity_info, program_ctx)
origin_info.resolve_entity(node, source, test_fn)
node = converter.standard_analysis(node, ctx, is_initial=True)
return node, ctx
| tensorflow-master | tensorflow/python/autograph/core/converter_testing.py |