python_code (stringlengths 0 to 1.02M) | repo_name (stringlengths 9 to 48) | file_path (stringlengths 5 to 114) |
---|---|---|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reaching_definitions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.platform import test
global_a = 7
global_b = 17
class ReachingDefinitionsAnalyzerTestBase(test.TestCase):
def _parse_and_analyze(self, test_fn):
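# Full analysis pipeline exercised by these tests: parse the function, resolve
# qualified names, run activity analysis, build the CFG, then resolve reaching
# definitions over it.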
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
source_code=source, source_file=None, future_features=(), namespace={})
node = qual_names.resolve(node)
ctx = transformer.Context(entity_info)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
node = reaching_definitions.resolve(node, ctx, graphs,
reaching_definitions.Definition)
return node
def assertHasDefs(self, node, num):
defs = anno.getanno(node, anno.Static.DEFINITIONS)
self.assertEqual(len(defs), num)
for r in defs:
self.assertIsInstance(r, reaching_definitions.Definition)
def assertHasDefinedIn(self, node, expected):
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
defined_in_str = set(str(v) for v in defined_in)
if not expected:
expected = ()
if not isinstance(expected, tuple):
expected = (expected,)
self.assertSetEqual(defined_in_str, set(expected))
def assertSameDef(self, first, second):
self.assertHasDefs(first, 1)
self.assertHasDefs(second, 1)
self.assertIs(
anno.getanno(first, anno.Static.DEFINITIONS)[0],
anno.getanno(second, anno.Static.DEFINITIONS)[0])
def assertNotSameDef(self, first, second):
self.assertHasDefs(first, 1)
self.assertHasDefs(second, 1)
self.assertIsNot(
anno.getanno(first, anno.Static.DEFINITIONS)[0],
anno.getanno(second, anno.Static.DEFINITIONS)[0])
class ReachingDefinitionsAnalyzerTest(ReachingDefinitionsAnalyzerTestBase):
def test_conditional(self):
def test_fn(a, b):
a = []
if b:
a = []
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].test, 1)
self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value, 2)
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
def test_try_in_conditional(self):
def test_fn(a, b): # pylint:disable=unused-argument
a = []
if b:
try:
pass
except: # pylint:disable=bare-except
pass
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
def test_conditional_in_try_in_conditional(self):
def test_fn(a, b):
a = []
if b:
try:
if b:
a = []
except TestException: # pylint:disable=undefined-variable,unused-variable
pass
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
# Note: `TestException` is not tracked.
self.assertHasDefinedIn(fn_body[1].body[0].body[0], ('a', 'b'))
def test_conditional_in_except_in_conditional(self):
def test_fn(a, b):
a = []
if b:
try:
pass
except TestException as e: # pylint:disable=undefined-variable,unused-variable
if b:
a = []
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
# Note: `TestException` and `e` are not tracked.
self.assertHasDefinedIn(fn_body[1].body[0].handlers[0].body[0], ('a', 'b'))
def test_while(self):
def test_fn(a):
max(a)
while True:
a = a
a = a
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].value.args[0], 1)
self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
self.assertHasDefs(fn_body[1].body[1].value, 1)
# The loop does have an invariant test, but the CFG doesn't know that.
self.assertHasDefs(fn_body[1].body[0].value, 2)
self.assertHasDefs(fn_body[2].value, 2)
def test_while_else(self):
def test_fn(x, i):
y = 0
while x:
x += i
if i:
break
else:
y = 1
return x, y
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].test, 2)
self.assertHasDefs(fn_body[1].body[0].target, 1)
self.assertHasDefs(fn_body[1].body[1].test, 1)
self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value.elts[0], 2)
self.assertHasDefs(fn_body[2].value.elts[1], 2)
def test_for_else(self):
def test_fn(x, i):
y = 0
for i in x:
x += i
if i:
break
else:
continue
else:
y = 1
return x, y
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].target, 1)
self.assertHasDefs(fn_body[1].body[0].target, 1)
self.assertHasDefs(fn_body[1].body[1].test, 1)
self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value.elts[0], 2)
self.assertHasDefs(fn_body[2].value.elts[1], 2)
def test_nested_functions(self):
def test_fn(a, b):
a = []
if b:
a = []
def foo():
return a
foo()
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
def_of_a_in_if = fn_body[1].body[0].targets[0]
self.assertHasDefs(fn_body[0].targets[0], 1)
self.assertHasDefs(fn_body[1].test, 1)
self.assertHasDefs(def_of_a_in_if, 1)
self.assertHasDefs(fn_body[2].value, 2)
inner_fn_body = fn_body[1].body[1].body
self.assertSameDef(inner_fn_body[0].value, def_of_a_in_if)
def test_nested_functions_isolation(self):
def test_fn(a):
a = 0
def child():
a = 1
return a
child()
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
parent_return = fn_body[3]
child_return = fn_body[1].body[1]
# The assignment `a = 1` makes `a` local to `child`.
self.assertNotSameDef(parent_return.value, child_return.value)
def test_function_call_in_with(self):
def foo(_):
pass
def test_fn(a):
with foo(a):
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)
def test_mutation_subscript(self):
def test_fn(a):
l = []
l[0] = a
return l
node = self._parse_and_analyze(test_fn)
fn_body = node.body
creation = fn_body[0].targets[0]
mutation = fn_body[1].targets[0].value
use = fn_body[2].value
self.assertSameDef(creation, mutation)
self.assertSameDef(creation, use)
def test_deletion_partial(self):
def test_fn(a):
a = 0
if a:
del a
else:
a = 1
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
first_def = fn_body[0].targets[0]
second_def = fn_body[1].orelse[0].targets[0]
use = fn_body[2].value
self.assertNotSameDef(use, first_def)
self.assertSameDef(use, second_def)
def test_deletion_total(self):
def test_fn(a):
if a:
a = 0
else:
a = 1
del a
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
use = fn_body[2].value
self.assertHasDefs(use, 0)
def test_replacement(self):
def foo(a):
return a
def test_fn(a):
a = foo(a)
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
param = node.args.args[0]
source = fn_body[0].value.args[0]
target = fn_body[0].targets[0]
retval = fn_body[1].value
self.assertSameDef(param, source)
self.assertNotSameDef(source, target)
self.assertSameDef(target, retval)
def test_comprehension_leaking(self):
def test_fn(a):
all(x for x in a)
return x # pylint:disable=undefined-variable
node = self._parse_and_analyze(test_fn)
fn_body = node.body
listcomp_target = fn_body[0].value.args[0].generators[0].target
retval = fn_body[1].value
# Python2 leaks comprehension symbols. Python3 doesn't.
if six.PY2:
self.assertSameDef(retval, listcomp_target)
else:
self.assertHasDefs(retval, 0)
def test_function_definition(self):
def test_fn():
def a():
pass
if a: # pylint:disable=using-constant-test
a = None
return a
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[1].test, 1)
self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
self.assertHasDefs(fn_body[2].value, 2)
self.assertHasDefinedIn(fn_body[1], ('a',))
def test_global(self):
def test_fn():
global global_a
global global_b
if global_a:
global_b = []
return global_a, global_b
node = self._parse_and_analyze(test_fn)
fn_body = node.body
self.assertHasDefs(fn_body[2].test, 1)
self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
self.assertHasDefs(fn_body[3].value.elts[0], 1)
self.assertHasDefs(fn_body[3].value.elts[1], 2)
self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])
self.assertHasDefinedIn(fn_body[2], ('global_a', 'global_b'))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_test.py |
# python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct.static_analysis import activity_test
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
NodeAnno = annos.NodeAnno
class ActivityAnalyzerTest(activity_test.ActivityAnalyzerTestBase):
"""Tests which can only run in Python 3."""
def test_nonlocal_symbol(self):
nonlocal_a = 3
nonlocal_b = 13
def test_fn(c):
nonlocal nonlocal_a
nonlocal nonlocal_b
nonlocal_a = nonlocal_b + c
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('nonlocal_b', 'c'), ('nonlocal_a',))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/pyct/static_analysis/activity_py3_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Annotations used by the static analyzer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
# TODO(mdan): Remove.
class NoValue(Enum):
def __repr__(self):
return self.name
class NodeAnno(NoValue):
"""Additional annotations used by the static analyzer.
These are in addition to the basic annotations declared in anno.py.
"""
# Symbols
# These flags are boolean.
IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
IS_MODIFIED_SINCE_ENTRY = (
'Symbol has been explicitly replaced in the current function scope.')
# Scopes
# Scopes are represented by objects of type activity.Scope.
ARGS_SCOPE = 'The scope for the argument list of a function call.'
COND_SCOPE = 'The scope for the test node of a conditional statement.'
ITERATE_SCOPE = 'The scope for the iterate assignment of a for loop.'
BODY_SCOPE = (
'The scope for the main body of a statement (True branch for if '
'statements, main body for loops).')
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
| tensorflow-master | tensorflow/python/autograph/pyct/static_analysis/annos.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reaching definition analysis.
This analysis attaches a set of Definition objects to each symbol, one
for each distinct definition that may reach it. The Definition objects are
mutable and may be used by subsequent analyses to further annotate data like
static type and value information.
The analysis also attaches the set of the symbols defined at the entry of
control flow statements.
Requires activity analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import annos
class Definition(object):
"""Definition objects describe a unique definition of a variable.
Subclasses of this may be used by passing an appropriate factory function to
resolve.
Attributes:
param_of: Optional[ast.AST]
"""
def __init__(self):
self.param_of = None
def __repr__(self):
return '%s[%d]' % (self.__class__.__name__, id(self))
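# Hypothetical subclass sketch (not part of this module), following the
# docstring above: a subclass can carry extra static information and be passed
# to resolve() via definition_factory, e.g.
#   class TypedDefinition(Definition):
#     def __init__(self):
#       super(TypedDefinition, self).__init__()
#       self.inferred_type = None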
class _NodeState(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
if isinstance(init_from, _NodeState):
self.value = {
s: set(other_infos) for s, other_infos in init_from.value.items()
}
elif isinstance(init_from, dict):
self.value = {s: set((init_from[s],)) for s in init_from}
else:
assert False, init_from
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self)
for s, other_infos in other.value.items():
if s in result.value:
result.value[s].update(other_infos)
else:
result.value[s] = set(other_infos)
return result
def __sub__(self, other):
assert isinstance(other, set)
result = _NodeState(self)
for s in other:
result.value.pop(s, None)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that determines reaching definitions at statement level."""
def __init__(self, graph, definition_factory):
self._definition_factory = definition_factory
super(Analyzer, self).__init__(graph)
# This allows communicating that nodes have extra reaching definitions,
# e.g. those that a function closes over.
self.extra_in = {}
self.gen_map = {}
def init_state(self, _):
return _NodeState()
def visit_node(self, node):
prev_defs_out = self.out[node]
defs_in = _NodeState(self.extra_in.get(node.ast_node, None))
for n in node.prev:
defs_in |= self.out[n]
if anno.hasanno(node.ast_node, anno.Static.SCOPE):
node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
# The definition objects created by each node must be singletons because
# their ids are used in equality checks.
if node not in self.gen_map:
node_symbols = {}
for s in node_scope.modified:
def_ = self._definition_factory()
if s in node_scope.params:
def_.param_of = weakref.ref(node_scope.params[s])
node_symbols[s] = def_
self.gen_map[node] = _NodeState(node_symbols)
gen = self.gen_map[node]
kill = node_scope.modified | node_scope.deleted
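# Standard dataflow transfer function: OUT = GEN | (IN - KILL).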
defs_out = gen | (defs_in - kill)
elif isinstance(node.ast_node, (gast.Global, gast.Nonlocal)):
# Special case for global and nonlocal: they generate a definition,
# but are not tracked by activity analysis.
if node not in self.gen_map:
node_symbols = {}
for s in node.ast_node.names:
qn = qual_names.QN(s)
if qn in defs_in.value:
# In Python 2, this is a syntax warning. In Python 3, it's an error.
raise ValueError(
'"{}" is assigned before global definition'.format(s))
def_ = self._definition_factory()
node_symbols[qn] = def_
self.gen_map[node] = _NodeState(node_symbols)
gen = self.gen_map[node]
defs_out = defs_in | gen
else:
# Nodes that don't have a scope annotation are assumed not to touch any
# symbols.
# This Name node below is a literal name, e.g. False
# This can also happen if activity.py forgot to annotate the node with a
# scope object.
assert isinstance(node.ast_node,
(gast.Name, gast.Break, gast.Continue, gast.Raise,
gast.Pass)), (node.ast_node, node)
defs_out = defs_in
self.in_[node] = defs_in
self.out[node] = defs_out
# TODO(mdan): Move this to the superclass?
return prev_defs_out != defs_out
class TreeAnnotator(transformer.Base):
"""AST visitor that annotates each symbol name with its reaching definitions.
Simultaneously, the visitor runs the dataflow analysis on each function node,
accounting for the effect of closures. For example:
def foo():
bar = 1
def baz():
# bar = 1 reaches here
"""
def __init__(self, source_info, graphs, definition_factory):
super(TreeAnnotator, self).__init__(source_info)
self.definition_factory = definition_factory
self.graphs = graphs
self.current_analyzer = None
self.current_cfg_node = None
def visit_FunctionDef(self, node):
parent_analyzer = self.current_analyzer
subgraph = self.graphs[node]
# Preorder tree processing:
# 1. if this is a child function, the parent was already analyzed and it
# has the proper state value for the subgraph's entry
# 2. analyze the current function body
# 3. recursively walk the subtree; child functions will be processed
analyzer = Analyzer(subgraph, self.definition_factory)
if parent_analyzer is not None:
# Wire the state between the two subgraphs' analyzers.
parent_out_state = parent_analyzer.out[parent_analyzer.graph.index[node]]
# Exception: symbols modified in the child function are local to it
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
parent_out_state -= body_scope.modified
analyzer.extra_in[node.args] = parent_out_state
# Complete the analysis for the local function and annotate its body.
analyzer.visit_forward()
# Recursively process any remaining subfunctions.
self.current_analyzer = analyzer
# Note: not visiting name, decorator_list and returns because they don't
# apply to this analysis.
# TODO(mdan): Should we still process the function name?
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
self.current_analyzer = parent_analyzer
return node
def visit_Name(self, node):
if self.current_analyzer is None:
# Names may appear outside function defs - for example in class
# definitions.
return node
analyzer = self.current_analyzer
cfg_node = self.current_cfg_node
assert cfg_node is not None, ('name node, %s, outside of any statement?'
% node.id)
qn = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Load):
anno.setanno(node, anno.Static.DEFINITIONS,
tuple(analyzer.in_[cfg_node].value.get(qn, ())))
else:
anno.setanno(node, anno.Static.DEFINITIONS,
tuple(analyzer.out[cfg_node].value.get(qn, ())))
return node
def _aggregate_predecessors_defined_in(self, node):
preds = self.current_analyzer.graph.stmt_prev[node]
node_defined_in = set()
for p in preds:
node_defined_in |= set(self.current_analyzer.out[p].value.keys())
anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))
def visit_If(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_For(self, node):
self._aggregate_predecessors_defined_in(node)
# Manually accounting for the shortcoming described in
# cfg.AstToCfg.visit_For.
parent = self.current_cfg_node
self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
node.target = self.visit(node.target)
self.current_cfg_node = parent
node.iter = self.visit(node.iter)
node.body = self.visit_block(node.body)
node.orelse = self.visit_block(node.orelse)
return node
def visit_While(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_Try(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_ExceptHandler(self, node):
self._aggregate_predecessors_defined_in(node)
# TODO(mdan): Also track the exception type / name symbols.
node.body = self.visit_block(node.body)
return node
def visit(self, node):
parent = self.current_cfg_node
if (self.current_analyzer is not None and
node in self.current_analyzer.graph.index):
self.current_cfg_node = self.current_analyzer.graph.index[node]
node = super(TreeAnnotator, self).visit(node)
self.current_cfg_node = parent
return node
def resolve(node, source_info, graphs, definition_factory):
"""Resolves reaching definitions for each symbol.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
definition_factory: Callable[[], Definition]
Returns:
ast.AST
"""
visitor = TreeAnnotator(source_info, graphs, definition_factory)
node = visitor.visit(node)
return node
| tensorflow-master | tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion to A-normal form.
The general idea of A-normal form is that every intermediate value is
explicitly named with a variable. For more, see
https://en.wikipedia.org/wiki/A-normal_form.
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
import six
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
class DummyGensym(object):
"""A dumb gensym that suffixes a stem by sequential numbers from 1000."""
def __init__(self, ctx):
del ctx
# A proper implementation needs to account for:
# * ctx.info.namespace
# * all the symbols defined in the AST
# * the symbols generated so far
self._idx = 0
def new_name(self, stem='tmp'):
self._idx += 1
return stem + '_' + str(1000 + self._idx)
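# For example, successive calls to new_name() yield 'tmp_1001', 'tmp_1002', ...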
class AnfTransformer(transformer.Base):
"""Performs the conversion to A-normal form (ANF)."""
# The algorithm is a postorder recursive tree walk. Any given node A may, in
# general, require creation of a series B of Assign statements, which compute
# and explicitly name the intermediate values needed to compute the value of
# A. If A was already a statement, it can be replaced with the sequence B +
# [A]. If A was an expression, B needs to be propagated up the tree until a
# statement is encountered. Since the `ast.NodeTransformer` framework makes
# no provision for subtraversals returning side information, this class
# accumulates the sequence B in an instance variable.
# The only other subtlety is that some Python statements (like `if`) have both
# expression fields (`test`) and statement list fields (`body` and `orelse`).
# Any additional assignments needed to name all the intermediate values in the
# `test` can be prepended to the `if` node, but assignments produced by
# processing the `body` and the `orelse` need to be kept together with them,
# and not accidentally lifted out of the `if`.
def __init__(self, ctx, gensym_source=None):
"""Creates an ANF transformer.
Args:
ctx: transformer.Context
gensym_source: An optional object with the same interface as `DummyGensym`
for generating unique names
"""
super(AnfTransformer, self).__init__(ctx)
if gensym_source is None:
self._gensym = DummyGensym(ctx)
else:
self._gensym = gensym_source(ctx)
self._pending_statements = []
def _consume_pending_statements(self):
ans = self._pending_statements
self._pending_statements = []
return ans
def _add_pending_statement(self, stmt):
self._pending_statements.append(stmt)
_trivial_nodes = (
# Non-nodes that show up as AST fields
bool, six.string_types,
# Leaf nodes that are already in A-normal form
gast.expr_context, gast.Name, gast.Num, gast.Str, gast.Bytes,
gast.NameConstant, gast.Ellipsis,
# Binary operators
gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow, gast.LShift,
gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd, gast.FloorDiv,
# Unary operators
gast.Invert, gast.Not, gast.UAdd, gast.USub,
# Comparison operators
gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE,
gast.Is, gast.IsNot, gast.In, gast.NotIn,
)
def _is_node_trivial(self, node):
if node is None:
return True
elif isinstance(node, self._trivial_nodes):
return True
elif isinstance(node, gast.keyword):
return self._is_node_trivial(node.value)
elif isinstance(node, (gast.Starred, gast.withitem, gast.slice)):
return self._are_children_trivial(node)
return False
def _are_children_trivial(self, node):
for field in node._fields:
if not field.startswith('__'):
if not self._is_node_trivial(getattr(node, field)):
return False
return True
def _ensure_node_is_trivial(self, node):
if node is None:
return node
elif isinstance(node, self._trivial_nodes):
return node
elif isinstance(node, list):
# If something's field was actually a list, e.g., variadic arguments.
return [self._ensure_node_is_trivial(n) for n in node]
elif isinstance(node, gast.keyword):
node.value = self._ensure_node_is_trivial(node.value)
return node
elif isinstance(node, (gast.Starred, gast.withitem, gast.slice)):
return self._ensure_fields_trivial(node)
elif isinstance(node, gast.expr):
temp_name = self._gensym.new_name()
temp_assign = templates.replace(
'temp_name = expr', temp_name=temp_name, expr=node)[0]
self._add_pending_statement(temp_assign)
answer = templates.replace('temp_name', temp_name=temp_name)[0]
return answer
else:
raise ValueError('Do not know how to treat {}'.format(node))
def _ensure_fields_trivial(self, node):
for field in node._fields:
if field.startswith('__'):
continue
setattr(node, field, self._ensure_node_is_trivial(getattr(node, field)))
return node
def _visit_strict_statement(self, node, trivialize_children=True):
assert not self._pending_statements
node = self.generic_visit(node)
if trivialize_children:
self._ensure_fields_trivial(node)
results = self._consume_pending_statements()
results.append(node)
return results
def _visit_strict_expression(self, node):
node = self.generic_visit(node)
self._ensure_fields_trivial(node)
return node
# Note on code order: These are listed in the same order as the grammar
# elements on https://github.com/serge-sans-paille/gast
# FunctionDef, AsyncFunctionDef, and ClassDef should be correct by default.
def visit_Return(self, node):
return self._visit_strict_statement(node)
def visit_Delete(self, node):
return self._visit_strict_statement(node, trivialize_children=False)
def visit_Assign(self, node):
return self._visit_strict_statement(node, trivialize_children=False)
def visit_AugAssign(self, node):
return self._visit_strict_statement(node, trivialize_children=False)
def visit_Print(self, node):
return self._visit_strict_statement(node)
def visit_For(self, node):
assert not self._pending_statements
# It's important to visit node.iter first, because any statements created
# thereby need to live outside the body.
self.visit(node.iter)
node.iter = self._ensure_node_is_trivial(node.iter)
iter_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.iter, but that is both correct and
# cheap because by this point node.iter is trivial.
node = self.generic_visit(node)
assert not self._pending_statements
iter_stmts.append(node)
return iter_stmts
def visit_AsyncFor(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial AsyncFor nodes not supported yet '
'(need to think through the semantics).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_While(self, node):
if not self._is_node_trivial(node.test):
msg = ('While with nontrivial test not supported yet '
'(need to avoid precomputing the test).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_If(self, node):
assert not self._pending_statements
# It's important to visit node.test first, because any statements created
# thereby need to live outside the body.
self.visit(node.test)
node.test = self._ensure_node_is_trivial(node.test)
condition_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.test, but that is both correct and
# cheap because by this point node.test is trivial.
node = self.generic_visit(node)
assert not self._pending_statements
condition_stmts.append(node)
return condition_stmts
def visit_With(self, node):
assert not self._pending_statements
# It's important to visit node.items first, because any statements created
# thereby need to live outside the body.
for item in node.items:
self.visit(item)
node.items = [self._ensure_node_is_trivial(n) for n in node.items]
contexts_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.items, but that is both correct and
# cheap because by this point node.items is trivial.
node = self.generic_visit(node)
assert not self._pending_statements
contexts_stmts.append(node)
return contexts_stmts
def visit_AsyncWith(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial AsyncWith nodes not supported yet '
'(need to think through the semantics).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_Raise(self, node):
return self._visit_strict_statement(node)
# Try should be correct by default.
def visit_Assert(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial Assert nodes not supported yet '
'(need to avoid computing the test when assertions are off, and '
'avoid computing the irritant when the assertion does not fire).')
raise ValueError(msg)
return self.generic_visit(node)
# Import and ImportFrom should be correct by default.
def visit_Exec(self, node):
return self._visit_strict_statement(node)
# Global and Nonlocal should be correct by default.
def visit_Expr(self, node):
return self._visit_strict_statement(node, trivialize_children=False)
# Pass, Break, and Continue should be correct by default.
def visit_BoolOp(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial BoolOp nodes not supported yet '
'(need to preserve short-circuiting semantics).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_BinOp(self, node):
return self._visit_strict_expression(node)
def visit_UnaryOp(self, node):
return self._visit_strict_expression(node)
def visit_Lambda(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial Lambda nodes not supported '
'(cannot insert statements into lambda bodies).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_IfExp(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial IfExp nodes not supported yet '
'(need to convert to If statement, to evaluate branches lazily '
'and insert statements into them).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_Dict(self, node):
return self._visit_strict_expression(node)
def visit_Set(self, node):
return self._visit_strict_expression(node)
def visit_ListComp(self, node):
msg = ('ListComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_SetComp(self, node):
msg = ('SetComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_DictComp(self, node):
msg = ('DictComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_GeneratorExp(self, node):
msg = ('GeneratorExp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_Await(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial Await nodes not supported yet '
'(need to think through the semantics).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_Yield(self, node):
return self._visit_strict_expression(node)
def visit_YieldFrom(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial YieldFrom nodes not supported yet '
'(need to unit-test them in Python 2).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_Compare(self, node):
if len(node.ops) > 1:
msg = ('Multi-ary compare nodes not supported yet '
'(need to preserve short-circuiting semantics).')
raise ValueError(msg)
return self._visit_strict_expression(node)
def visit_Call(self, node):
return self._visit_strict_expression(node)
def visit_Repr(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial Repr nodes not supported yet '
'(need to research their syntax and semantics).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_FormattedValue(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial FormattedValue nodes not supported yet '
'(need to unit-test them in Python 2).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_JoinedStr(self, node):
if not self._are_children_trivial(node):
msg = ('Nontrivial JoinedStr nodes not supported yet '
'(need to unit-test them in Python 2).')
raise ValueError(msg)
return self.generic_visit(node)
def visit_Attribute(self, node):
return self._visit_strict_expression(node)
def visit_Subscript(self, node):
return self._visit_strict_expression(node)
# Starred and Name are correct by default, because the right thing to do is to
# just recur.
def visit_List(self, node):
node = self.generic_visit(node)
if not isinstance(node.ctx, gast.Store):
self._ensure_fields_trivial(node)
return node
def visit_Tuple(self, node):
node = self.generic_visit(node)
if not isinstance(node.ctx, gast.Store):
self._ensure_fields_trivial(node)
return node
def transform(node, ctx, gensym_source=None):
"""Converts the given node to A-normal form (ANF).
The general idea of A-normal form: https://en.wikipedia.org/wiki/A-normal_form
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
Args:
node: The node to transform.
ctx: transformer.Context. TODO(mdan): What information does this
argument provide?
gensym_source: An optional object with the same interface as `DummyGensym`
for generating unique names.
"""
return AnfTransformer(ctx, gensym_source=gensym_source).visit(node)
| tensorflow-master | tensorflow/python/autograph/pyct/common_transformers/anf.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anf module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.common_transformers import anf
from tensorflow.python.platform import test
class DummyGensym(object):
"""A dumb gensym that suffixes a stem by sequential numbers from 1000."""
def __init__(self, ctx):
del ctx
# A proper implementation needs to account for:
# * ctx.info.namespace
# * all the symbols defined in the AST
# * the symbols generated so far
self._idx = 0
def new_name(self, stem='tmp'):
self._idx += 1
return stem + '_' + str(1000 + self._idx)
# These two test functions have to be top-level, not nested, for compatibility
# with some unknown version of Python 2.7 preceding 2.7.15. Why? Because
# `exec` and nested function definitions _incompatibly_ change the
# representation of local variables, such that `exec` inside a nested function
# definition is a syntax error in that version. The tuple form of `exec` fixes
# this problem, but apparently that was introduced in some unknown version of
# Python that's more recent than at least one version that we wish to be
# compatible with.
def exec_test_function():
# The point is to test A-normal form conversion of exec
# pylint: disable=exec-used
exec('computed' + 5 + 'stuff', globals(), locals())
def exec_expected_result():
# pylint: disable=exec-used
tmp_1001 = 'computed' + 5
tmp_1002 = tmp_1001 + 'stuff'
tmp_1003 = globals()
tmp_1004 = locals()
exec(tmp_1002, tmp_1003, tmp_1004)
class AnfTransformerTest(test.TestCase):
def _simple_context(self):
entity_info = transformer.EntityInfo(
source_code=None, source_file=None, future_features=(), namespace=None)
return transformer.Context(entity_info)
def test_basic(self):
def test_function():
a = 0
return a
node, _ = parser.parse_entity(test_function, future_features=())
node = anf.transform(node, self._simple_context())
result, _, _ = compiler.ast_to_object(node)
self.assertEqual(test_function(), result.test_function())
def assert_same_ast(self, expected_node, node, msg=None):
expected_source = compiler.ast_to_source(expected_node, indentation=' ')
expected_str = textwrap.dedent(expected_source).strip()
got_source = compiler.ast_to_source(node, indentation=' ')
got_str = textwrap.dedent(got_source).strip()
self.assertEqual(expected_str, got_str, msg=msg)
def assert_body_anfs_as_expected(self, expected_fn, test_fn):
# Testing the code bodies only. Wrapping them in functions so the
# syntax highlights nicely, but Python doesn't try to execute the
# statements.
exp_node, _ = parser.parse_entity(expected_fn, future_features=())
node, _ = parser.parse_entity(test_fn, future_features=())
node = anf.transform(
node, self._simple_context(), gensym_source=DummyGensym)
exp_name = exp_node.name
# Ignoring the function names in the result because they can't be
# the same (because both functions have to exist in the same scope
# at the same time).
node.name = exp_name
self.assert_same_ast(exp_node, node)
# Check that ANF is idempotent
node_repeated = anf.transform(
node, self._simple_context(), gensym_source=DummyGensym)
self.assert_same_ast(node_repeated, node)
def test_binop_basic(self):
def test_function(x, y, z):
a = x + y + z
return a
def expected_result(x, y, z):
tmp_1001 = x + y
a = tmp_1001 + z
return a
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_if_basic(self):
def test_function(a, b, c, e, f, g):
if a + b + c:
d = e + f + g
return d
def expected_result(a, b, c, e, f, g):
tmp_1001 = a + b
tmp_1002 = tmp_1001 + c
if tmp_1002:
tmp_1003 = e + f
d = tmp_1003 + g
return d
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_nested_binop_and_return(self):
def test_function(b, c, d, e):
return (2 * b + c) + (d + e)
def expected_result(b, c, d, e):
tmp_1001 = 2 * b
tmp_1002 = tmp_1001 + c
tmp_1003 = d + e
tmp_1004 = tmp_1002 + tmp_1003
return tmp_1004
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_function_call_and_expr(self):
def test_function(call_something, a, b, y, z, c, d, e, f, g, h, i):
call_something(a + b, y * z, kwarg=c + d, *(e + f), **(g + h + i))
def expected_result(call_something, a, b, y, z, c, d, e, f, g, h, i):
tmp_1001 = g + h
tmp_1002 = a + b
tmp_1003 = y * z
tmp_1004 = e + f
tmp_1005 = c + d
tmp_1006 = tmp_1001 + i
call_something(tmp_1002, tmp_1003, kwarg=tmp_1005, *tmp_1004, **tmp_1006)
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_with_and_print(self):
def test_function(a, b, c):
with a + b + c as d:
print(2 * d + 1)
def expected_result(a, b, c):
tmp_1001 = a + b
tmp_1002 = tmp_1001 + c
with tmp_1002 as d:
tmp_1003 = 2 * d
tmp_1004 = tmp_1003 + 1
print(tmp_1004)
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_nested_multi_value_assign(self):
def test_function(a, b, c):
x, y = a, a + b
(z, y), x = (c, y + b), x + a
return z, (y, x)
def expected_result(a, b, c):
tmp_1001 = a + b
x, y = a, tmp_1001
tmp_1002 = y + b
tmp_1003 = (c, tmp_1002)
tmp_1004 = x + a
(z, y), x = tmp_1003, tmp_1004
tmp_1005 = y, x
tmp_1006 = z, tmp_1005
return tmp_1006
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_deeply_nested_multi_value_assign(self):
def test_function(a):
[([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
return [([(b, c), [d, e]], (f, g)), [(h, i, j), k]]
def expected_result(a):
[([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
tmp_1001 = b, c
tmp_1002 = [d, e]
tmp_1003 = [tmp_1001, tmp_1002]
tmp_1004 = f, g
tmp_1005 = h, i, j
tmp_1006 = tmp_1003, tmp_1004
tmp_1007 = [tmp_1005, k]
tmp_1008 = [tmp_1006, tmp_1007]
return tmp_1008
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_local_definition_and_binary_compare(self):
def test_function():
def foo(a, b):
return 2 * a < b
return foo
def expected_result():
def foo(a, b):
tmp_1001 = 2 * a
tmp_1002 = tmp_1001 < b
return tmp_1002
return foo
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_list_literal(self):
def test_function(a, b, c, d, e, f):
return [a + b, c + d, e + f]
def expected_result(a, b, c, d, e, f):
tmp_1001 = a + b
tmp_1002 = c + d
tmp_1003 = e + f
tmp_1004 = [tmp_1001, tmp_1002, tmp_1003]
return tmp_1004
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_tuple_literal_and_unary(self):
def test_function(a, b, c, d, e, f):
return (a + b, -(c + d), e + f)
def expected_result(a, b, c, d, e, f):
tmp_1001 = c + d
tmp_1002 = a + b
tmp_1003 = -tmp_1001
tmp_1004 = e + f
tmp_1005 = (tmp_1002, tmp_1003, tmp_1004)
return tmp_1005
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_set_literal(self):
def test_function(a, b, c, d, e, f):
return set(a + b, c + d, e + f)
def expected_result(a, b, c, d, e, f):
tmp_1001 = a + b
tmp_1002 = c + d
tmp_1003 = e + f
tmp_1004 = set(tmp_1001, tmp_1002, tmp_1003)
return tmp_1004
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_dict_literal_and_repr(self):
def test_function(foo, bar, baz):
return repr({foo + bar + baz: 7 | 8})
def expected_result(foo, bar, baz):
tmp_1001 = foo + bar
tmp_1002 = tmp_1001 + baz
tmp_1003 = 7 | 8
tmp_1004 = {tmp_1002: tmp_1003}
tmp_1005 = repr(tmp_1004)
return tmp_1005
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_field_read_and_write(self):
def test_function(a, d):
a.b.c = d.e.f + 3
def expected_result(a, d):
tmp_1001 = a.b
tmp_1002 = d.e
tmp_1003 = tmp_1002.f
tmp_1001.c = tmp_1003 + 3
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_subscript_read_and_write(self):
def test_function(a, b, c, d, e, f):
a[b][c] = d[e][f] + 3
def expected_result(a, b, c, d, e, f):
tmp_1001 = a[b]
tmp_1002 = d[e]
tmp_1003 = tmp_1002[f]
tmp_1001[c] = tmp_1003 + 3
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_augassign_and_delete(self):
def test_function(a, x, y, z):
a += x + y + z
del a
del z[y][x]
def expected_result(a, x, y, z):
tmp_1001 = x + y
a += tmp_1001 + z
del a
tmp_1002 = z[y]
del tmp_1002[x]
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_raise_yield_and_raise(self):
def test_function(a, c, some_computed, exception):
yield a ** c
raise some_computed('complicated' + exception)
def expected_result(a, c, some_computed, exception):
tmp_1001 = a ** c
yield tmp_1001
tmp_1002 = 'complicated' + exception
tmp_1003 = some_computed(tmp_1002)
raise tmp_1003
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_with_and_if_with_expressions(self):
def test_function(foo, bar, function, quux, quozzle, w, x, y, z):
with foo + bar:
function(x + y)
if quux + quozzle:
function(z / w)
def expected_result(foo, bar, function, quux, quozzle, w, x, y, z):
tmp_1001 = foo + bar
with tmp_1001:
tmp_1002 = x + y
function(tmp_1002)
tmp_1003 = quux + quozzle
if tmp_1003:
tmp_1004 = z / w
function(tmp_1004)
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_exec(self):
self.assert_body_anfs_as_expected(exec_expected_result, exec_test_function)
def test_simple_while_and_assert(self):
def test_function(foo, quux):
while foo:
assert quux
foo = foo + 1 * 3
def expected_result(foo, quux):
while foo:
assert quux
tmp_1001 = 1 * 3
foo = foo + tmp_1001
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_for(self):
def test_function(compute, something, complicated, foo):
for foo in compute(something + complicated):
bar = foo + 1 * 3
return bar
def expected_result(compute, something, complicated, foo):
tmp_1001 = something + complicated
tmp_1002 = compute(tmp_1001)
for foo in tmp_1002:
tmp_1003 = 1 * 3
bar = foo + tmp_1003
return bar
self.assert_body_anfs_as_expected(expected_result, test_function)
# This test collects several examples where the definition of A-normal form
# implemented by this transformer is questionable. Mostly it's here to spell
# out what the definition is in these cases.
def test_controversial(self):
def test_function(b, c, d, f):
a = c + d
a.b = c + d
a[b] = c + d
a += c + d
a, b = c
a, b = c, d
a = f(c)
a = f(c + d)
a[b + d] = f.e(c + d)
def expected_result(b, c, d, f):
a = c + d
a.b = c + d # Should be a.b = tmp? (Definitely not tmp = c + d)
a[b] = c + d # Should be a[b] = tmp? (Definitely not tmp = c + d)
a += c + d # Should be a += tmp? (Definitely not tmp = c + d)
a, b = c # Should be a = c[0], b = c[1]? Or not?
a, b = c, d # Should be a = c, b = d? Or not?
a = f(c)
tmp_1001 = c + d
a = f(tmp_1001)
tmp_1002 = b + d
tmp_1003 = f.e
tmp_1004 = c + d
a[tmp_1002] = tmp_1003(tmp_1004) # Or should be a[tmp1] = tmp2?
self.assert_body_anfs_as_expected(expected_result, test_function)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/pyct/common_transformers/anf_test.py |
tensorflow-master | tensorflow/python/autograph/pyct/common_transformers/__init__.py |
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_info module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct.testing import codegen
from tensorflow.python.platform import test
class CodeGenTest(test.TestCase):
def test_codegen_gens(self):
np.random.seed(0)
for _ in range(1000):
node = codegen.generate_random_functiondef()
fn = compiler.ast_to_object(node)
self.assertIsNotNone(
fn, 'Generated invalid AST that could not convert to source.')
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/autograph/pyct/testing/codegen_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with basic entity definitions for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import with_statement # An extra future import for testing.
def simple_function(x):
"""Docstring."""
return x # comment
def nested_functions(x):
"""Docstring."""
def inner_fn(y):
return y
return inner_fn(x)
def function_with_print():
print('foo')
simple_lambda = lambda: None
class SimpleClass(object):
def simple_method(self):
return self
def method_with_print(self):
print('foo')
def function_with_multiline_call(x):
"""Docstring."""
return range(
x,
x + 1,
)
| tensorflow-master | tensorflow/python/autograph/pyct/testing/basic_definitions.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random code generation for testing/fuzzing."""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import string
import gast
import numpy as np
from tensorflow.python.autograph.pyct import templates
class NodeSampler(object):
sample_map = None
def sample(self):
nodes, magnitudes = zip(*self.sample_map.items())
return np.random.choice(
nodes, p=np.array(magnitudes, dtype='float32') / np.sum(magnitudes))
class StatementSampler(NodeSampler):
sample_map = dict((
(gast.Assign, 10),
(gast.Print, 1),
(gast.If, 2),
(gast.While, 2),
(gast.For, 0),
))
class ExpressionSampler(NodeSampler):
sample_map = dict((
(gast.UnaryOp, 1),
(gast.BinOp, 8),
(gast.Name, 1),
(gast.Call, 0),
))
class CompareSampler(NodeSampler):
sample_map = dict((
(gast.Eq, 1),
(gast.NotEq, 1),
(gast.Lt, 1),
(gast.LtE, 1),
(gast.Gt, 1),
(gast.GtE, 1),
(gast.Is, 1),
(gast.IsNot, 1),
))
class BinaryOpSampler(NodeSampler):
sample_map = dict((
(gast.Add, 1),
(gast.Sub, 1),
(gast.Mult, 1),
(gast.Div, 1),
(gast.FloorDiv, 1),
(gast.Mod, 1),
(gast.Pow, 1),
))
class UnaryOpSampler(NodeSampler):
sample_map = dict(((gast.USub, 1), (gast.UAdd, 0)))
class NameSampler(NodeSampler):
sample_map = dict((
('new', 1),
('existing', 1),
))
N_CONTROLFLOW_STATEMENTS = 10
N_FUNCTIONDEF_STATEMENTS = 10
class CodeGenerator(object):
"""Generate random syntactically-valid Python ASTs."""
def __init__(self, max_depth=3, depth=0):
self.max_depth = max_depth
self.depth = depth
def generate_statement(self):
"""Generate a statement node, dispatching to the correct class method."""
desired_node = StatementSampler().sample()
self.depth += 1
# Enforce some constraints on generating statements.
# E.g., if statements need at least 3 readable variables.
# If we fail to satisfy our constraints, draw another sample.
if desired_node in (gast.While, gast.For, gast.If):
if self.depth > self.max_depth:
return self.generate_statement()
# Go get the generator method and run it
method = 'generate_' + desired_node.__name__
visitor = getattr(self, method)
node = visitor()
self.depth -= 1
return node
def sample_node_list(self, low, high, generator):
"""Generate a list of statements of random length.
Args:
low: Fewest number of statements to generate.
high: Highest number of statements to generate.
generator: Function to call to generate nodes.
Returns:
A list of statements.
"""
statements = []
for _ in range(np.random.randint(low, high)):
statements.append(generator())
return statements
def generate_Name(self, ctx=gast.Load()):
variable_name = '_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(4))
return gast.Name(variable_name, ctx=ctx, annotation=None)
def generate_BinOp(self):
# TODO(alexbw): convert to generate_expression when we get to limit
# expression depth.
op = BinaryOpSampler().sample()()
return gast.BinOp(self.generate_Name(), op, self.generate_Name())
def generate_Compare(self):
op = CompareSampler().sample()()
return gast.Compare(self.generate_Name(), [op], [self.generate_Name()])
def generate_UnaryOp(self):
operand = self.generate_Name()
op = UnaryOpSampler().sample()()
return gast.UnaryOp(op, operand)
def generate_expression(self):
desired_node = ExpressionSampler().sample()
# Go get the generator method and run it
method = 'generate_' + desired_node.__name__
generator = getattr(self, method)
return generator()
def generate_Assign(self):
"""Generate an Assign node."""
# Generate left-hand side
target_node = self.generate_Name(gast.Store())
# Generate right-hand side
value_node = self.generate_expression()
# Put it all together
node = gast.Assign(targets=[target_node], value=value_node)
return node
def generate_If(self):
"""Generate an If node."""
test = self.generate_Compare()
# Generate true branch statements
body = self.sample_node_list(
low=1,
high=N_CONTROLFLOW_STATEMENTS // 2,
generator=self.generate_statement)
# Generate false branch statements
orelse = self.sample_node_list(
low=1,
high=N_CONTROLFLOW_STATEMENTS // 2,
generator=self.generate_statement)
node = gast.If(test, body, orelse)
return node
def generate_While(self):
"""Generate a While node."""
test = self.generate_Compare()
body = self.sample_node_list(
low=1, high=N_CONTROLFLOW_STATEMENTS, generator=self.generate_statement)
orelse = [] # not generating else statements
node = gast.While(test, body, orelse)
return node
def generate_Call(self):
raise NotImplementedError
def generate_Return(self):
return gast.Return(self.generate_expression())
def generate_Print(self):
return templates.replace('print(x)', x=self.generate_expression())[0]
def generate_FunctionDef(self):
"""Generate a FunctionDef node."""
    # Generate the argument list for the function signature
arg_vars = self.sample_node_list(
low=2, high=10, generator=lambda: self.generate_Name(gast.Param()))
args = gast.arguments(arg_vars, None, [], [], None, [])
# Generate the function body
body = self.sample_node_list(
low=1, high=N_FUNCTIONDEF_STATEMENTS, generator=self.generate_statement)
body.append(self.generate_Return())
fn_name = self.generate_Name().id
node = gast.FunctionDef(fn_name, args, body, (), None)
return node
def generate_random_functiondef():
return CodeGenerator().generate_FunctionDef()
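# Illustrative usage sketch (not part of the original module): draw a random
# FunctionDef and inspect a couple of its attributes. The helper name below is
# hypothetical and unused elsewhere.
def _example_random_functiondef_sketch():
  fn_node = generate_random_functiondef()
  assert isinstance(fn_node, gast.FunctionDef)
  # Every generated function ends with a Return statement (see above).
  return fn_node.name, len(fn_node.body)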
| tensorflow-master | tensorflow/python/autograph/pyct/testing/codegen.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with test decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
def wrapping_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
def standalone_decorator(f):
def standalone_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return standalone_wrapper
def functional_decorator():
def decorator(f):
def functional_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return functional_wrapper
return decorator
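# Illustrative sketch (not part of the original module): how each decorator
# style above is typically applied. The decorated function names below are
# hypothetical.
@wrapping_decorator
def _example_wrapped(x):
  return x + 1
@standalone_decorator
def _example_standalone(x):
  return x * 2
@functional_decorator()
def _example_functional(x):
  return x - 1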
| tensorflow-master | tensorflow/python/autograph/pyct/testing/decorators.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics
from tensorflow.python.platform import test
class KerasFunctionalMetricsTest(test.TestCase):
def test_metrics(self):
with self.cached_session():
y_a = K.variable(np.random.random((6, 7)))
y_b = K.variable(np.random.random((6, 7)))
for metric in [metrics.binary_accuracy, metrics.categorical_accuracy]:
output = metric(y_a, y_b)
self.assertEqual(K.eval(output).shape, (6,))
def test_sparse_categorical_accuracy_int(self):
with self.cached_session():
metric = metrics.sparse_categorical_accuracy
y_true = K.variable(np.random.randint(0, 7, (6,)))
y_pred = K.variable(np.random.random((6, 7)))
self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))
# Test correctness if the shape of y_true is (num_samples,)
y_true = K.variable([1., 0., 0., 0.])
y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])
print(K.eval(metric(y_true, y_pred)))
self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])
# Test correctness if the shape of y_true is (num_samples, 1)
y_true = K.variable([[1.], [0.], [0.], [0.]])
y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])
print(K.eval(metric(y_true, y_pred)))
self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])
def test_sparse_categorical_accuracy_float(self):
with self.cached_session():
metric = metrics.sparse_categorical_accuracy
y_true = K.variable(np.random.random((6,)))
y_pred = K.variable(np.random.random((6, 7)))
self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))
def test_sparse_categorical_accuracy_eager(self):
"""Tests that ints passed in via Eager return results. See b/113504761."""
with context.eager_mode():
metric = metrics.sparse_categorical_accuracy
y_true = np.arange(6).reshape([6, 1])
y_pred = np.arange(36).reshape([6, 6])
self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])
def test_sparse_categorical_accuracy_float_eager(self):
"""Tests that floats passed in via Eager return results. See b/113504761."""
with context.eager_mode():
metric = metrics.sparse_categorical_accuracy
y_true = np.arange(6, dtype=np.float32).reshape([6, 1])
y_pred = np.arange(36).reshape([6, 6])
self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])
def test_sparse_top_k_categorical_accuracy(self):
with self.cached_session():
# Test correctness if the shape of y_true is (num_samples, 1)
y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
y_true = K.variable(np.array([[1], [0]]))
result = K.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
self.assertEqual(np.mean(result), 1)
result = K.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
self.assertEqual(np.mean(result), 0.5)
result = K.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
self.assertEqual(np.mean(result), 0.)
# Test correctness if the shape of y_true is (num_samples,)
y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
y_true = K.variable(np.array([1, 0]))
result = K.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
self.assertEqual(np.mean(result), 1)
result = K.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
self.assertEqual(np.mean(result), 0.5)
result = K.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
self.assertEqual(np.mean(result), 0.)
def test_top_k_categorical_accuracy(self):
with self.cached_session():
y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
self.assertEqual(np.mean(result), 1)
result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
self.assertEqual(np.mean(result), 0.5)
result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
self.assertEqual(np.mean(result), 0.)
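# Illustrative sketch (not an original test helper): the per-example membership
# check behind top-k categorical accuracy, expressed in plain numpy.
def _example_top_k_membership(true_index, pred_row, k):
  top_k_indices = np.argsort(pred_row)[-k:]  # indices of the k largest scores
  return float(true_index in top_k_indices)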
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/metrics_functional_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.platform import test
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
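# Illustrative sketch (not an original helper): the reference softmax above
# subtracts the max for numerical stability and its output sums to 1.
def _example_ref_softmax_property_check():
  values = np.array([1.0, 2.0, 3.0])
  probs = _ref_softmax(values)
  return np.isclose(np.sum(probs), 1.0)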
@test_util.run_all_in_graph_and_eager_modes
class KerasActivationsTest(test.TestCase):
def test_serialization(self):
all_activations = ['softmax', 'relu', 'elu', 'tanh',
'sigmoid', 'hard_sigmoid', 'linear',
'softplus', 'softsign', 'selu']
for name in all_activations:
fn = keras.activations.get(name)
ref_fn = getattr(keras.activations, name)
assert fn == ref_fn
config = keras.activations.serialize(fn)
fn = keras.activations.deserialize(config)
assert fn == ref_fn
def test_serialization_v2(self):
activation_map = {nn.softmax_v2: 'softmax'}
for fn_v2_key in activation_map:
fn_v2 = keras.activations.get(fn_v2_key)
config = keras.activations.serialize(fn_v2)
fn = keras.activations.deserialize(config)
assert fn.__name__ == activation_map[fn_v2_key]
def test_serialization_with_layers(self):
activation = keras.layers.LeakyReLU(alpha=0.1)
layer = keras.layers.Dense(3, activation=activation)
config = keras.layers.serialize(layer)
deserialized_layer = keras.layers.deserialize(
config, custom_objects={'LeakyReLU': activation})
self.assertEqual(deserialized_layer.__class__.__name__,
layer.__class__.__name__)
self.assertEqual(deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__)
def test_softmax(self):
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.softmax(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = _ref_softmax(test_values[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
with self.assertRaises(ValueError):
x = keras.backend.placeholder(ndim=1)
keras.activations.softmax(x)
def test_temporal_softmax(self):
x = keras.backend.placeholder(shape=(2, 2, 3))
f = keras.backend.function([x], [keras.activations.softmax(x)])
test_values = np.random.random((2, 2, 3)) * 10
result = f([test_values])[0]
expected = _ref_softmax(test_values[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.selu(x)])
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
positive_values = np.array([[1, 2]], dtype=keras.backend.floatx())
result = f([positive_values])[0]
self.assertAllClose(result, positive_values * scale, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=keras.backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) * scale * alpha
self.assertAllClose(result, true_result)
def test_softplus(self):
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.softplus(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softplus(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softsign(self):
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.softsign(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softsign(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sigmoid(self):
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_sigmoid(self):
def ref_hard_sigmoid(x):
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.hard_sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_relu(self):
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.relu(x)])
positive_values = np.random.random((2, 5))
result = f([positive_values])[0]
self.assertAllClose(result, positive_values, rtol=1e-05)
negative_values = np.random.uniform(-1, 0, (2, 5))
result = f([negative_values])[0]
expected = np.zeros((2, 5))
self.assertAllClose(result, expected, rtol=1e-05)
def test_elu(self):
x = keras.backend.placeholder(ndim=2)
f = keras.backend.function([x], [keras.activations.elu(x, 0.5)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
self.assertAllClose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=keras.backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
self.assertAllClose(result, true_result)
def test_tanh(self):
test_values = np.random.random((2, 5))
x = keras.backend.placeholder(ndim=2)
exp = keras.activations.tanh(x)
f = keras.backend.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_exponential(self):
test_values = np.random.random((2, 5))
x = keras.backend.placeholder(ndim=2)
exp = keras.activations.exponential(x)
f = keras.backend.function([x], [exp])
result = f([test_values])[0]
expected = np.exp(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_linear(self):
x = np.random.random((10, 5))
self.assertAllClose(x, keras.activations.linear(x))
def test_invalid_usage(self):
with self.assertRaises(ValueError):
keras.activations.get('unknown')
# The following should be possible but should raise a warning:
keras.activations.get(keras.layers.LeakyReLU())
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/activations_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import cosine_similarity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_variable
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.metrics.Metric')
@six.add_metaclass(abc.ABCMeta)
class Metric(Layer):
"""Encapsulates metric logic and state.
Usage:
```python
m = SomeMetric(...)
for input in ...:
m.update_state(input)
print('Final result: ', m.result().numpy())
```
Usage with tf.keras API:
```python
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.compat.v1.train.RMSPropOptimizer(0.01),
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.CategoricalAccuracy()])
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
dataset = dataset.repeat()
model.fit(dataset, epochs=10, steps_per_epoch=30)
```
To be implemented by subclasses:
* `__init__()`: All state variables should be created in this method by
calling `self.add_weight()` like: `self.var = self.add_weight(...)`
* `update_state()`: Has all updates to the state variables like:
self.var.assign_add(...).
* `result()`: Computes and returns a value for the metric
from the state variables.
Example subclass implementation:
```
class BinaryTruePositives(tf.keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_weights(sample_weight, values)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
```
"""
def __init__(self, name=None, dtype=None, **kwargs):
super(Metric, self).__init__(name=name, dtype=dtype, **kwargs)
self.stateful = True # All metric layers are stateful.
self.built = True
self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name
def __new__(cls, *args, **kwargs):
obj = super(Metric, cls).__new__(cls)
# TODO(psv): We are excluding wrapping `update_state` of built-in metrics
# with function here because of b/121302287. With this, built-in metrics
# will continue to work with TPUs and custom metrics will not, however
# users writing custom metrics need not worry about control dependencies
# and returning ops.
if cls.__module__ == Metric.__module__:
update_state_fn = obj.update_state
else:
update_state_fn = def_function.function(obj.update_state)
obj.update_state = types.MethodType(
metrics_utils.update_state_wrapper(update_state_fn), obj)
obj.result = types.MethodType(metrics_utils.result_wrapper(obj.result), obj)
return obj
def __call__(self, *args, **kwargs):
"""Accumulates statistics and then computes metric result value.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric,
passed on to `update_state()`.
Returns:
The metric value tensor.
"""
def replica_local_fn(*args, **kwargs):
"""Updates the state of the metric in a replica-local context."""
update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
with ops.control_dependencies([update_op]):
result_t = self.result() # pylint: disable=not-callable
# We are adding the metric object as metadata on the result tensor.
# This is required when we want to use a metric with `add_metric` API on
# a Model/Layer in graph mode. This metric instance will later be used
# to reset variable state after each epoch of training.
# Example:
# model = Model()
# mean = Mean()
# model.add_metric(mean(values), name='mean')
result_t._metric_obj = self # pylint: disable=protected-access
return result_t
from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top
return distributed_training_utils.call_replica_local_fn(
replica_local_fn, *args, **kwargs)
@property
def dtype(self):
return self._dtype
def get_config(self):
"""Returns the serializable config of the metric."""
return {'name': self.name, 'dtype': self.dtype}
def reset_states(self):
"""Resets all of the metric state variables.
This function is called between epochs/steps,
when a metric is evaluated during training.
"""
K.batch_set_value([(v, 0) for v in self.variables])
@abc.abstractmethod
def update_state(self, *args, **kwargs):
"""Accumulates statistics for the metric.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
Please use `tf.config.experimental_run_functions_eagerly(True)` to execute
this function eagerly for debugging or profiling.
Args:
*args:
**kwargs: A mini-batch of inputs to the Metric.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def result(self):
"""Computes and returns the metric value tensor.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
"""
raise NotImplementedError('Must be implemented in subclasses.')
### For use by subclasses ###
@doc_controls.for_subclass_implementers
def add_weight(self,
name,
shape=(),
aggregation=tf_variables.VariableAggregation.SUM,
synchronization=tf_variables.VariableSynchronization.ON_READ,
initializer=None,
dtype=None):
"""Adds state variable. Only for use by subclasses."""
return super(Metric, self).add_weight(
name=name,
shape=shape,
dtype=self._dtype if dtype is None else dtype,
trainable=False,
initializer=initializer,
collections=[],
synchronization=synchronization,
aggregation=aggregation)
### End: For use by subclasses ###
class Reduce(Metric):
"""Encapsulates metrics that perform a reduce operation on the values."""
def __init__(self, reduction, name, dtype=None):
"""Creates a `Reduce` instance.
Args:
reduction: a `tf.keras.metrics.Reduction` enum value.
name: string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Reduce, self).__init__(name=name, dtype=dtype)
self.reduction = reduction
with ops.init_scope():
self.total = self.add_weight(
'total', initializer=init_ops.zeros_initializer)
if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
metrics_utils.Reduction.WEIGHTED_MEAN]:
self.count = self.add_weight(
'count', initializer=init_ops.zeros_initializer)
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the reduction metric.
For example, if `values` is [1, 3, 5, 7] and reduction=SUM_OVER_BATCH_SIZE,
    then the value of `result()` is 4. If `sample_weight` is specified as
    [1, 1, 0, 0] then the value of `result()` would be 1 (the weighted sum 4
    divided by the batch size 4); with the WEIGHTED_MEAN reduction it would be
    2, since that reduction divides by the sum of the weights instead.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
[values], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[values], sample_weight)
values = math_ops.cast(values, self._dtype)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
if self.reduction == metrics_utils.Reduction.SUM:
values = math_ops.reduce_sum(
values, axis=list(range(weight_ndim, ndim)))
else:
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
values = math_ops.multiply(values, sample_weight)
value_sum = math_ops.reduce_sum(values)
with ops.control_dependencies([value_sum]):
update_total_op = self.total.assign_add(value_sum)
# Exit early if the reduction doesn't have a denominator.
if self.reduction == metrics_utils.Reduction.SUM:
return update_total_op
# Update `count` for reductions that require a denominator.
if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
if sample_weight is None:
num_values = math_ops.cast(array_ops.size(values), self._dtype)
else:
num_values = math_ops.reduce_sum(sample_weight)
else:
raise NotImplementedError(
'reduction [%s] not implemented' % self.reduction)
with ops.control_dependencies([update_total_op]):
return self.count.assign_add(num_values)
def result(self):
if self.reduction == metrics_utils.Reduction.SUM:
return array_ops.identity(self.total)
elif self.reduction in [
metrics_utils.Reduction.WEIGHTED_MEAN,
metrics_utils.Reduction.SUM_OVER_BATCH_SIZE
]:
return math_ops.div_no_nan(self.total, self.count)
else:
raise NotImplementedError(
'reduction [%s] not implemented' % self.reduction)
@keras_export('keras.metrics.Sum')
class Sum(Reduce):
"""Computes the (weighted) sum of the given values.
For example, if values is [1, 3, 5, 7] then the sum is 16.
If the weights were specified as [1, 1, 0, 0] then the sum would be 4.
This metric creates one variable, `total`, that is used to compute the sum of
`values`. This is ultimately returned as `sum`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
Usage:
```python
m = tf.keras.metrics.Sum()
m.update_state([1, 3, 5, 7])
print('Final result: ', m.result().numpy()) # Final result: 16.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
model.compile('sgd', loss='mse')
```
"""
def __init__(self, name='sum', dtype=None):
"""Creates a `Sum` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Sum, self).__init__(reduction=metrics_utils.Reduction.SUM,
name=name, dtype=dtype)
@keras_export('keras.metrics.Mean')
class Mean(Reduce):
"""Computes the (weighted) mean of the given values.
For example, if values is [1, 3, 5, 7] then the mean is 4.
If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as `mean`
which is an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.Mean()
m.update_state([1, 3, 5, 7])
print('Final result: ', m.result().numpy()) # Final result: 4.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
model.compile('sgd', loss='mse')
```
"""
def __init__(self, name='mean', dtype=None):
"""Creates a `Mean` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Mean, self).__init__(
reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype)
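# Illustrative numpy sketch (not part of the original module) of the weighted
# mean that `Mean` accumulates: `total` collects sum(values * weights) and
# `count` collects sum(weights).
def _example_weighted_mean_sketch():
  values = np.array([1., 3., 5., 7.])
  weights = np.array([1., 1., 0., 0.])
  total = np.sum(values * weights)
  count = np.sum(weights)
  return total / count  # 2.0, matching the docstring example above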
@keras_export('keras.metrics.MeanRelativeError')
class MeanRelativeError(Mean):
"""Computes the mean relative error by normalizing with the given values.
This metric creates two local variables, `total` and `count` that are used to
compute the mean relative absolute error. This average is weighted by
`sample_weight`, and it is ultimately returned as `mean_relative_error`:
an idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
m.update_state([1, 3, 2, 3], [2, 4, 6, 8])
# metric = mean(|y_pred - y_true| / normalizer)
# = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
# = 5/4 = 1.25
print('Final result: ', m.result().numpy()) # Final result: 1.25
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
```
"""
def __init__(self, normalizer, name=None, dtype=None):
"""Creates a `MeanRelativeError` instance.
Args:
      normalizer: The normalizer values with the same shape as predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(MeanRelativeError, self).__init__(name=name, dtype=dtype)
normalizer = math_ops.cast(normalizer, self._dtype)
self.normalizer = normalizer
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_pred, y_true], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true], sample_weight)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
y_pred, self.normalizer = confusion_matrix.remove_squeezable_dimensions(
y_pred, self.normalizer)
y_pred.shape.assert_is_compatible_with(y_true.shape)
relative_errors = math_ops.div_no_nan(
math_ops.abs(y_true - y_pred), self.normalizer)
return super(MeanRelativeError, self).update_state(
relative_errors, sample_weight=sample_weight)
def get_config(self):
n = self.normalizer
config = {'normalizer': K.eval(n) if is_tensor_or_variable(n) else n}
base_config = super(MeanRelativeError, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MeanMetricWrapper(Mean):
"""Wraps a stateless metric function with the Mean metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `MeanMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature
`fn(y_true, y_pred, **kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be
a `Tensor` whose rank is either 0, or the same rank as `y_true`,
and must be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_true, y_pred], sample_weight = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_true, y_pred], sample_weight)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
matches = self._fn(y_true, y_pred, **self._fn_kwargs)
return super(MeanMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(MeanMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
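# Illustrative sketch (an assumption, not part of the original module): wrapping
# a stateless per-example function with `MeanMetricWrapper` so it behaves like a
# streaming mean metric. The helper and function names are hypothetical.
def _example_mean_metric_wrapper_sketch():
  def absolute_error(y_true, y_pred):  # stateless per-example metric function
    return math_ops.abs(y_true - y_pred)
  metric = MeanMetricWrapper(absolute_error, name='example_absolute_error')
  metric.update_state([1., 2.], [1.5, 2.5])  # running mean of |y_true - y_pred|
  return metric.result()  # 0.5 when run eagerly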
@keras_export('keras.metrics.Accuracy')
class Accuracy(MeanMetricWrapper):
"""Calculates how often predictions matches labels.
For example, if `y_true` is [1, 2, 3, 4] and `y_pred` is [0, 2, 3, 4]
then the accuracy is 3/4 or .75. If the weights were specified as
[1, 1, 0, 0] then the accuracy would be 1/2 or .5.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
  ultimately returned as `accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.Accuracy()
m.update_state([1, 2, 3, 4], [0, 2, 3, 4])
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Accuracy()])
```
"""
def __init__(self, name='accuracy', dtype=None):
super(Accuracy, self).__init__(accuracy, name, dtype=dtype)
@keras_export('keras.metrics.BinaryAccuracy')
class BinaryAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches labels.
For example, if `y_true` is [1, 1, 0, 0] and `y_pred` is [0.98, 1, 0, 0.6]
then the binary accuracy is 3/4 or .75. If the weights were specified as
[1, 0, 0, 1] then the binary accuracy would be 1/2 or .5.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.BinaryAccuracy()
m.update_state([1, 1, 0, 0], [0.98, 1, 0, 0.6])
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.BinaryAccuracy()])
```
"""
def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):
"""Creates a `BinaryAccuracy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
"""
super(BinaryAccuracy, self).__init__(
binary_accuracy, name, dtype=dtype, threshold=threshold)
@keras_export('keras.metrics.CategoricalAccuracy')
class CategoricalAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches labels.
For example, if `y_true` is [[0, 0, 1], [0, 1, 0]] and `y_pred` is
[[0.1, 0.9, 0.8], [0.05, 0.95, 0]] then the categorical accuracy is 1/2 or .5.
If the weights were specified as [0.7, 0.3] then the categorical accuracy
  would be .3. You can provide logits of classes as `y_pred`, since the argmax
  of logits and probabilities is the same.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `categorical accuracy`: an idempotent operation that
simply divides `total` by `count`.
`y_pred` and `y_true` should be passed in as vectors of probabilities, rather
than as labels. If necessary, use `tf.one_hot` to expand `y_true` as a vector.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.CategoricalAccuracy()
m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
print('Final result: ', m.result().numpy()) # Final result: 0.5
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalAccuracy()])
```
"""
def __init__(self, name='categorical_accuracy', dtype=None):
"""Creates a `CategoricalAccuracy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(CategoricalAccuracy, self).__init__(
categorical_accuracy, name, dtype=dtype)
@keras_export('keras.metrics.SparseCategoricalAccuracy')
class SparseCategoricalAccuracy(MeanMetricWrapper):
"""Calculates how often predictions matches integer labels.
For example, if `y_true` is [[2], [1]] and `y_pred` is
[[0.1, 0.9, 0.8], [0.05, 0.95, 0]] then the categorical accuracy is 1/2 or .5.
If the weights were specified as [0.7, 0.3] then the categorical accuracy
  would be .3. You can provide logits of classes as `y_pred`, since the argmax
  of logits and probabilities is the same.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `sparse categorical accuracy`: an idempotent operation
that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.SparseCategoricalAccuracy()
m.update_state([[2], [1]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
print('Final result: ', m.result().numpy()) # Final result: 0.5
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
```
"""
def __init__(self, name='sparse_categorical_accuracy', dtype=None):
super(SparseCategoricalAccuracy, self).__init__(
sparse_categorical_accuracy, name, dtype=dtype)
@keras_export('keras.metrics.TopKCategoricalAccuracy')
class TopKCategoricalAccuracy(MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Usage:
```python
m = tf.keras.metrics.TopKCategoricalAccuracy()
m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
print('Final result: ', m.result().numpy()) # Final result: 1.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):
"""Creates a `TopKCategoricalAccuracy` instance.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(TopKCategoricalAccuracy, self).__init__(
top_k_categorical_accuracy, name, dtype=dtype, k=k)
@keras_export('keras.metrics.SparseTopKCategoricalAccuracy')
class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
Usage:
```python
m = tf.keras.metrics.SparseTopKCategoricalAccuracy()
m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
print('Final result: ', m.result().numpy()) # Final result: 1.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):
"""Creates a `SparseTopKCategoricalAccuracy` instance.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(SparseTopKCategoricalAccuracy, self).__init__(
sparse_top_k_categorical_accuracy, name, dtype=dtype, k=k)
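# Illustrative numpy sketch (not part of the original module) of the per-example
# check behind `SparseTopKCategoricalAccuracy`, using the docstring example.
def _example_sparse_top_k_sketch(k=2):
  y_true = np.array([2, 1])
  y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0.]])
  top_k = np.argsort(-y_pred, axis=-1)[:, :k]  # k highest-scoring classes
  hits = np.array([label in row for label, row in zip(y_true, top_k)])
  return hits.mean()  # 1.0 for these values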
class _ConfusionMatrixConditionCount(Metric):
"""Calculates the number of the given confusion matrix condition."""
def __init__(self,
confusion_matrix_cond,
thresholds=None,
name=None,
dtype=None):
"""Creates a `_ConfusionMatrixConditionCount` instance.
Args:
confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
self._confusion_matrix_cond = confusion_matrix_cond
self.init_thresholds = thresholds
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=0.5)
self.accumulator = self.add_weight(
'accumulator',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the given confusion matrix condition statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{self._confusion_matrix_cond: self.accumulator},
y_true,
y_pred,
thresholds=self.thresholds,
sample_weight=sample_weight)
def result(self):
if len(self.thresholds) == 1:
result = self.accumulator[0]
else:
result = self.accumulator
return ops.convert_to_tensor(result)
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {'thresholds': self.init_thresholds}
base_config = super(_ConfusionMatrixConditionCount, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
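# Illustrative numpy sketch (not part of the original module) of the thresholded
# count maintained above, shown for the FALSE_POSITIVES condition at 0.5.
def _example_false_positive_count_sketch():
  y_true = np.array([0, 1, 0, 0])
  y_pred = np.array([0.1, 0.2, 0.7, 0.9])
  predicted_positive = y_pred > 0.5
  return np.sum(np.logical_and(predicted_positive, y_true == 0))  # 2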
@keras_export('keras.metrics.FalsePositives')
class FalsePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of false positives.
For example, if `y_true` is [0, 1, 0, 0] and `y_pred` is [0, 0, 1, 1]
then the false positives value is 2. If the weights were specified as
[0, 0, 1, 0] then the false positives value would be 1.
If `sample_weight` is given, calculates the sum of the weights of
false positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.FalsePositives()
m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
print('Final result: ', m.result().numpy()) # Final result: 2
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalsePositives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `FalsePositives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(FalsePositives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.FalseNegatives')
class FalseNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of false negatives.
For example, if `y_true` is [0, 1, 1, 1] and `y_pred` is [0, 1, 0, 0]
then the false negatives value is 2. If the weights were specified as
[0, 0, 1, 0] then the false negatives value would be 1.
If `sample_weight` is given, calculates the sum of the weights of
false negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.FalseNegatives()
m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
print('Final result: ', m.result().numpy()) # Final result: 2
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalseNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `FalseNegatives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(FalseNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.TrueNegatives')
class TrueNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
For example, if `y_true` is [0, 1, 0, 0] and `y_pred` is [1, 1, 0, 0]
then the true negatives value is 2. If the weights were specified as
[0, 0, 1, 0] then the true negatives value would be 1.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.TrueNegatives()
m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
print('Final result: ', m.result().numpy()) # Final result: 2
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TrueNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `TrueNegatives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(TrueNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.TruePositives')
class TruePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of true positives.
For example, if `y_true` is [0, 1, 1, 1] and `y_pred` is [1, 0, 1, 1]
then the true positives value is 2. If the weights were specified as
[0, 0, 1, 0] then the true positives value would be 1.
If `sample_weight` is given, calculates the sum of the weights of
  true positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.TruePositives()
m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
print('Final result: ', m.result().numpy()) # Final result: 2
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TruePositives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
"""Creates a `TruePositives` instance.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(TruePositives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
@keras_export('keras.metrics.Precision')
class Precision(Metric):
"""Computes the precision of the predictions with respect to the labels.
For example, if `y_true` is [0, 1, 1, 1] and `y_pred` is [1, 0, 1, 1]
  then the precision value is 2/(2+1), i.e. 0.66. If the weights were specified as
[0, 0, 1, 0] then the precision value would be 1.
The metric creates two local variables, `true_positives` and `false_positives`
that are used to compute the precision. This value is ultimately returned as
`precision`, an idempotent operation that simply divides `true_positives`
by the sum of `true_positives` and `false_positives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, we'll calculate precision as how often on average a class
among the top-k classes with the highest predicted values of a batch entry is
correct and can be found in the label for that entry.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold and/or in the
top-k highest predictions, and computing the fraction of them for which
`class_id` is indeed a correct label.
Usage:
```python
m = tf.keras.metrics.Precision()
m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
print('Final result: ', m.result().numpy()) # Final result: 0.66
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Precision()])
```
"""
def __init__(self,
thresholds=None,
top_k=None,
class_id=None,
name=None,
dtype=None):
"""Creates a `Precision` instance.
Args:
thresholds: (Optional) A float value or a python list/tuple of float
threshold values in [0, 1]. A threshold is compared with prediction
values to determine the truth value of predictions (i.e., above the
threshold is `true`, below is `false`). One metric value is generated
for each threshold value. If neither thresholds nor top_k are set, the
default is to calculate precision with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating precision.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Precision, self).__init__(name=name, dtype=dtype)
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold)
self.true_positives = self.add_weight(
'true_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false positive statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight)
def result(self):
result = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_positives)
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'thresholds': self.init_thresholds,
'top_k': self.top_k,
'class_id': self.class_id
}
base_config = super(Precision, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.Recall')
class Recall(Metric):
"""Computes the recall of the predictions with respect to the labels.
For example, if `y_true` is [0, 1, 1, 1] and `y_pred` is [1, 0, 1, 1]
  then the recall value is 2/(2+1), i.e. 0.66. If the weights were specified as
[0, 0, 1, 0] then the recall value would be 1.
This metric creates two local variables, `true_positives` and
`false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, recall will be computed as how often on average a class
among the labels of a batch entry is in the top-k predictions.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing the
fraction of them for which `class_id` is above the threshold and/or in the
top-k predictions.
Usage:
```python
m = tf.keras.metrics.Recall()
m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
print('Final result: ', m.result().numpy()) # Final result: 0.66
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Recall()])
```
"""
def __init__(self,
thresholds=None,
top_k=None,
class_id=None,
name=None,
dtype=None):
"""Creates a `Recall` instance.
Args:
thresholds: (Optional) A float value or a python list/tuple of float
threshold values in [0, 1]. A threshold is compared with prediction
values to determine the truth value of predictions (i.e., above the
threshold is `true`, below is `false`). One metric value is generated
for each threshold value. If neither thresholds nor top_k are set, the
default is to calculate recall with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating recall.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(Recall, self).__init__(name=name, dtype=dtype)
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold)
self.true_positives = self.add_weight(
'true_positives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(len(self.thresholds),),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false negative statistics.
Args:
y_true: The ground truth values, with the same dimensions as `y_pred`.
Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range `[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight)
def result(self):
result = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_negatives)
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'thresholds': self.init_thresholds,
'top_k': self.top_k,
'class_id': self.class_id
}
base_config = super(Recall, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
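# Illustrative sketch (not part of the library API): a NumPy approximation of
# what `top_k` and `class_id` restrict `Recall` to. With `top_k` set, a
# prediction counts as positive only when its class is among the k highest
# scores for that example; with `class_id` set, only that column is scored.
# The `_example_*` name is hypothetical and exists only for this sketch.
def _example_recall_top_k_class_id(y_true, y_pred, top_k, class_id):
  y_true = np.asarray(y_true, dtype=bool)
  y_pred = np.asarray(y_pred, dtype=float)
  # Rank classes per example (0 = highest score); the top-k ranked classes are
  # treated as predicted positives.
  ranks = np.argsort(np.argsort(-y_pred, axis=-1), axis=-1)
  pred_positive = ranks < top_k
  # Restrict the computation to the requested class column.
  true_col = y_true[:, class_id]
  pred_col = pred_positive[:, class_id]
  true_positives = np.sum(true_col & pred_col)
  false_negatives = np.sum(true_col & ~pred_col)
  denominator = true_positives + false_negatives
  return true_positives / denominator if denominator > 0 else 0.0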
@six.add_metaclass(abc.ABCMeta)
class SensitivitySpecificityBase(Metric):
"""Abstract base class for computing sensitivity and specificity.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
"""
def __init__(self, value, num_thresholds=200, name=None, dtype=None):
super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
if num_thresholds <= 0:
raise ValueError('`num_thresholds` must be > 0.')
self.value = value
self.true_positives = self.add_weight(
'true_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(num_thresholds,),
initializer=init_ops.zeros_initializer)
# Compute `num_thresholds` thresholds in [0, 1]
if num_thresholds == 1:
self.thresholds = [0.5]
else:
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
self.thresholds = [0.0] + thresholds + [1.0]
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
},
y_true,
y_pred,
thresholds=self.thresholds,
sample_weight=sample_weight)
def reset_states(self):
num_thresholds = len(self.thresholds)
K.batch_set_value(
[(v, np.zeros((num_thresholds,))) for v in self.variables])
@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
"""Computes the sensitivity at a given specificity.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such (tp / (tp + fn)).
`Specificity` measures the proportion of actual negatives that are correctly
identified as such (tn / (tn + fp)).
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
sensitivity at the given specificity. The threshold for the given specificity
value is computed and used to evaluate the corresponding sensitivity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Usage:
```python
m = tf.keras.metrics.SensitivityAtSpecificity(0.4, num_thresholds=1)
m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
print('Final result: ', m.result().numpy()) # Final result: 0.5
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SensitivityAtSpecificity()])
```
"""
def __init__(self, specificity, num_thresholds=200, name=None, dtype=None):
"""Creates a `SensitivityAtSpecificity` instance.
Args:
specificity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given specificity.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
self.specificity = specificity
self.num_thresholds = num_thresholds
super(SensitivityAtSpecificity, self).__init__(
specificity, num_thresholds=num_thresholds, name=name, dtype=dtype)
def result(self):
# Calculate specificities at all the thresholds.
specificities = math_ops.div_no_nan(
self.true_negatives, self.true_negatives + self.false_positives)
# Find the index of the threshold where the specificity is closest to the
# given specificity.
min_index = math_ops.argmin(
math_ops.abs(specificities - self.value), axis=0)
min_index = math_ops.cast(min_index, dtypes.int32)
# Compute sensitivity at that index.
return math_ops.div_no_nan(
self.true_positives[min_index],
self.true_positives[min_index] + self.false_negatives[min_index])
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'specificity': self.specificity
}
base_config = super(SensitivityAtSpecificity, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
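# Minimal NumPy sketch (assuming 1-D per-threshold confusion counts) of the
# threshold search performed by `SensitivityAtSpecificity.result`: pick the
# threshold whose specificity is closest to the target, then report the
# sensitivity at that threshold. The helper name is hypothetical.
def _example_sensitivity_at_specificity(tp, tn, fp, fn, target_specificity):
  tp, tn, fp, fn = (np.asarray(a, dtype=float) for a in (tp, tn, fp, fn))
  specificities = np.divide(
      tn, tn + fp, out=np.zeros_like(tn), where=(tn + fp) > 0)
  # Index of the threshold with specificity closest to the requested value.
  idx = int(np.argmin(np.abs(specificities - target_specificity)))
  denominator = tp[idx] + fn[idx]
  return tp[idx] / denominator if denominator > 0 else 0.0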
@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
"""Computes the specificity at a given sensitivity.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such (tp / (tp + fn)).
`Specificity` measures the proportion of actual negatives that are correctly
identified as such (tn / (tn + fp)).
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the
specificity at the given sensitivity. The threshold for the given sensitivity
value is computed and used to evaluate the corresponding specificity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Usage:
```python
m = tf.keras.metrics.SpecificityAtSensitivity(0.8, num_thresholds=1)
m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
print('Final result: ', m.result().numpy()) # Final result: 1.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SpecificityAtSensitivity()])
```
"""
def __init__(self, sensitivity, num_thresholds=200, name=None, dtype=None):
"""Creates a `SpecificityAtSensitivity` instance.
Args:
sensitivity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given sensitivity.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
self.sensitivity = sensitivity
self.num_thresholds = num_thresholds
super(SpecificityAtSensitivity, self).__init__(
sensitivity, num_thresholds=num_thresholds, name=name, dtype=dtype)
def result(self):
# Calculate sensitivities at all the thresholds.
sensitivities = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_negatives)
# Find the index of the threshold where the sensitivity is closest to the
# given sensitivity.
min_index = math_ops.argmin(
math_ops.abs(sensitivities - self.value), axis=0)
min_index = math_ops.cast(min_index, dtypes.int32)
# Compute specificity at that index.
return math_ops.div_no_nan(
self.true_negatives[min_index],
self.true_negatives[min_index] + self.false_positives[min_index])
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'sensitivity': self.sensitivity
}
base_config = super(SpecificityAtSensitivity, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.AUC')
class AUC(Metric):
"""Computes the approximate AUC (Area under the curve) via a Riemann sum.
This metric creates four local variables, `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` that are used to compute the AUC.
To discretize the AUC curve, a linearly spaced set of thresholds is used to
compute pairs of recall and precision values. The area under the ROC-curve is
therefore computed using the height of the recall values by the false positive
rate, while the area under the PR-curve is computed using the height of
the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`. The `thresholds` parameter can be
used to manually specify thresholds which split the predictions more evenly.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
by providing a lower or upper bound estimate of the AUC.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.AUC(num_thresholds=3)
m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
# auc = ((((1+0.5)/2)*(1-0))+ (((0.5+0)/2)*(0-0))) = 0.75
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
```
"""
def __init__(self,
num_thresholds=200,
curve='ROC',
summation_method='interpolation',
name=None,
dtype=None,
thresholds=None):
"""Creates an `AUC` instance.
Args:
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use when discretizing the roc curve. Values must be > 1.
curve: (Optional) Specifies the name of the curve to be computed, 'ROC'
[default] or 'PR' for the Precision-Recall-curve.
summation_method: (Optional) Specifies the Riemann summation method used
(https://en.wikipedia.org/wiki/Riemann_sum): 'interpolation' [default],
applies mid-point summation scheme for `ROC`. For PR-AUC, interpolates
(true/false) positives but not the ratio that is precision (see Davis
& Goadrich 2006 for details); 'minoring' that applies left summation
for increasing intervals and right summation for decreasing intervals;
'majoring' that does the opposite.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
thresholds: (Optional) A list of floating point values to use as the
thresholds for discretizing the curve. If set, the `num_thresholds`
parameter is ignored. Values should be in [0, 1]. Endpoint thresholds
equal to {-epsilon, 1+epsilon} for a small positive epsilon value will
be automatically included with these to correctly handle predictions
equal to exactly 0 or 1.
"""
# Validate configurations.
if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
metrics_utils.AUCCurve):
raise ValueError('Invalid curve: "{}". Valid options are: "{}"'.format(
curve, list(metrics_utils.AUCCurve)))
if isinstance(
summation_method,
metrics_utils.AUCSummationMethod) and summation_method not in list(
metrics_utils.AUCSummationMethod):
raise ValueError(
'Invalid summation method: "{}". Valid options are: "{}"'.format(
summation_method, list(metrics_utils.AUCSummationMethod)))
# Update properties.
if thresholds is not None:
# If specified, use the supplied thresholds.
self.num_thresholds = len(thresholds) + 2
thresholds = sorted(thresholds)
else:
if num_thresholds <= 1:
raise ValueError('`num_thresholds` must be > 1.')
# Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
# (0, 1).
self.num_thresholds = num_thresholds
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
# Add an endpoint "threshold" below zero and above one for either
# threshold method to account for floating point imprecisions.
self.thresholds = [0.0 - K.epsilon()] + thresholds + [1.0 + K.epsilon()]
if isinstance(curve, metrics_utils.AUCCurve):
self.curve = curve
else:
self.curve = metrics_utils.AUCCurve.from_str(curve)
if isinstance(summation_method, metrics_utils.AUCSummationMethod):
self.summation_method = summation_method
else:
self.summation_method = metrics_utils.AUCSummationMethod.from_str(
summation_method)
super(AUC, self).__init__(name=name, dtype=dtype)
# Create metric variables
self.true_positives = self.add_weight(
'true_positives',
shape=(self.num_thresholds,),
initializer=init_ops.zeros_initializer)
self.true_negatives = self.add_weight(
'true_negatives',
shape=(self.num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_positives = self.add_weight(
'false_positives',
shape=(self.num_thresholds,),
initializer=init_ops.zeros_initializer)
self.false_negatives = self.add_weight(
'false_negatives',
shape=(self.num_thresholds,),
initializer=init_ops.zeros_initializer)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
return metrics_utils.update_confusion_matrix_variables({
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
}, y_true, y_pred, self.thresholds, sample_weight=sample_weight)
def interpolate_pr_auc(self):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
https://www.biostat.wisc.edu/~page/rocpr.pdf
Note here we derive & use a closed formula not present in the paper
as follows:
Precision = TP / (TP + FP) = TP / P
Modeling all of TP (true positive), FP (false positive) and their sum
P = TP + FP (predicted positive) as varying linearly within each interval
[A, B] between successive thresholds, we get
Precision slope = dTP / dP
= (TP_B - TP_A) / (P_B - P_A)
= (TP - TP_A) / (P - P_A)
Precision = (TP_A + slope * (P - P_A)) / P
The area within the interval is (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = self.true_positives[:self.num_thresholds -
1] - self.true_positives[1:]
p = self.true_positives + self.false_positives
dp = p[:self.num_thresholds - 1] - p[1:]
prec_slope = math_ops.div_no_nan(
dtp, math_ops.maximum(dp, 0), name='prec_slope')
intercept = self.true_positives[1:] - math_ops.multiply(prec_slope, p[1:])
safe_p_ratio = array_ops.where(
math_ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
math_ops.div_no_nan(
p[:self.num_thresholds - 1],
math_ops.maximum(p[1:], 0),
name='recall_relative_ratio'),
array_ops.ones_like(p[1:]))
return math_ops.reduce_sum(
math_ops.div_no_nan(
prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
math_ops.maximum(self.true_positives[1:] + self.false_negatives[1:],
0),
name='pr_auc_increment'),
name='interpolate_pr_auc')
def result(self):
if (self.curve == metrics_utils.AUCCurve.PR and
self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION
):
# This use case is different and is handled separately.
return self.interpolate_pr_auc()
# Set `x` and `y` values for the curves based on `curve` config.
recall = math_ops.div_no_nan(self.true_positives,
self.true_positives + self.false_negatives)
if self.curve == metrics_utils.AUCCurve.ROC:
fp_rate = math_ops.div_no_nan(self.false_positives,
self.false_positives + self.true_negatives)
x = fp_rate
y = recall
else: # curve == 'PR'.
precision = math_ops.div_no_nan(
self.true_positives, self.true_positives + self.false_positives)
x = recall
y = precision
# Find the rectangle heights based on `summation_method`.
if self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION:
# Note: the case ('PR', 'interpolation') has been handled above.
heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
heights = math_ops.minimum(y[:self.num_thresholds - 1], y[1:])
else: # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
heights = math_ops.maximum(y[:self.num_thresholds - 1], y[1:])
# Sum up the areas of all the rectangles.
return math_ops.reduce_sum(
math_ops.multiply(x[:self.num_thresholds - 1] - x[1:], heights),
name=self.name)
def reset_states(self):
K.batch_set_value(
[(v, np.zeros((self.num_thresholds,))) for v in self.variables])
def get_config(self):
config = {
'num_thresholds': self.num_thresholds,
'curve': self.curve.value,
'summation_method': self.summation_method.value,
# We remove the endpoint thresholds as an inverse of how the thresholds
# were initialized. This ensures that a metric initialized from this
# config has the same thresholds.
'thresholds': self.thresholds[1:-1],
}
base_config = super(AUC, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
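# Illustrative NumPy sketch of the ROC Riemann sum used by `AUC.result` with
# the 'interpolation' summation method (midpoint rectangle heights). It assumes
# per-threshold confusion counts ordered from the lowest to the highest
# threshold, as the class accumulates them; the helper name is hypothetical.
def _example_roc_auc_riemann_sum(tp, tn, fp, fn):
  tp, tn, fp, fn = (np.asarray(a, dtype=float) for a in (tp, tn, fp, fn))
  recall = np.divide(tp, tp + fn, out=np.zeros_like(tp), where=(tp + fn) > 0)
  fp_rate = np.divide(fp, fp + tn, out=np.zeros_like(fp), where=(fp + tn) > 0)
  # Midpoint heights between consecutive thresholds, widths along the x axis.
  heights = (recall[:-1] + recall[1:]) / 2.
  widths = fp_rate[:-1] - fp_rate[1:]
  return float(np.sum(widths * heights))
# With the counts from the usage example above (tp=[2, 1, 0], fp=[2, 0, 0],
# fn=[0, 1, 2], tn=[0, 2, 2]) this sketch reproduces the documented 0.75.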
@keras_export('keras.metrics.CosineSimilarity')
class CosineSimilarity(MeanMetricWrapper):
"""Computes the cosine similarity between the labels and predictions.
cosine similarity = (a . b) / ||a|| ||b||
[Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity)
For example, if `y_true` is [0, 1, 1], and `y_pred` is [1, 0, 1], the cosine
similarity is 0.5.
This metric keeps the average cosine similarity between `predictions` and
`labels` over a stream of data.
Usage:
```python
m = tf.keras.metrics.CosineSimilarity(axis=1)
m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
# l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
# l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
# l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
# result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
#        = ((0. + 0.) + (0.5 + 0.5)) / 2
print('Final result: ', m.result().numpy()) # Final result: 0.5
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
```
"""
def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
"""Creates a `CosineSimilarity` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
"""
super(CosineSimilarity, self).__init__(
cosine_similarity, name, dtype=dtype, axis=axis)
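# A small NumPy sketch (not the wrapped `cosine_similarity` function itself) of
# the quantity this metric averages: L2-normalize both inputs along `axis`,
# take the elementwise product, and sum along `axis`. The helper name is
# hypothetical.
def _example_cosine_similarity(y_true, y_pred, axis=-1):
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  def _l2_normalize(x):
    norm = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True))
    return np.divide(x, norm, out=np.zeros_like(x), where=norm > 0)
  per_example = np.sum(_l2_normalize(y_true) * _l2_normalize(y_pred), axis=axis)
  return float(np.mean(per_example))
# For the usage example above this returns ((0. + 0.) + (0.5 + 0.5)) / 2 = 0.5.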
@keras_export('keras.metrics.MeanAbsoluteError')
class MeanAbsoluteError(MeanMetricWrapper):
"""Computes the mean absolute error between the labels and predictions.
For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is [1., 1., 1., 0.]
the mean absolute error is 3/4 (0.75).
Usage:
```python
m = tf.keras.metrics.MeanAbsoluteError()
m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.MeanAbsoluteError()])
```
"""
def __init__(self, name='mean_absolute_error', dtype=None):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(MeanMetricWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is [1., 1., 1., 0.]
the mean absolute percentage error is 5e+08.
Usage:
```python
m = tf.keras.metrics.MeanAbsolutePercentageError()
m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Final result: ', m.result().numpy()) # Final result: 5e+08
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
```
"""
def __init__(self, name='mean_absolute_percentage_error', dtype=None):
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredError')
class MeanSquaredError(MeanMetricWrapper):
"""Computes the mean squared error between `y_true` and `y_pred`.
For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is [1., 1., 1., 0.]
the mean squared error is 3/4 (0.75).
Usage:
```python
m = tf.keras.metrics.MeanSquaredError()
m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.MeanSquaredError()])
```
"""
def __init__(self, name='mean_squared_error', dtype=None):
super(MeanSquaredError, self).__init__(
mean_squared_error, name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(MeanMetricWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
For example, if `y_true` is [0., 0., 1., 1.], and `y_pred` is [1., 1., 1., 0.]
the mean squared logarithmic error is 0.36034.
Usage:
```python
m = tf.keras.metrics.MeanSquaredLogarithmicError()
m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Final result: ', m.result().numpy()) # Final result: 0.36034
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
```
"""
def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name, dtype=dtype)
@keras_export('keras.metrics.Hinge')
class Hinge(MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
For example, if `y_true` is [-1., 1., 1.], and `y_pred` is [0.6, -0.7, -0.5]
the hinge metric value is 1.6.
Usage:
```python
m = tf.keras.metrics.Hinge()
m.update_state([-1., 1., 1.], [0.6, -0.7, -0.5])
# result = mean(maximum(0, 1 - y_true * y_pred)) = (1.6 + 1.7 + 1.5) / 3
print('Final result: ', m.result().numpy()) # Final result: 1.6
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.Hinge()])
```
"""
def __init__(self, name='hinge', dtype=None):
super(Hinge, self).__init__(hinge, name, dtype=dtype)
@keras_export('keras.metrics.SquaredHinge')
class SquaredHinge(MeanMetricWrapper):
"""Computes the squared hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
For example, if `y_true` is [-1., 1., 1.], and `y_pred` is [0.6, -0.7, -0.5]
the squared hinge metric value is 2.6.
Usage:
```python
m = tf.keras.metrics.SquaredHinge()
m.update_state([-1., 1., 1.], [0.6, -0.7, -0.5])
# result = mean(square(maximum(0, 1 - y_true * y_pred))) = (1.6^2 + 1.7^2 + 1.5^2) / 3
print('Final result: ', m.result().numpy()) # Final result: 2.6
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.SquaredHinge()])
```
"""
def __init__(self, name='squared_hinge', dtype=None):
super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)
@keras_export('keras.metrics.CategoricalHinge')
class CategoricalHinge(MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
For example, if `y_true` is [0., 1., 1.], and `y_pred` is [1., 0., 1.]
the categorical hinge metric value is 1.0.
Usage:
```python
m = tf.keras.metrics.CategoricalHinge()
m.update_state([0., 1., 1.], [1., 0., 1.])
print('Final result: ', m.result().numpy()) # Final result: 1.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.CategoricalHinge()])
```
"""
def __init__(self, name='categorical_hinge', dtype=None):
super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)
@keras_export('keras.metrics.RootMeanSquaredError')
class RootMeanSquaredError(Mean):
"""Computes root mean squared error metric between `y_true` and `y_pred`.
Usage:
```python
m = tf.keras.metrics.RootMeanSquaredError()
m.update_state([2., 4., 6.], [1., 3., 2.])
print('Final result: ', m.result().numpy()) # Final result: 2.449
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.RootMeanSquaredError()])
```
"""
def __init__(self, name='root_mean_squared_error', dtype=None):
super(RootMeanSquaredError, self).__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
error_sq = math_ops.squared_difference(y_pred, y_true)
return super(RootMeanSquaredError, self).update_state(
error_sq, sample_weight=sample_weight)
def result(self):
return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))
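# Minimal NumPy sketch of what this class accumulates: a weighted mean of
# squared errors (`total` / `count`), with the square root applied only in
# `result`. Assumes already-broadcast inputs; the helper name is hypothetical.
def _example_root_mean_squared_error(y_true, y_pred, sample_weight=None):
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  squared_error = np.square(y_pred - y_true)
  weights = (np.ones_like(squared_error) if sample_weight is None
             else np.asarray(sample_weight, dtype=float))
  total = np.sum(squared_error * weights)
  count = np.sum(weights)
  return float(np.sqrt(total / count)) if count > 0 else 0.0
# For the usage example above: sqrt((1 + 1 + 16) / 3) ~= 2.449.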
@keras_export('keras.metrics.LogCoshError')
class LogCoshError(MeanMetricWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true)
Usage:
```python
m = tf.keras.metrics.LogCoshError()
m.update_state([0., 1., 1.], [1., 0., 1.])
print('Final result: ', m.result().numpy()) # Final result: 0.289
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.LogCoshError()])
```
"""
def __init__(self, name='logcosh', dtype=None):
super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)
@keras_export('keras.metrics.Poisson')
class Poisson(MeanMetricWrapper):
"""Computes the Poisson metric between `y_true` and `y_pred`.
`metric = y_pred - y_true * log(y_pred)`
Usage:
```python
m = tf.keras.metrics.Poisson()
m.update_state([1, 9, 2], [4, 8, 12])
print('Final result: ', m.result().numpy()) # Final result: -4.63
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.Poisson()])
```
"""
def __init__(self, name='poisson', dtype=None):
super(Poisson, self).__init__(poisson, name, dtype=dtype)
@keras_export('keras.metrics.KLDivergence')
class KLDivergence(MeanMetricWrapper):
"""Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.
`metric = y_true * log(y_true / y_pred)`
Usage:
```python
m = tf.keras.metrics.KLDivergence()
m.update_state([.4, .9, .2], [.5, .8, .12])
print('Final result: ', m.result().numpy()) # Final result: -0.043
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', metrics=[tf.keras.metrics.KLDivergence()])
```
"""
def __init__(self, name='kullback_leibler_divergence', dtype=None):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name, dtype=dtype)
@keras_export('keras.metrics.MeanIoU')
class MeanIoU(Metric):
"""Computes the mean Intersection-Over-Union metric.
Mean Intersection-Over-Union is a common evaluation metric for semantic image
segmentation, which first computes the IOU for each semantic class and then
computes the average over classes. IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Usage:
```python
m = tf.keras.metrics.MeanIoU(num_classes=2)
m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
# cm = [[1, 1],
#       [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives)
# result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
print('Final result: ', m.result().numpy()) # Final result: 0.33
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(self, num_classes, name=None, dtype=None):
"""Creates a `MeanIoU` instance.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(MeanIoU, self).__init__(name=name, dtype=dtype)
self.num_classes = num_classes
# Variable to accumulate the predictions in the confusion matrix. Setting
# the type to be `float64` as required by confusion_matrix_ops.
self.total_cm = self.add_weight(
'total_confusion_matrix',
shape=(num_classes, num_classes),
initializer=init_ops.zeros_initializer,
dtype=dtypes.float64)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
# Flatten the input if its rank > 1.
if y_pred.shape.ndims > 1:
y_pred = array_ops.reshape(y_pred, [-1])
if y_true.shape.ndims > 1:
y_true = array_ops.reshape(y_true, [-1])
if sample_weight is not None and sample_weight.shape.ndims > 1:
sample_weight = array_ops.reshape(sample_weight, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
y_true,
y_pred,
self.num_classes,
weights=sample_weight,
dtype=dtypes.float64)
return self.total_cm.assign_add(current_cm)
def result(self):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = math_ops.cast(
array_ops.diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))
iou = math_ops.div_no_nan(true_positives, denominator)
return math_ops.div_no_nan(
math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)
def reset_states(self):
K.set_value(self.total_cm, np.zeros((self.num_classes, self.num_classes)))
def get_config(self):
config = {'num_classes': self.num_classes}
base_config = super(MeanIoU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
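# Illustrative NumPy sketch of `MeanIoU.result` computed from an accumulated
# confusion matrix: per-class IOU = diag / (row_sum + col_sum - diag), averaged
# over classes that actually appear. The helper name is hypothetical.
def _example_mean_iou_from_confusion_matrix(confusion):
  confusion = np.asarray(confusion, dtype=float)
  sum_over_row = confusion.sum(axis=0)
  sum_over_col = confusion.sum(axis=1)
  true_positives = np.diag(confusion)
  denominator = sum_over_row + sum_over_col - true_positives
  valid = denominator > 0
  iou = np.divide(
      true_positives, denominator, out=np.zeros_like(denominator), where=valid)
  return float(iou[valid].mean()) if valid.any() else 0.0
# With cm = [[1, 1], [1, 1]] as in the usage example, this yields ~0.33.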
@keras_export('keras.metrics.MeanTensor')
class MeanTensor(Metric):
"""Computes the element-wise (weighted) mean of the given tensors.
`MeanTensor` returns a tensor with the same shape as the input tensors. The
mean value is updated by keeping local variables `total` and `count`. The
`total` tracks the sum of the weighted values, and `count` stores the sum of
the weighted counts.
Usage:
```python
m = tf.keras.metrics.MeanTensor()
m.update_state([0, 1, 2, 3])
m.update_state([4, 5, 6, 7])
print('Result: ', m.result().numpy()) # Result: [2, 3, 4, 5]
m.update_state([12, 10, 8, 6], sample_weight=[0, 0.2, 0.5, 1])
print('Result: ', m.result().numpy()) # Result: [2, 3.636, 4.8, 5.333]
```
"""
def __init__(self, name='mean_tensor', dtype=None):
"""Creates a `MeanTensor` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super(MeanTensor, self).__init__(name=name, dtype=dtype)
self._shape = None
self._total = None
self._count = None
self._built = False
def _build(self, shape):
self._shape = tensor_shape.TensorShape(shape)
# Create new state variables
self._total = self.add_weight(
'total', shape=shape, initializer=init_ops.zeros_initializer)
self._count = self.add_weight(
'count', shape=shape, initializer=init_ops.zeros_initializer)
with ops.init_scope():
if not context.executing_eagerly():
K._initialize_variables(K._get_session()) # pylint: disable=protected-access
self._built = True
@property
def total(self):
return self._total if self._built else None
@property
def count(self):
return self._count if self._built else None
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the element-wise mean.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
values = math_ops.cast(values, self._dtype)
if not self._built:
self._build(values.shape)
elif values.shape != self._shape:
raise ValueError('MeanTensor input values must always have the same '
'shape. Expected shape (set during the first call): {}. '
'Got: {}'.format(self._shape, values.shape))
num_values = array_ops.ones_like(values)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
num_values = math_ops.multiply(num_values, sample_weight)
values = math_ops.multiply(values, sample_weight)
update_total_op = self._total.assign_add(values)
with ops.control_dependencies([update_total_op]):
return self._count.assign_add(num_values)
def result(self):
if not self._built:
raise ValueError(
'MeanTensor does not have any result yet. Please call the MeanTensor '
'instance or use `.update_state(value)` before retrieving the result.'
)
return math_ops.div_no_nan(self.total, self.count)
def reset_states(self):
if self._built:
K.batch_set_value(
[(v, np.zeros(self._shape.as_list())) for v in self.variables])
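# A small NumPy sketch of the element-wise running mean that `MeanTensor`
# maintains: `total` accumulates weighted values, `count` accumulates weights,
# and the result is their element-wise ratio. Names are hypothetical.
def _example_mean_tensor_step(total, count, values, sample_weight=None):
  values = np.asarray(values, dtype=float)
  weights = (np.ones_like(values) if sample_weight is None
             else np.asarray(sample_weight, dtype=float))
  total = np.asarray(total, dtype=float) + values * weights
  count = np.asarray(count, dtype=float) + weights
  result = np.divide(total, count, out=np.zeros_like(total), where=count > 0)
  return total, count, result
# Replaying the usage example above ([0, 1, 2, 3], then [4, 5, 6, 7], then
# [12, 10, 8, 6] with weights [0, 0.2, 0.5, 1]) reproduces [2, 3.636, 4.8, 5.333].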
@keras_export('keras.metrics.BinaryCrossentropy')
class BinaryCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are only two
label classes (0 and 1).
Usage:
```python
m = tf.keras.metrics.BinaryCrossentropy()
m.update_state([1., 0., 1., 0.], [1., 1., 1., 0.])
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Reduced metric = 7.665 / 2
print('Final result: ', m.result().numpy()) # Final result: 3.833
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryCrossentropy()])
```
"""
def __init__(self,
name='binary_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
"""Creates a `BinaryCrossentropy` instance.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
smoothed, meaning the confidence on label values is relaxed.
e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
label `0` and `0.9` for label `1`.
"""
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.CategoricalCrossentropy')
class CategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are multiple
label classes (2 or more). Here we assume that labels are given as a `one_hot`
representation. e.g., when label values are [2, 0, 1],
`y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].
Usage:
```python
m = tf.keras.metrics.CategoricalCrossentropy()
m.update_state([[0, 1, 0], [0, 0, 1]],
[[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# xent = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Reduced xent = (0.051 + 2.302) / 2
print('Final result: ', m.result().numpy()) # Final result: 1.176
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalCrossentropy()])
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether `y_pred` is expected to be a logits tensor.
By default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values is relaxed. e.g.
`label_smoothing=0.2` means that we will use a value of `0.1` for label
`0` and `0.9` for label `1`.
"""
def __init__(self,
name='categorical_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` metric.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Usage:
```python
m = tf.keras.metrics.SparseCategoricalCrossentropy()
m.update_state(
[1, 2],
[[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# logits = log(y_pred)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# xent = -sum(y * log(softmax), 1)
# log(softmax) = [[-2.9957, -0.0513, -16.1181], [-2.3026, -0.2231, -2.3026]]
# y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
print('Final result: ', m.result().numpy()) # Final result: 1.176
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile(
'sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether `y_pred` is expected to be a logits tensor.
By default, we assume that `y_pred` encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the metric is
computed.
"""
def __init__(self,
name='sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
class SumOverBatchSize(Reduce):
"""Computes the weighted sum over batch size of the given values.
For example, if values is [1, 3, 5, 7] then the metric value is 4.
If the weights were specified as [1, 1, 0, 0] then the value would be 1.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as sum
over batch size which is an idempotent operation that simply divides `total`
by `count`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
"""
def __init__(self, name='sum_over_batch_size', dtype=None):
super(SumOverBatchSize, self).__init__(
reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
name=name,
dtype=dtype)
class SumOverBatchSizeMetricWrapper(SumOverBatchSize):
"""Wraps a function with the `SumOverBatchSizeMetricWrapper` metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `SumOverBatchSizeMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
matches = self._fn(y_true, y_pred, **self._fn_kwargs)
return super(SumOverBatchSizeMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(SumOverBatchSizeMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def accuracy(y_true, y_pred):
[y_pred, y_true], _ = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true])
y_pred.shape.assert_is_compatible_with(y_true.shape)
if y_true.dtype != y_pred.dtype:
y_pred = math_ops.cast(y_pred, y_true.dtype)
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.binary_accuracy')
def binary_accuracy(y_true, y_pred, threshold=0.5):
threshold = math_ops.cast(threshold, y_pred.dtype)
y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
@keras_export('keras.metrics.categorical_accuracy')
def categorical_accuracy(y_true, y_pred):
return math_ops.cast(
math_ops.equal(
math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)),
K.floatx())
@keras_export('keras.metrics.sparse_categorical_accuracy')
def sparse_categorical_accuracy(y_true, y_pred):
y_pred_rank = ops.convert_to_tensor(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor(y_true).shape.ndims
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
K.int_shape(y_true)) == len(K.int_shape(y_pred))):
y_true = array_ops.squeeze(y_true, [-1])
y_pred = math_ops.argmax(y_pred, axis=-1)
# If the predicted output and actual output types don't match, force cast them
# to match.
if K.dtype(y_pred) != K.dtype(y_true):
y_pred = math_ops.cast(y_pred, K.dtype(y_true))
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.top_k_categorical_accuracy')
def top_k_categorical_accuracy(y_true, y_pred, k=5):
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), K.floatx())
@keras_export('keras.metrics.sparse_top_k_categorical_accuracy')
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
y_pred_rank = ops.convert_to_tensor(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor(y_true).shape.ndims
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
K.int_shape(y_true)) == len(K.int_shape(y_pred))):
y_true = array_ops.squeeze(y_true, [-1])
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), K.floatx())
# Aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine_proximity = cosine_similarity
def clone_metric(metric):
"""Returns a clone of the metric if stateful, otherwise returns it as is."""
if isinstance(metric, Metric):
with ops.init_scope():
return metric.__class__.from_config(metric.get_config())
return metric
def clone_metrics(metrics):
"""Clones the given metric list/dict."""
if metrics is None:
return None
if isinstance(metrics, dict):
return {key: clone_metric(value) for key, value in metrics.items()}
return [clone_metric(metric) for metric in metrics]
@keras_export('keras.metrics.serialize')
def serialize(metric):
return serialize_keras_object(metric)
@keras_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
@keras_export('keras.metrics.get')
def get(identifier):
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'metric function identifier: %s' % identifier)
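# Hedged usage sketch (not part of the public API; the helper name is
# hypothetical) of the three identifier forms accepted by `get`: a string
# looked up in this module, a callable returned unchanged, and a config dict
# round-tripped through `serialize`/`deserialize`, assuming standard Keras
# serialization behavior.
def _example_get_identifiers():
  by_string = get('mse')                          # string lookup of the alias above
  by_callable = get(mean_squared_error)           # callables pass through unchanged
  by_config = get(serialize(MeanSquaredError()))  # dicts are deserialized
  return by_string, by_callable, by_config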
| tensorflow-master | tensorflow/python/keras/metrics.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `models.py` (model cloning, mainly)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class TestModel(keras.Model):
"""A model subclass."""
def __init__(self, n_outputs=4, trainable=True):
"""A test class with one dense layer and number of outputs as a variable."""
super(TestModel, self).__init__()
self.layer1 = keras.layers.Dense(n_outputs)
self.n_outputs = resource_variable_ops.ResourceVariable(
n_outputs, trainable=trainable)
def call(self, x):
return self.layer1(x)
def _get_layers(input_shape=(4,), add_input_layer=False):
if add_input_layer:
model_layers = [keras.layers.InputLayer(input_shape=input_shape),
keras.layers.Dense(4)]
elif input_shape:
model_layers = [keras.layers.Dense(4, input_shape=input_shape)]
else:
model_layers = [keras.layers.Dense(4)]
model_layers += [
keras.layers.BatchNormalization(),
keras.layers.Dropout(0.5),
keras.layers.Dense(4)]
return model_layers
def _get_model(input_shape=(4,)):
model_layers = _get_layers(input_shape=None, add_input_layer=False)
return testing_utils.get_model_from_layers(
model_layers, input_shape=input_shape)
class TestModelCloning(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
{'testcase_name': 'has_input_layer',
'input_shape': (4,),
'add_input_layer': True,
'share_weights': False},
{'testcase_name': 'no_input_layer',
'input_shape': None,
'add_input_layer': False,
'share_weights': False},
{'testcase_name': 'has_input_layer_share_weights',
'input_shape': (4,),
'add_input_layer': True,
'share_weights': True},
{'testcase_name': 'no_input_layer_share_weights',
'input_shape': None,
'add_input_layer': False,
'share_weights': True},
])
def test_clone_sequential_model(
self, input_shape, add_input_layer, share_weights):
if share_weights:
clone_fn = functools.partial(
keras.models._clone_sequential_model, layer_fn=models.share_weights)
else:
clone_fn = keras.models.clone_model
val_a = np.random.random((10, 4))
model = models.Sequential(_get_layers(input_shape, add_input_layer))
# Sanity check
self.assertEqual(
isinstance(model._layers[0], keras.layers.InputLayer),
add_input_layer)
self.assertEqual(model._is_graph_network, add_input_layer)
# With placeholder creation -- clone model should have an InputLayer
# if the original model has one.
new_model = clone_fn(model)
self.assertEqual(
isinstance(new_model._layers[0], keras.layers.InputLayer),
add_input_layer)
self.assertEqual(new_model._is_graph_network, model._is_graph_network)
if input_shape:
# update ops from batch norm need to be included
self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
# On top of new tensor -- clone model should always have an InputLayer.
input_a = keras.Input(shape=(4,))
new_model = clone_fn(model, input_tensors=input_a)
self.assertIsInstance(new_model._layers[0], keras.layers.InputLayer)
self.assertTrue(new_model._is_graph_network)
# On top of new, non-Keras tensor -- clone model should always have an
# InputLayer.
if not context.executing_eagerly():
# TODO(b/121277734): Skip Eager contexts, as Input() layers raise an error
# saying they should not be used with EagerTensors
input_a = keras.backend.variable(val_a)
new_model = clone_fn(model, input_tensors=input_a)
self.assertIsInstance(new_model._layers[0], keras.layers.InputLayer)
self.assertTrue(new_model._is_graph_network)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
{'testcase_name': 'clone_weights', 'share_weights': False},
{'testcase_name': 'share_weights', 'share_weights': True},
])
def test_clone_functional_model(self, share_weights):
if share_weights:
clone_fn = functools.partial(
keras.models._clone_functional_model, layer_fn=models.share_weights)
else:
clone_fn = keras.models.clone_model
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_a = keras.layers.BatchNormalization()(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
# With placeholder creation
new_model = clone_fn(model)
self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
input_a = keras.Input(shape=(4,), name='a')
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new, non-Keras tensors
if not context.executing_eagerly():
# TODO(b/121277734): Skip Eager contexts, as Input() layers raise an error
# saying they should not be used with EagerTensors
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = clone_fn(model, input_tensors=[input_a, input_b])
self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.train_on_batch(None, val_out)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
{'testcase_name': 'clone_weights', 'share_weights': False},
{'testcase_name': 'share_weights', 'share_weights': True},
])
def test_clone_functional_with_masking(self, share_weights):
if share_weights:
clone_fn = functools.partial(
keras.models._clone_functional_model, layer_fn=models.share_weights)
else:
clone_fn = keras.models.clone_model
x = np.array([[[1.], [1.]], [[0.], [0.]]])
inputs = keras.Input((2, 1))
outputs = keras.layers.Masking(mask_value=0)(inputs)
outputs = keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one'))(outputs)
model = keras.Model(inputs, outputs)
model = clone_fn(model)
model.compile(
loss='mse', optimizer=testing_utils.get_v2_optimizer('adam'),
run_eagerly=testing_utils.should_run_eagerly())
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.)
def test_model_cloning_invalid_use_cases(self):
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
fn_model = keras.models.Model(x, y)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(seq_model)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(None)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(fn_model)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=y)
def test_functional_cloning_does_not_create_unnecessary_placeholders(self):
with ops.Graph().as_default():
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
graph = ops.Graph()
with graph.as_default():
x = array_ops.ones((10, 4))
_ = keras.models.clone_model(model, input_tensors=[x])
has_placeholder = _has_placeholder(graph)
self.assertFalse(has_placeholder)
def test_sequential_cloning_does_not_create_unnecessary_placeholders(self):
with ops.Graph().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
graph = ops.Graph()
with graph.as_default():
x = array_ops.ones((10, 4))
_ = keras.models.clone_model(model, input_tensors=[x])
has_placeholder = _has_placeholder(graph)
self.assertFalse(has_placeholder)
def _has_placeholder(graph):
ops_types = [op.type for op in graph.get_operations()]
return any('Placeholder' in s for s in ops_types)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CheckpointingTests(keras_parameterized.TestCase):
def test_optimizer_dependency(self):
model = _get_model()
opt = adam.AdamOptimizer(.01)
model.compile(
optimizer=opt, loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(
x=np.array([[1., 2., 3., 4.]]),
y=np.array([[1., 1., 1., 1.]]),
epochs=2)
save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
beta1_power, _ = opt._get_beta_accumulators()
self.evaluate(beta1_power.assign(12.))
model.save_weights(save_prefix)
self.evaluate(beta1_power.assign(13.))
model.load_weights(save_prefix)
self.assertEqual(12., self.evaluate(beta1_power))
@keras_parameterized.run_all_keras_modes
class TestModelBackend(keras_parameterized.TestCase):
def test_model_backend_float64_use_cases(self):
# Test case for GitHub issue 19318
floatx = keras.backend.floatx()
keras.backend.set_floatx('float64')
x = keras.Input((5,))
y = keras.layers.Dense(1)(x)
model = keras.models.Model(x, y)
model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
run_eagerly=testing_utils.should_run_eagerly())
keras.backend.set_floatx(floatx)
class TestModelDeepCopy(test.TestCase):
def test_deep_copy_eager_mode_trainable(self):
with context.eager_mode():
x = random_ops.random_normal((32, 4))
model = TestModel(trainable=True)
model(x) # Initialize Variables.
model_copy = copy.deepcopy(model)
self.assertEqual(len(model_copy.trainable_variables), 3)
model_copy.n_outputs.assign(1200)
self.assertFalse(
np.allclose(model_copy.n_outputs.numpy(),
model.n_outputs.numpy()))
def test_deep_copy_eager_mode_not_trainable(self):
with context.eager_mode():
x = random_ops.random_normal((32, 4))
model = TestModel(trainable=False)
model(x)
model_copy = copy.deepcopy(model)
self.assertEqual(len(model_copy.trainable_variables), 2)
weights = model_copy.get_weights()
weights = [w * 4 for w in weights]
model_copy.set_weights(weights)
self.assertFalse(
np.allclose(model.get_weights()[0],
model_copy.get_weights()[0]))
class TestCloneAndBuildModel(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_clone_and_build_non_compiled_model(self):
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
model = _get_model()
with self.assertRaisesRegexp(ValueError, 'has not been compiled'):
models.clone_and_build_model(model, compile_clone=True)
is_subclassed = (testing_utils.get_model_type() == 'subclass')
# With placeholder creation
new_model = models.clone_and_build_model(
model, compile_clone=False, in_place_reset=is_subclassed)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.evaluate(inp, out)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.train_on_batch(inp, out)
new_model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.train_on_batch(inp, out)
# Create new tensors for inputs and targets
input_a = keras.Input(shape=(4,))
target_a = keras.Input(shape=(4,))
new_model = models.clone_and_build_model(
model, input_tensors=input_a, target_tensors=[target_a],
compile_clone=False, in_place_reset=is_subclassed)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.evaluate(inp, out)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.train_on_batch(inp, out)
new_model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.train_on_batch(inp, out)
def _assert_same_compile_params(self, model):
"""Assert that two models have the same compile parameters."""
self.assertEqual('mse', model.loss)
self.assertTrue(
isinstance(model.optimizer,
(keras.optimizers.RMSprop,
keras.optimizer_v2.rmsprop.RMSprop)))
self.assertEqual(['acc', metrics.categorical_accuracy],
model._compile_metrics)
def _clone_and_build_test_helper(self, model, model_type):
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
is_subclassed = (model_type == 'subclass')
# With placeholder creation
new_model = models.clone_and_build_model(
model, compile_clone=True, in_place_reset=is_subclassed)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
# Create new tensors for inputs and targets
input_a = keras.Input(shape=(4,), name='a')
new_model = models.clone_and_build_model(
model, input_tensors=input_a, compile_clone=True,
in_place_reset=is_subclassed)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
target_a = keras.Input(shape=(4,), name='b')
new_model = models.clone_and_build_model(
model, input_tensors=input_a, target_tensors=[target_a],
compile_clone=True, in_place_reset=is_subclassed)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_clone_and_build_compiled(self):
model = _get_model()
model.compile(
testing_utils.get_v2_optimizer('rmsprop'), 'mse',
metrics=['acc', metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly())
self._clone_and_build_test_helper(model, testing_utils.get_model_type())
@keras_parameterized.run_all_keras_modes
def test_clone_and_build_sequential_without_inputs_defined(self):
model = models.Sequential(_get_layers(input_shape=None))
model.compile(
testing_utils.get_v2_optimizer('rmsprop'),
'mse', metrics=['acc', metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly())
self._clone_and_build_test_helper(model, 'sequential')
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
model.train_on_batch(inp, out)
self._clone_and_build_test_helper(model, 'sequential')
def assert_optimizer_iterations_increases(self, optimizer):
model = _get_model()
model.compile(
optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly())
global_step = keras.backend.variable(123, dtype=dtypes.int64)
clone_model = models.clone_and_build_model(
model, compile_clone=True, optimizer_iterations=global_step,
in_place_reset=(testing_utils.get_model_type() == 'subclass'))
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
clone_model.train_on_batch(inp, out)
self.assertEqual(K.eval(global_step), 124)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_replace_tf_optimizer_iterations_variable(self):
self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01))
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_replace_keras_optimizer_iterations_variable(self):
if testing_utils.should_run_eagerly():
# This needs to be updated to run with v2 optimizers.
self.skipTest('b/120991591')
self.assert_optimizer_iterations_increases('adam')
def test_clone_optimizer_in_different_graph(self):
with ops.Graph().as_default():
with self.session():
model = testing_utils.get_small_sequential_mlp(3, 4)
optimizer = keras.optimizer_v2.adam.Adam()
model.compile(
optimizer, 'mse', metrics=['acc', metrics.categorical_accuracy],
)
model.fit(
x=np.array([[1., 2., 3., 4.]]),
y=np.array([[1., 1., 1., 1.]]),
epochs=1)
optimizer_config = optimizer.get_config()
with ops.Graph().as_default():
with self.session():
with self.assertRaisesRegexp(ValueError,
'Cannot use the given session'):
models.clone_and_build_model(model, compile_clone=True)
# The optimizer_config object allows the model to be cloned in a
# different graph.
models.clone_and_build_model(model, compile_clone=True,
optimizer_config=optimizer_config)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/models_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tfdev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
py_any = any
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping {graph: set_of_freezable_variables}.
# Each set tracks objects created via `freezable_variable` in the graph.
_FREEZABLE_VARS = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from tensorflow.keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = get_graph()
if graph not in PER_GRAPH_OBJECT_NAME_UIDS:
PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
PER_GRAPH_OBJECT_NAME_UIDS.clear()
@keras_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
global _GRAPH
global _FREEZABLE_VARS
_GRAPH = None
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
with name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[graph] = phase
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
_FREEZABLE_VARS.pop(graph, None)
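# Example (illustrative sketch, not part of the original API surface): building
# many short-lived models in one process accumulates graph state, so the usual
# pattern is to clear the session between iterations. The `Sequential` model
# below is only a placeholder to show the call site.
#
#   from tensorflow.python import keras
#   for _ in range(3):
#     model = keras.Sequential([keras.layers.Dense(4, input_shape=(4,))])
#     # ... build / evaluate the model ...
#     keras.backend.clear_session()  # drops the cached graph, session and uids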
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.compat.v1.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
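# Example (hedged sketch): with manual initialization enabled, `get_session()`
# no longer initializes variables lazily, so the caller is expected to run an
# initializer themselves. Assumes graph mode; `np` is numpy.
#
#   manual_variable_initialization(True)
#   v = variable(np.zeros((2, 2)))
#   sess = get_session()
#   sess.run(variables_module.variables_initializer([v]))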
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if ops.get_default_graph() is _GRAPH:
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
return symbolic_learning_phase()
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
return symbolic_learning_phase()
def global_learning_phase_is_set():
return _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
with name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
      # In an eager context, the learning phase value applies to both the eager
# context and the internal Keras graph.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
try:
set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
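# Example (illustrative sketch): temporarily forcing training-time behavior
# (e.g. Dropout being active) while calling a model. `model` and `x` are
# hypothetical objects owned by the caller.
#
#   with learning_phase_scope(1):
#     train_out = model(x)   # layers observe learning_phase == 1
#   test_out = model(x)      # the previous phase is restored outside the scope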
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager / tf.function only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert ops.executing_eagerly_outside_functions()
global_learning_phase_was_set = global_learning_phase_is_set()
if global_learning_phase_was_set:
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
yield
finally:
# Restore learning phase to initial value or unset.
if global_learning_phase_was_set:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
else:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
raise RuntimeError('Cannot get session inside Tensorflow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
  If no global Keras session exists at this point,
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
      op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
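# Example (hedged sketch, graph mode): pinning Keras to a caller-managed
# session instead of the lazily created default one. `set_session` is defined
# further below in this module.
#
#   sess = session_module.Session(config=get_default_session_config())
#   set_session(sess)
#   assert get_session() is sess  # holds as long as no default session is set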
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
        a scratch graph will either be retrieved or created.
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
_CURRENT_SCRATCH_GRAPH is not graph):
raise ValueError('Multiple scratch graphs specified.')
if _CURRENT_SCRATCH_GRAPH:
yield _CURRENT_SCRATCH_GRAPH
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH = None
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if os.environ.get('OMP_NUM_THREADS'):
logging.warning(
'OMP_NUM_THREADS is no longer used by the default Keras config. '
'To configure the number of threads, use tf.config.threading APIs.')
config = context.context().config
config.allow_soft_placement = True
return config
def get_default_graph_uid_map():
graph = ops.get_default_graph()
name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections.defaultdict(int)
PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map
return name_uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if tfdev.is_device_spec(device):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
      If the current device scope is explicitly set, it returns the device as a
      `DeviceSpec` (e.g. with device type `CPU` or `GPU`). If the scope is not
      explicitly set, it will return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return tfdev.DeviceSpec.from_string(op.device)
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [name for name in context.list_devices() if 'GPU' in name]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
  TensorFlow does not support NCHW on CPU. Therefore we check whether the
  current scope is not explicitly placed on CPU and GPUs are available; in
  that case ops will be soft-placed on a GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
@keras_export('keras.backend.name_scope', v1=[])
def name_scope(name):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
# Define some computation that uses `a`.
return foo_op(..., name=scope)
```
When executed, the Tensor `a` will have the name `MyOp/a`.
Args:
name: The prefix to use on all names created within the name scope.
Returns:
Name scope context manager.
"""
return ops.name_scope_v2(name)
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> import numpy as np
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
graph = get_graph()
optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet())
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
if graph not in _GRAPH_VARIABLES:
_GRAPH_VARIABLES[graph] = weakref.WeakSet()
_GRAPH_VARIABLES[graph].add(v)
def unique_object_name(name,
name_uid_map=None,
avoid_names=None,
namespace='',
zero_based=False):
"""Makes a object name (or arbitrary string) unique within a TensorFlow graph.
Arguments:
name: String name to make unique.
name_uid_map: An optional defaultdict(int) to use when creating unique
names. If None (default), uses a per-Graph dictionary.
avoid_names: An optional set or dict with names which should not be used. If
None (default) does not avoid any names.
namespace: Gets a name which is unique within the (graph, namespace). Layers
which are not Networks use a blank namespace and so get graph-global
names.
zero_based: If True, name sequences start with no suffix (e.g. "dense",
"dense_1"). If False, naming is one-based ("dense_1", "dense_2").
Returns:
Unique string name.
Example:
```python
  unique_object_name('dense')  # dense_1
  unique_object_name('dense')  # dense_2
```
"""
if name_uid_map is None:
name_uid_map = get_default_graph_uid_map()
if avoid_names is None:
avoid_names = set()
proposed_name = None
while proposed_name is None or proposed_name in avoid_names:
name_key = (namespace, name)
if zero_based:
number = name_uid_map[name_key]
if number:
proposed_name = name + '_' + str(number)
else:
proposed_name = name
name_uid_map[name_key] += 1
else:
name_uid_map[name_key] += 1
proposed_name = name + '_' + str(name_uid_map[name_key])
return proposed_name
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> import tensorflow as tf
>>> import numpy
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.compat.v1.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of
keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras
backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras
tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a
Keras tensor.
True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Raises:
ValueError: If called with eager execution.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
return x.op.type == 'Placeholder'
except AttributeError:
return False
def freezable_variable(value, shape=None, name=None):
"""A tensor-like object whose value can be updated only up until execution.
After creating the freezable variable, you can update its value by calling
`var.update_value(new_value)` (similar to a regular variable).
Unlike an actual variable, the value used during execution is the current
value at the time the execution function (`backend.function()`) was created.
This is an internal API, expected to be temporary. It is used to implement a
mutable `trainable` property for `BatchNormalization` layers, with a frozen
value after model compilation.
We don't use a plain variable in this case because we need the value used
in a specific model to be frozen after `compile` has been called
(e.g. GAN use case).
Arguments:
value: The initial value for the tensor-like object.
shape: The shape for the tensor-like object (cannot be changed).
name: The name for the tensor-like object.
Returns:
A tensor-like object with a static value that can be updated via
`x.update_value(new_value)`, up until creating an execution function
(afterwards the value is fixed).
"""
graph = get_graph()
with graph.as_default():
x = array_ops.placeholder_with_default(
value, shape=shape, name=name)
x._initial_value = value
x._current_value = value
def update_value(new_value):
x._current_value = new_value
def get_value():
return x._current_value
x.update_value = update_value
x.get_value = get_value
global _FREEZABLE_VARS
if graph not in _FREEZABLE_VARS:
_FREEZABLE_VARS[graph] = weakref.WeakSet()
_FREEZABLE_VARS[graph].add(x)
return x
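# Example (illustrative sketch of the internal API above): the value can be
# swapped freely before an execution function is built, after which the value
# captured at build time is the one used.
#
#   flag = freezable_variable(True, shape=(), name='frozen_flag')
#   flag.update_value(False)
#   assert flag.get_value() is False  # still mutable at this point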
@keras_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
  # To get an integer shape (instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32'
```
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with zeros.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Examples:
Cast a float32 variable to a float64 tensor
```python
>>> import tensorflow as tf
>>> from tensorflow.keras import backend as K
>>> input = K.ones(shape=(1,3))
>>> print(input)
>>> cast_input = K.cast(input, dtype='float64')
>>> print(cast_input)
<tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
numpy=array([[1., 1., 1.]], dtype=float32)>
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
  """Updates the value of the variable `x` to `new_x`."""
  return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
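# Example (hedged sketch): the assign helpers above return ops. In graph mode
# they must be run by a session; when executing eagerly they take effect
# immediately. `np` is numpy.
#
#   x = variable(np.zeros((2,)))
#   op = update_add(x, constant([1., 2.]))
#   get_session().run(op)   # graph mode; x now holds [1., 2.]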
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
      value: A tensor with the same shape as `x`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
# `training` is higher-up than the Keras backend in the abstraction hierarchy.
# In particular, `training` depends on layers, and thus on Keras.
# moving_averages, being low-level ops, should not be part of the training
# module.
from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=True)
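# Example (illustrative sketch): the underlying op applies roughly
#   x <- x - (1 - momentum) * (x - value)
# i.e. an exponential moving average of `value`; with `zero_debias=True` the
# early estimates are additionally corrected for their bias toward the initial
# value of `x`.
#
#   avg = variable(0.)
#   op = moving_average_update(avg, constant(1.), momentum=0.9)
#   # running `op` repeatedly moves `avg` toward 1.0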
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
  When attempting to multiply an nD tensor
  with an nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
  `batch_dot` results in a tensor or variable with fewer dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
  `batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal
of `x.dot(y.T)`, although we never have to calculate the off-diagonal
elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if axes is None:
    # behaves like tf.batch_matmul by default
axes = [x_ndim - 1, y_ndim - 2]
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
@keras_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of the same type as `reference`.
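Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`; the value in
the comment is the expected result):
```python
>>> ref = K.constant([[1., 2.], [3., 4.], [5., 6.]])
>>> K.eval(K.gather(ref, [0, 2]))  # -> [[1., 2.], [5., 6.]]
```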
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
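Example:
A small sketch (assuming `K` is `tf.keras.backend`; expected results shown
in the comments):
```python
>>> x = K.constant([[1., 2.], [3., 4.]])
>>> K.eval(K.sum(x))          # -> 10.0
>>> K.eval(K.sum(x, axis=1))  # -> [3., 7.]
```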
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
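Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`; the expected
result is shown in the comment):
```python
>>> x = K.constant([[1., 2., 3.], [4., 5., 6.]])
>>> K.eval(K.cumsum(x, axis=1))  # -> [[1., 3., 6.], [4., 9., 15.]]
```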
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: A list of integers. Axes along which to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether to drop or broadcast the reduction axes.
Returns:
A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether to drop or broadcast the reduction axes.
Returns:
A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
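Example:
A small sketch (assuming `K` is `tf.keras.backend`; the expected result is
shown in the comment):
```python
>>> x = K.constant([[0.1, 0.7, 0.2], [0.4, 0.3, 0.3]])
>>> K.eval(K.argmax(x, axis=-1))  # -> [1, 0]
```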
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
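Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`; the expected
result is shown in the comment):
```python
>>> x = K.constant([-2.0, 0.5, 3.0])
>>> K.eval(K.clip(x, min_value=0., max_value=1.))  # -> [0., 0.5, 1.]
```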
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple of length 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
`output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
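Example:
A small sketch (assuming `K` is `tf.keras.backend`; expected shapes shown in
the comments):
```python
>>> a = K.constant([[1., 2.], [3., 4.]])
>>> b = K.constant([[5., 6.], [7., 8.]])
>>> K.int_shape(K.concatenate([a, b], axis=-1))  # -> (2, 4)
>>> K.int_shape(K.concatenate([a, b], axis=0))   # -> (4, 2)
```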
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
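Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`; the expected
shape is shown in the comment):
```python
>>> x = K.ones(shape=(2, 3, 4))
>>> K.int_shape(K.permute_dimensions(x, (1, 0, 2)))  # -> (3, 2, 4)
```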
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_nearest_neighbor(x, new_shape)
elif interpolation == 'bilinear':
x = image_ops.resize_bilinear(x, new_shape)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
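Example:
A small sketch (assuming `K` is `tf.keras.backend`):
```python
>>> x = K.constant([[1., 2.], [3., 4.]])
>>> K.eval(K.repeat_elements(x, rep=2, axis=1))
array([[1., 1., 2., 2.],
       [3., 3., 4., 4.]], dtype=float32)
```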
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
@keras_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
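Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`; the expected
shape is shown in the comment):
```python
>>> x = K.constant([[1., 2.], [3., 4.]])
>>> K.int_shape(K.repeat(x, n=3))  # -> (2, 3, 2)
```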
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
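Example:
A small sketch (assuming `K` is `tf.keras.backend`; expected values shown in
the comments):
```python
>>> K.eval(K.arange(5))         # -> [0, 1, 2, 3, 4]
>>> K.eval(K.arange(2, 10, 2))  # -> [2, 4, 6, 8]
```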
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@keras_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
n: A list of integers. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
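Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`; the expected
shape is shown in the comment):
```python
>>> x = K.constant([[1., 2.], [3., 4.]])
>>> K.int_shape(K.tile(x, [2, 3]))  # -> (4, 6)
```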
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
Examples:
Flattening a 3D tensor to 2D by collapsing the last dimension.
```python
>>> from tensorflow.keras import backend as K
>>> x_batch = K.ones(shape=(2, 3, 4, 5))
>>> x_batch_flatten = K.batch_flatten(x_batch)
>>> K.int_shape(x_batch_flatten)
(2, 60)
```
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
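Example:
A small sketch (assuming `K` is `tf.keras.backend`; the expected shape is
shown in the comment):
```python
>>> x = K.ones(shape=(2, 3, 4))
>>> K.int_shape(K.temporal_padding(x, padding=(1, 2)))  # -> (2, 6, 4)
```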
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
Returns:
(n + 1)D one-hot representation of the input,
with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
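Example:
An illustrative snippet (assuming `K` is `tf.keras.backend`):
```python
>>> indices = K.constant([0, 2, 1], dtype='int32')
>>> K.eval(K.one_hot(indices, num_classes=3))
array([[1., 0., 0.],
       [0., 0., 1.],
       [0., 1., 0.]], dtype=float32)
```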
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
"""
if not tensor_util.is_tensor(x):
return x
if context.executing_eagerly():
return x.numpy()
if not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
if ops.executing_eagerly_outside_functions():
# This method of evaluating works inside the Keras FuncGraph.
return function([], x)(x)
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside TensorFlow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
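Example:
An illustrative snippet (assuming `K` is `tf.keras.backend` and `np` is
NumPy; the expected value is shown in the comment):
```python
>>> v = K.variable([1., 2., 3.])
>>> K.set_value(v, np.array([4., 5., 6.]))
>>> K.get_value(v)  # -> array([4., 5., 6.], dtype=float32)
```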
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
with ops.init_scope():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
with ops.init_scope():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):
with get_graph().as_default():
op = logging_ops.print_v2(message, x, output_stream=sys.stdout)
with ops.control_dependencies([op]):
return array_ops.identity(x)
else:
logging_ops.print_v2(message, x, output_stream=sys.stdout)
return x
def is_tensor_or_composite_tensor(value):
"""Test if a passed value object is a tensor-like or composite tensor."""
return (tensor_util.is_tensor(value) or isinstance(value, np.ndarray) or
composite_tensor_utils.is_composite_or_composite_value(value))
def _try_process_scipy_sparse_input(value):
"""Converts 'value' to a SparseTensor if it is a scipy sparse matrix.
Arguments:
value: An object that may have the attributes of a scipy sparse matrix.
Returns:
Either a SparseTensor based off of 'value' or 'value' itself.
"""
try:
sparse_coo = value.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
except AttributeError:
# If we can't convert this object, it could be either a single data
# element (ie, a bool/int/float) which is OK to pass on, or something
# that we don't understand (which may or may not be OK). In either
# case, don't die here: the data standardization code will catch
# those issues.
return value
indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1)
return sparse_tensor.SparseTensor(indices, data, shape)
def try_convert_scipy_to_sparse(values):
"""Converts scipy sparse matrices in 'values' to SparseTensors, if possible.
Arguments:
values: An input or list of inputs to convert. These may be TensorLikes,
ndarrays, composite tensors, or scipy sparse values.
Returns:
An input or list of inputs where scipy sparse tensors have been converted
to tf.SparseTensors.
Raises:
ValueError: If input cannot be converted to a SparseTensor.
"""
# Convert scipy sparse data into sparse tensors.
value_structure = values
values = nest.flatten(values)
for idx, value in enumerate(values):
if not is_tensor_or_composite_tensor(value):
values[idx] = _try_process_scipy_sparse_input(value)
values = nest.pack_sequence_as(value_structure, values)
return values
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
In particular, additional operations can be passed via the `fetches` argument
and additional tensor substitutions via the `feed_dict` argument. Note that
the given substitutions are merged with substitutions from `inputs`. Even
though `feed_dict` is passed once in the constructor (called in
`model.compile()`), we can modify the values in the dictionary. Through this
`feed_dict` we can provide additional substitutions besides Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self._inputs_structure = inputs
self.inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(
nest.flatten(outputs, expand_composites=True))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
# to run custom updates
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def _eval_if_composite(self, tensor):
"""Helper method which evaluates any CompositeTensors passed to it."""
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
if isinstance(tensor, composite_tensor.CompositeTensor):
return self._session.run(tensor)
else:
return tensor
def __call__(self, inputs):
inputs = try_convert_scipy_to_sparse(inputs)
# Ensure that input value types match any expected composite tensor types.
# TODO(momernick): Once TensorSpecs are implemented for CTs, use that here.
inputs = nest.flatten(inputs, expand_composites=True)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
output_structure = nest.pack_sequence_as(
self._outputs_structure,
fetched[:len(self.outputs)],
expand_composites=True)
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
return nest.map_structure(self._eval_if_composite, output_structure)
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
self.name = name
self._inputs_structure = inputs
inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
outputs = nest.flatten(outputs, expand_composites=True)
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
if updates and not outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
graphs = {
i.graph
for i in nest.flatten([inputs, outputs, updates])
if hasattr(i, 'graph')
}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
global_graph = get_graph()
updates_ops = []
legacy_update_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op). Otherwise it
# is assumed to already be an op -- we cannot control its execution
# order.
if isinstance(update, tuple):
legacy_update_ops.append(update)
else:
if hasattr(update, 'op'):
update = update.op
if update is not None:
# `update.op` may have been None in certain cases.
updates_ops.append(update)
self._freezable_vars_to_feed = []
self._freezable_vars_values = []
freezable_vars_from_keras_graph = _FREEZABLE_VARS.get(global_graph, {})
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = (
outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
[p_new for [_, p_new] in legacy_update_ops
if isinstance(p_new, ops.Tensor)])
lifted_map = lift_to_graph.lift_to_graph(
init_tensors=init_tensors, graph=exec_graph, sources=inputs,
add_sources=True, handle_captures=True, base_graph=source_graph)
inputs = [lifted_map[i] for i in inputs]
outputs = [lifted_map[i] for i in outputs]
updates_ops = [lifted_map[i] for i in updates_ops]
legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
for p, p_new in legacy_update_ops]
# Keep track of the value to feed to any "freezable variables"
# created in this graph.
for old_op, new_op in lifted_map.items():
if old_op in freezable_vars_from_keras_graph:
frozen_var = old_op
if frozen_var._initial_value != frozen_var._current_value:
# We only feed a frozen_variable if its value has changed;
# otherwise it can rely on the default value of the
# underlying placeholder_with_default.
self._freezable_vars_to_feed.append(new_op)
self._freezable_vars_values.append(frozen_var._current_value)
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
with ops.control_dependencies(outputs):
for p, p_new in legacy_update_ops:
updates_ops.append(state_ops.assign(p, p_new))
self.inputs, self.outputs = inputs, outputs
self._input_references = self.inputs + self._freezable_vars_to_feed
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
exec_graph.inputs = self._input_references + list(
exec_graph.captures.values())
exec_graph.outputs = self.outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = len(self._input_references)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with exec_graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[x] = tensor_util.constant_value(
x.op.inputs[0])
def __call__(self, inputs):
# Convert scipy sparse data into sparse tensors.
inputs = try_convert_scipy_to_sparse(inputs)
input_values = nest.flatten(inputs, expand_composites=True)
if self._freezable_vars_values:
input_values = input_values + self._freezable_vars_values
converted_inputs = []
for tensor, value in zip(self._input_references, input_values):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(tensor, None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
if not isinstance(value, ops.Tensor):
value = ops.convert_to_tensor(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
# EagerTensor.numpy() will often make a copy to ensure memory safety.
# However in this case `outputs` is not directly returned, so it is always
# safe to reuse the underlying buffer without checking. In such a case the
# private numpy conversion method is preferred to guarantee performance. We
# also have to call `_cpu_nograd()` since the Tensor may not be on the CPU.
# (otherwise it's just a no-op.)
return nest.pack_sequence_as(
self._outputs_structure, [x._cpu_nograd()._numpy() for x in outputs], # pylint: disable=protected-access
expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
A callable that computes the outputs and returns them as Numpy arrays
when called with a list of input values.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
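Example:
A minimal sketch of typical usage (assuming `K` is `tf.keras.backend` and
`np` is NumPy; the expected output is shown in the comment):
```python
>>> x = K.placeholder(shape=(None,))
>>> y = x * 2
>>> f = K.function(inputs=[x], outputs=[y])
>>> f([np.array([1., 2., 3.], dtype='float32')])  # -> [array([2., 4., 6.])]
```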
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
A gradients tensor.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
return map(array_ops.stop_gradient, variables)
return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
# Process the input tensors. The input tensors need to be split on the
# time_step dim, and reversed if go_backwards is True. In the case of nested
# input, the input is flattened and then transformed individually.
# The result of this will be a tuple of lists; each item in the tuple is a
# list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
# Create the input TensorArray. If the inputs are nested tensors, they will
# be flattened first, and one TensorArray will be created per flattened
# tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
# Get the time(0) input and compute the output for that; the output will be
# used to determine the dtype of the output tensor array. Don't read from
# input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# the value is discarded.
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants))
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
# The mask for the output at time T will be based on the output at T - 1.
# In the case T = 0, a zero-filled tensor will be used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if isinstance(output_, ops.Tensor):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
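Example (an illustrative usage sketch, assuming eager execution so the
result can be printed; values are indicative):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
cond = tf.constant(True)
a = tf.constant([1., 2.])
b = tf.constant([3., 4.])
print(K.switch(cond, a, b))  # [1. 2.]
```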
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
The `training` flag defaults to `K.learning_phase()`.
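Example (an illustrative sketch with an explicit `training` flag, assuming
eager execution for the printed values):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
x = tf.constant([1., 2.])
alt = tf.zeros_like(x)
print(K.in_train_phase(x, alt, training=1))  # [1. 2.]
print(K.in_train_phase(x, alt, training=0))  # [0. 0.]
```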
"""
if training is None:
training = learning_phase()
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
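Example (an illustrative sketch; printed values are indicative):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
x = tf.constant([-10., -1., 0., 1., 10.])
print(K.relu(x))                # [ 0.   0.   0.   1.  10.]
print(K.relu(x, alpha=0.5))     # [-5.  -0.5  0.   1.  10.]
print(K.relu(x, max_value=5.))  # [0. 0. 0. 1. 5.]
```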
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
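Example (an illustrative sketch; printed values are approximate):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
x = tf.constant([-2., 0., 2.])
print(K.elu(x))             # approx [-0.86  0.    2.  ]
print(K.elu(x, alpha=0.5))  # approx [-0.43  0.    2.  ]
```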
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
Example:
```python
import tensorflow as tf
from tensorflow.keras import backend as K
a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
print("a: ", a)
b = tf.constant([.9, .05, .05, .5, .89, .6, .05, .01, .94], shape=[3,3])
print("b: ", b)
loss = K.categorical_crossentropy(a, b)
print('Loss: ', loss) #Loss: tf.Tensor([0.10536055 0.8046684 0.06187541], shape=(3,), dtype=float32)
loss = K.categorical_crossentropy(a, a)
print('Loss: ', loss) #Loss: tf.Tensor([1.1920929e-07 1.1920929e-07 1.1920929e-07], shape=(3,), dtype=float32)
```
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing to zero when training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(
labels=target, logits=output, axis=axis)
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
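Example (an illustrative sketch, assuming eager execution; losses are
approximately `-log(p_target)`):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
target = tf.constant([0, 2])
output = tf.constant([[0.9, 0.05, 0.05], [0.1, 0.1, 0.8]])
print(K.sparse_categorical_crossentropy(target, output))
# approx [0.105 0.223]
```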
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing to zero when training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
if isinstance(output.shape, (tuple, list)):
output_rank = len(output.shape)
else:
output_rank = output.shape.ndims
if output_rank is not None:
axis %= output_rank
if axis != output_rank - 1:
permutation = list(
itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
output = array_ops.transpose(output, perm=permutation)
elif axis != -1:
raise ValueError(
'Cannot compute sparse categorical crossentropy with `axis={}` on an '
'output tensor with unknown rank'.format(axis))
target = cast(target, 'int64')
# Try to adjust the shape so that rank of labels = 1 - rank of logits.
output_shape = array_ops.shape_v2(output)
target_rank = target.shape.ndims
update_shape = (
target_rank is not None and output_rank is not None and
target_rank != output_rank - 1)
if update_shape:
target = flatten(target)
output = array_ops.reshape(output, [-1, output_shape[-1]])
if py_any([_is_symbolic_tensor(v) for v in [target, output]]):
with get_graph().as_default():
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
else:
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
if update_shape and output_rank >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, output_shape[:-1])
else:
return res
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
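Example (an illustrative sketch, assuming eager execution; losses are
approximately `-[t * log(p) + (1 - t) * log(1 - p)]`):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
target = tf.constant([0., 1., 1.])
output = tf.constant([0.1, 0.8, 0.9])
print(K.binary_crossentropy(target, output))
# approx [0.105 0.223 0.105]
```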
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Sigmoid'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
else:
# When sigmoid activation function is used for output operation, we
# use logits from the sigmoid function directly to compute loss in order
# to prevent collapsing to zero when training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
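Example (an illustrative sketch of the piecewise-linear approximation):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
x = tf.constant([-3., 0., 1., 3.])
print(K.hard_sigmoid(x))  # [0.  0.5 0.7 1. ]
```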
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.mul(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
@keras_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
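Example (an illustrative sketch, assuming eager execution):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
predictions = tf.constant([[0.1, 0.9], [0.8, 0.2]])
targets = tf.constant([1, 1])
print(K.in_top_k(predictions, targets, k=1))  # [ True False]
```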
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same', 'valid'
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
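Example (an illustrative shape-only sketch with random values, assuming the
default `channels_last` data format):
```python
from tensorflow.keras import backend as K
x = K.random_normal((2, 8, 8, 3))        # batch of 2, 8x8, 3 channels (NHWC)
kernel = K.random_normal((3, 3, 3, 16))  # 3x3 window, 3 in / 16 out channels
y = K.conv2d(x, kernel, strides=(1, 1), padding='same')
print(K.int_shape(y))  # (2, 8, 8, 16)
```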
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (shape(x)[0],) + tuple(output_shape[1:])
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the depthwise convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_size` is not a tuple of 2 integers.
ValueError: if `strides` is not a tuple of 2 integers.
ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend([slice(position[d] * strides[d],
position[d] * strides[d] + kernel_size[d])
for d in spatial_dimensions])
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
A 3D tensor with shape:
(batch_size, output_length, filters)
if data_format='channels_last'
or a 3D tensor with shape:
(batch_size, filters, output_length)
if data_format='channels_first'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
The bias should be either a vector or
a tensor with ndim(x) - 1 dimensions.
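Example (an illustrative sketch with a 3D input, assuming the default
`channels_last` image data format and eager execution):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
x = tf.zeros((2, 3, 4))
bias = tf.constant([1., 2., 3., 4.])
y = K.bias_add(x, bias)  # bias is broadcast over the last (channels) axis
print(y[0, 0])  # [1. 2. 3. 4.]
```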
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
'Unexpected bias dimensions %d, expected to be 1 or %d dimensions' %
(len(bias_shape), ndim(x) - 1))
# pylint: disable=g-no-augmented-assignment
if ndim(x) == 5:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = nn.bias_add(x, bias, data_format='NCHW')
else:
x = x + reshape(bias, (1, bias_shape[0], 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = nn.bias_add(x, bias, data_format='NHWC')
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1))
else:
x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
else:
x = nn.bias_add(x, bias)
# pylint: enable=g-no-augmented-assignment
return x
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
The binomial distribution with parameters `n` and `p` is the probability
distribution of the number of successes in `n` Bernoulli trials. Only
`n = 1` is supported for now.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
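Example (an illustrative sketch; the output is random, so the values shown
are only one possible draw):
```python
from tensorflow.keras import backend as K
samples = K.random_binomial((4,), p=0.5, seed=1)
# each entry is 1. with probability 0.5 and 0. otherwise, e.g. [1. 0. 0. 1.]
```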
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(_, current_input):
return array_ops.expand_dims(
math_ops.range(label_shape[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples, 1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
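Example (an illustrative sketch of a running sum, assuming eager execution):
```python
import tensorflow as tf
from tensorflow.keras import backend as K
elems = tf.constant([1, 2, 3, 4])
print(K.foldl(lambda acc, x: acc + x, elems))  # 10
```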
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
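# An illustrative ~/.keras/keras.json with the library's default values (the
# exact contents depend on the user's configuration):
# {
#     "floatx": "float32",
#     "epsilon": 1e-07,
#     "backend": "tensorflow",
#     "image_data_format": "channels_last"
# }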
if os.path.exists(_config_path):
try:
_config = json.load(open(_config_path))
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
# Ignore permission-denied errors and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Ignore permission-denied errors.
pass
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if multi_worker_util.in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return (strategy is not None and
strategy.__class__.__name__.startswith('TPUStrategy'))
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
def _is_symbolic_tensor(x):
return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
| tensorflow-master | tensorflow/python/keras/backend.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import data_structures
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
# pylint: disable=not-callable
class SimpleTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, x):
x = self.dense1(x)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
class SimpleConvTestModel(keras.Model):
def __init__(self, num_classes=10):
super(SimpleConvTestModel, self).__init__(name='test_model')
self.num_classes = num_classes
self.conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu')
self.flatten = keras.layers.Flatten()
self.dense1 = keras.layers.Dense(num_classes, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
return self.dense1(x)
class MultiIOTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
super(MultiIOTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
if use_dp:
self.dp = keras.layers.Dropout(0.5)
if use_bn:
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x1, x2 = inputs
x1 = self.dense1(x1)
x2 = self.dense1(x2)
if self.use_dp:
x1 = self.dp(x1)
if self.use_bn:
x2 = self.bn(x2)
return [self.dense2(x1), self.dense3(x2)]
class NestedTestModel1(keras.Model):
"""A model subclass nested inside a model subclass.
"""
def __init__(self, num_classes=2):
super(NestedTestModel1, self).__init__(name='nested_model_1')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.test_net = SimpleTestModel(num_classes=4,
use_bn=True,
use_dp=True)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_functional_graph_model(input_dim, num_classes):
# A simple functional-API model (a.k.a. graph network)
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs)
class NestedTestModel2(keras.Model):
"""A model subclass with a functional-API graph network inside.
"""
def __init__(self, num_classes=2):
super(NestedTestModel2, self).__init__(name='nested_model_2')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    self.bn = keras.layers.BatchNormalization()
self.test_net = get_functional_graph_model(32, 4)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
# A functional-API model with a subclassed model inside.
# NOTE: this requires the inner subclass to implement `compute_output_shape`.
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(5, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
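  # A hedged sketch of the `compute_output_shape` mentioned in the NOTE above:
  # `Inner` ends with a 5-unit dense layer followed by batch norm, so it would
  # report a (batch, 5) output shape (illustrative only, not exercised here):
  #   def compute_output_shape(self, input_shape):
  #     return tensor_shape.TensorShape((input_shape[0], 5))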
test_model = Inner()
x = test_model(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs, name='nested_model_3')
@keras_parameterized.run_all_keras_modes
class ModelSubclassingTest(keras_parameterized.TestCase):
def test_custom_build(self):
class DummyModel(keras.Model):
def __init__(self):
super(DummyModel, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.uses_custom_build = False
def call(self, inputs):
return self.dense1(inputs)
def build(self, input_shape):
self.uses_custom_build = True
test_model = DummyModel()
dummy_data = array_ops.ones((32, 50))
test_model(dummy_data)
    self.assertTrue(test_model.uses_custom_build, 'Model should use user-'
                    'defined build when called.')
def test_attribute_conflict_error(self):
class ModelWithProperty(keras.Model):
@property
def read_only(self):
return 1.
m = ModelWithProperty()
with self.assertRaisesRegexp(AttributeError, 'read_only'):
m.read_only = 2.
def test_custom_build_with_fit(self):
class DummyModel(keras.Model):
def __init__(self):
super(DummyModel, self).__init__()
self.layer1 = keras.layers.Dense(10, activation='relu')
def build(self, input_shape):
self.layer2 = keras.layers.Dense(1, activation='relu')
def call(self, inputs):
return self.layer2(self.layer1(inputs))
model = DummyModel()
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=2)
self.assertLen(model.layers, 2)
self.assertLen(model.trainable_variables, 4)
def test_invalid_input_shape_build(self):
num_classes = 2
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'input shape is not one of the valid types'):
model.build(input_shape=tensor_shape.Dimension(input_dim))
def test_embed_dtype_with_subclass_build(self):
class Embedding(keras.layers.Layer):
"""An Embedding layer."""
def __init__(self, vocab_size, embedding_dim, **kwargs):
super(Embedding, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
def build(self, _):
self.embedding = self.add_variable(
'embedding_kernel',
shape=[self.vocab_size, self.embedding_dim],
dtype=np.float32,
initializer=init_ops.random_uniform_initializer(-0.1, 0.1),
trainable=True)
def call(self, x):
return embedding_ops.embedding_lookup(self.embedding, x)
class EmbedModel(keras.Model):
def __init__(self, vocab_size, embed_size):
super(EmbedModel, self).__init__()
self.embed1 = Embedding(vocab_size, embed_size)
def call(self, inputs):
return self.embed1(inputs)
model = EmbedModel(100, 20)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'if your layers do not support float type inputs'):
model.build(input_shape=(35, 20))
def test_single_time_step_rnn_build(self):
dim = 4
timesteps = 1
batch_input_shape = (None, timesteps, dim)
units = 3
class SimpleRNNModel(keras.Model):
def __init__(self):
super(SimpleRNNModel, self).__init__()
self.lstm = keras.layers.LSTM(units)
def call(self, inputs):
return self.lstm(inputs)
model = SimpleRNNModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, timesteps, dim)))
def test_single_io_subclass_build(self):
num_classes = 2
input_dim = 50
batch_size = None
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_single_io_dimension_subclass_build(self):
num_classes = 2
input_dim = tensor_shape.Dimension(50)
batch_size = tensor_shape.Dimension(None)
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_multidim_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = 32
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = (batch_size,) + input_shape
model.build(input_shape=batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones(batch_input_shape))
def test_tensorshape_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32,) + input_shape))
def test_subclass_save_model(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
weights = model.get_weights()
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = SimpleConvTestModel(num_classes)
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
if h5py is not None:
model.load_weights(hdf5_format_name)
self.assertAllClose(weights, model.get_weights())
model.load_weights(tf_format_name)
self.assertAllClose(weights, model.get_weights())
def test_multi_io_subclass_build(self):
batch_size = None
num_samples = 1000
input_dim = 50
model = MultiIOTestModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = tensor_shape.TensorShape((batch_size, input_dim))
model.build(
input_shape=[batch_input_shape, batch_input_shape])
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
model([x1, x2])
def test_summary(self):
class ToString(object):
def __init__(self):
self.contents = ''
def __call__(self, msg):
self.contents += msg + '\n'
# Single-io
model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
model._set_inputs(np.ones((3, 4))) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 356' in print_fn.contents)
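    # For reference, the 356 figure decomposes as 4 * 32 + 32 = 160 parameters
    # in dense1, 32 * 4 + 4 = 132 in dense2, and 32 gammas + 32 betas = 64
    # trainable weights in batch norm: 160 + 132 + 64 = 356.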
# Multi-io
model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True)
model._set_inputs([np.ones((3, 4)),
np.ones((3, 4))]) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
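    # Likewise for the multi-IO case: dense1 contributes 160, dense2
    # 32 * 5 + 5 = 165, dense3 32 * 6 + 6 = 198 and batch norm 64, giving
    # 160 + 165 + 198 + 64 = 587 trainable parameters.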
def test_no_dependency(self):
class Foo(keras.Model):
def __init__(self):
super(Foo, self).__init__()
self.isdep = keras.layers.Dense(1)
self.notdep = data_structures.NoDependency(keras.layers.Dense(2))
self.notdep_var = data_structures.NoDependency(
resource_variable_ops.ResourceVariable(1., name='notdep_var'))
m = Foo()
self.assertEqual([m.isdep, m.notdep], m.layers)
self.assertEqual(1, len(m._checkpoint_dependencies))
self.assertIs(m.isdep, m._checkpoint_dependencies[0].ref)
self.assertEqual('notdep_var:0', m.notdep_var.name)
def test_extra_variable(self):
class ExtraVar(keras.Model):
def __init__(self):
super(ExtraVar, self).__init__()
self.dense = keras.layers.Dense(1)
self.var = resource_variable_ops.ResourceVariable(1.)
self.not_trainable_var = resource_variable_ops.ResourceVariable(
2., trainable=False)
def call(self, inputs):
return self.dense(inputs + self.var)
m = ExtraVar()
self.assertTrue(m.trainable)
self.assertEqual([m.dense], m.layers)
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
self.assertLen(m.get_weights(), 2)
m.trainable = False
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([], m.trainable_variables)
self.assertEqual([m.var, m.not_trainable_var], m.non_trainable_variables)
self.assertLen(m.get_weights(), 2)
m.trainable = True
m(array_ops.ones([1, 1]))
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.variables)
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.weights)
self.assertLen(m.get_weights(), 4)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var],
m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
m.dense.trainable = False
self.assertEqual(
[m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.not_trainable_var],
m.non_trainable_variables)
self.assertLen(m.get_weights(), 4)
def test_add_weight_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
class MyModelCustomBuild(keras.Model):
def build(self, input_shape):
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModelCustomBuild()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
def test_add_update_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,))
def call(self, inputs):
# Unconditional
self.add_update(self.b.assign(self.b * 2))
# Conditional
self.add_update(self.c.assign(inputs[1, :]), inputs)
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
if context.executing_eagerly():
self.assertEqual(0, len(model.updates))
else:
self.assertEqual(2, len(model.updates))
self.assertEqual(1, len(model.get_updates_for(None)))
self.assertEqual(1, len(model.get_updates_for(x)))
@keras_parameterized.run_all_keras_modes
class ModelSubclassCompiledTest(keras_parameterized.TestCase):
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc', keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_dataset_iterators(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim), dtype=np.float32)
y = np.zeros((num_samples, num_classes), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(iterator, steps=10, verbose=0)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
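    # Expected bookkeeping for the counts below: the four layers are dense1,
    # dense2, dense3 and batch norm; each dense layer holds 2 weights and
    # batch norm holds 4 (2 trainable, 2 non-trainable), hence 10 in total.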
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.inputs), 2)
self.assertEqual(len(model.outputs), 2)
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
model = BNNet()
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
def test_saving(self):
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
if h5py is not None:
with self.assertRaises(ValueError):
model.load_weights(hdf5_format_name)
model.load_weights(tf_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
if h5py is not None:
model.load_weights(hdf5_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel1(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel2(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
def test_subclass_nested_in_sequential(self):
num_classes = 2
num_samples = 100
input_dim = 50
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
model = keras.Sequential([Inner()])
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.trainable_weights), 6)
def test_support_for_manual_training_arg(self):
    # In most cases, the `training` argument is left unspecified, in which
    # case it defaults to the value corresponding to the Model method being
    # used (fit -> True, predict -> False, etc.).
    # If the user writes their model's `call` method to take an explicit
    # `training` argument, we must check that the correct value is being
    # passed to the model for each method call.
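    # As a rough mapping (restating the comment above; illustrative only):
    #   model.fit(...)      -> call(..., training=True)
    #   model.evaluate(...) -> call(..., training=False)
    #   model.predict(...)  -> call(..., training=False)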
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_no_loss_in_compile(self):
class InternalLossModel(keras.Model):
def __init__(self):
super(InternalLossModel, self).__init__()
self.dense = keras.layers.Dense(1)
def call(self, inputs):
out = self.dense(inputs)
self.add_loss(math_ops.reduce_sum(out))
return out
model = InternalLossModel()
x = np.ones((10, 10))
model.predict(x)
model.compile(
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x)
model.evaluate(x)
class GraphSpecificModelSubclassingTests(test.TestCase):
@test_util.run_deprecated_v1
def test_single_io_workflow_with_tensors(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_tensors(self):
num_classes = (2, 3)
num_samples = 10
input_dim = 50
with self.cached_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
y1 = array_ops.zeros((num_samples, num_classes[0]))
y2 = array_ops.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_updates_and_losses_for_nested_models_in_subclassed_model(self):
# Case 1: deferred-build sequential nested in subclass.
class TestModel1(keras.Model):
def __init__(self):
super(TestModel1, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential([keras.layers.BatchNormalization(axis=1)])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel1()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 2: placeholder-sequential nested in subclass.
class TestModel2(keras.Model):
def __init__(self):
super(TestModel2, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential(
[keras.layers.BatchNormalization(axis=1, input_shape=(10,))])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel2()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 3: functional-API model nested in subclass.
inputs = keras.Input((10,))
outputs = keras.layers.BatchNormalization(axis=1)(inputs)
bn = keras.Model(inputs, outputs)
class TestModel3(keras.Model):
def __init__(self):
super(TestModel3, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = bn
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel3()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.cached_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
x2_placeholder = array_ops.placeholder(
dtype='float32', shape=(None, input_dim))
model._set_inputs([x1, x2_placeholder])
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
class CustomCallModel(keras.Model):
def __init__(self):
super(CustomCallModel, self).__init__()
self.dense1 = keras.layers.Dense(1, activation='relu')
self.dense2 = keras.layers.Dense(1, activation='softmax')
def call(self, first, second, fiddle_with_output='no', training=True):
combined = self.dense1(first) + self.dense2(second)
if fiddle_with_output == 'yes':
return 10. * combined
else:
return combined
class TrainingNoDefaultModel(keras.Model):
def __init__(self):
super(TrainingNoDefaultModel, self).__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training):
return self.dense1(x)
class TrainingMaskingModel(keras.Model):
def __init__(self):
super(TrainingMaskingModel, self).__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training=False, mask=None):
return self.dense1(x)
@test_util.run_all_in_graph_and_eager_modes
class CustomCallSignatureTests(test.TestCase):
def test_no_inputs_in_signature(self):
model = CustomCallModel()
first = array_ops.ones([2, 3])
second = array_ops.ones([2, 5])
output = model(first, second)
self.evaluate([v.initializer for v in model.variables])
expected_output = self.evaluate(model.dense1(first) + model.dense2(second))
self.assertAllClose(expected_output, self.evaluate(output))
output = model(first, second, fiddle_with_output='yes')
self.assertAllClose(10. * expected_output, self.evaluate(output))
output = model(first, second=second, training=False)
self.assertAllClose(expected_output, self.evaluate(output))
def test_training_args_call_build(self):
input_dim = 2
model = TrainingNoDefaultModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_training_and_mask_args_call_build(self):
input_dim = 2
model = TrainingMaskingModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_custom_call_kwargs_and_build(self):
first_input_shape = (2, 3)
second_input_shape = (2, 5)
model = CustomCallModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'cannot build your model if it has positional'):
model.build(input_shape=[first_input_shape, second_input_shape])
def test_inputs_in_signature(self):
class HasInputsAndOtherPositional(keras.Model):
def call(self, inputs, some_other_arg, training=False):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
model = HasInputsAndOtherPositional()
with self.assertRaisesRegexp(
TypeError, 'everything else as a keyword argument'):
x1, x2 = keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2)
def test_kwargs_in_signature(self):
class HasKwargs(keras.Model):
def call(self, x, y=3, **kwargs):
return x
model = HasKwargs()
arg = array_ops.ones([1])
model(arg, a=3)
if not context.executing_eagerly():
self.assertEqual(len(model.inputs), 1)
def test_args_in_signature(self):
class HasArgs(keras.Model):
def call(self, x, *args, **kwargs):
return [x] + list(args)
def compute_output_shape(self, input_shape):
return input_shape
model = HasArgs()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2, x3, a=3)
self.assertEqual(len(model.inputs), 3)
def test_args_and_keywords_in_signature(self):
class HasArgs(keras.Model):
def call(self, x, training=True, *args, **kwargs): # pylint:disable=keyword-arg-before-vararg
return x
model = HasArgs()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
with self.assertRaisesRegexp(
TypeError, 'may not accept both positional arguments and '):
model(x1, x2, x3, a=3)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def test_training_no_default(self):
if context.executing_eagerly():
self.skipTest('b/120997007')
model = TrainingNoDefaultModel()
arg = array_ops.ones([1, 1])
model(arg, True)
self.assertEqual(len(model.inputs), 1)
def test_training_no_default_with_positional(self):
class TrainingNoDefaultWithPositional(keras.Model):
def call(self, x, training, positional):
return x
model = TrainingNoDefaultWithPositional()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
with self.assertRaisesRegexp(TypeError, 'after a non-input'):
model(x1, x2, x3)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/model_subclassing_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras testing_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import googletest
class KerasParameterizedTest(keras_parameterized.TestCase):
def test_run_with_all_model_types(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types
def testBody(self):
model_types.append(testing_utils.get_model_type())
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_functional()
e.testBody_subclass()
e.testBody_sequential()
self.assertLen(model_types, 3)
self.assertAllEqual(model_types, [
"functional",
"subclass",
"sequential"
])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
self.assertIsInstance(models[2], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 6)
def test_run_with_all_model_types_and_extra_params(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types
@parameterized.named_parameters(
[dict(testcase_name="_0", with_brackets=True),
dict(testcase_name="_1", with_brackets=False)])
def testBody(self, with_brackets):
with_brackets = "with_brackets" if with_brackets else "without_brackets"
model_types.append((with_brackets, testing_utils.get_model_type()))
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_0_functional()
e.testBody_0_subclass()
e.testBody_0_sequential()
e.testBody_1_functional()
e.testBody_1_subclass()
e.testBody_1_sequential()
self.assertLen(model_types, 6)
self.assertAllEqual(model_types, [
("with_brackets", "functional"),
("with_brackets", "subclass"),
("with_brackets", "sequential"),
("without_brackets", "functional"),
("without_brackets", "subclass"),
("without_brackets", "sequential"),
])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
self.assertIsInstance(models[2], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 12)
def test_run_with_all_model_types_exclude_one(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types(exclude_models="sequential")
def testBody(self):
model_types.append(testing_utils.get_model_type())
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
if hasattr(e, "testBody_functional"):
e.testBody_functional()
if hasattr(e, "testBody_subclass"):
e.testBody_subclass()
if hasattr(e, "testBody_sequential"):
e.testBody_sequential()
self.assertLen(model_types, 2)
self.assertAllEqual(model_types, [
"functional",
"subclass"
])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 4)
def test_run_with_all_model_types_exclude_multiple(self):
model_types = []
models = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types(
exclude_models=["sequential", "functional"])
def testBody(self):
model_types.append(testing_utils.get_model_type())
models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
if hasattr(e, "testBody_functional"):
e.testBody_functional()
if hasattr(e, "testBody_subclass"):
e.testBody_subclass()
if hasattr(e, "testBody_sequential"):
e.testBody_sequential()
self.assertLen(model_types, 1)
self.assertAllEqual(model_types, [
"subclass"
])
# Validate that the models are what they should be
self.assertFalse(models[0]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 2)
def test_run_all_keras_modes(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((mode, should_run_eagerly))
e = ExampleTest()
if not tf2.enabled():
e.testBody_v1_graph()
e.testBody_v2_eager()
e.testBody_v2_function()
if not tf2.enabled():
self.assertLen(l, 3)
self.assertAllEqual(l, [
("graph", False),
("eager", True),
("eager", False),
])
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, 6)
else:
self.assertLen(l, 2)
self.assertAllEqual(l, [
("eager", True),
("eager", False),
])
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, 4)
def test_run_all_keras_modes_extra_params(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
[dict(testcase_name="_0", with_brackets=True),
dict(testcase_name="_1", with_brackets=False)])
def testBody(self, with_brackets):
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((with_brackets, mode, should_run_eagerly))
e = ExampleTest()
if not tf2.enabled():
e.testBody_0_v1_graph()
e.testBody_1_v1_graph()
e.testBody_0_v2_eager()
e.testBody_0_v2_function()
e.testBody_1_v2_eager()
e.testBody_1_v2_function()
expected_combinations = {
("with_brackets", "eager", True),
("with_brackets", "eager", False),
("without_brackets", "eager", True),
("without_brackets", "eager", False),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("with_brackets", "graph", False),
("without_brackets", "graph", False),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_always_skip_v1(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((mode, should_run_eagerly))
e = ExampleTest()
if hasattr(e, "testBody_v1_graph"):
e.testBody_v1_graph()
if hasattr(e, "testBody_v2_eager"):
e.testBody_v2_eager()
if hasattr(e, "testBody_v2_function"):
e.testBody_v2_function()
self.assertLen(l, 2)
self.assertEqual(set(l), {
("eager", True),
("eager", False),
})
def test_run_all_keras_modes_with_all_model_types(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((mode, should_run_eagerly, testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_v2_eager_functional()
e.testBody_v2_function_functional()
e.testBody_v2_eager_sequential()
e.testBody_v2_function_sequential()
e.testBody_v2_eager_subclass()
e.testBody_v2_function_subclass()
if not tf2.enabled():
e.testBody_v1_graph_functional()
e.testBody_v1_graph_sequential()
e.testBody_v1_graph_subclass()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_model_types_with_all_keras_modes(self):
l = []
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((mode, should_run_eagerly, testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_functional_v2_eager()
e.testBody_functional_v2_function()
e.testBody_sequential_v2_eager()
e.testBody_sequential_v2_function()
e.testBody_subclass_v2_eager()
e.testBody_subclass_v2_function()
if not tf2.enabled():
e.testBody_functional_v1_graph()
e.testBody_sequential_v1_graph()
e.testBody_subclass_v1_graph()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_with_all_model_types_annotate_class(self):
l = []
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@parameterized.named_parameters(dict(testcase_name="_arg",
arg=True))
def testBody(self, arg):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((mode, should_run_eagerly, testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_function_subclass()
if not tf2.enabled():
e.testBody_arg_v1_graph_functional()
e.testBody_arg_v1_graph_sequential()
e.testBody_arg_v1_graph_subclass()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
l = []
@keras_parameterized.run_with_all_model_types
class ExampleTest(keras_parameterized.TestCase):
def runTest(self):
pass
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(dict(testcase_name="_arg",
arg=True))
def testBody(self, arg):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
l.append((mode, should_run_eagerly, testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_function_subclass()
if not tf2.enabled():
e.testBody_arg_v1_graph_functional()
e.testBody_arg_v1_graph_sequential()
e.testBody_arg_v1_graph_subclass()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf2.enabled():
expected_combinations = expected_combinations.union({
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
})
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
def test_run_all_keras_modes_extra_params_2(self, arg):
self.assertEqual(arg, True)
@keras_parameterized.run_with_all_model_types
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
def test_run_with_all_model_types_extra_params_2(self, arg):
self.assertEqual(arg, True)
if __name__ == "__main__":
googletest.main()
| tensorflow-master | tensorflow/python/keras/keras_parameterized_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras weights constraints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
def get_test_values():
return [0.1, 0.5, 3, 8, 1e-7]
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100. - 50.
  example_array[0, 0] = 0.  # zero is an edge case for norm-based constraints
return example_array
@test_util.run_all_in_graph_and_eager_modes
class KerasConstraintsTest(test.TestCase):
def test_serialization(self):
    all_constraints = ['max_norm', 'non_neg',
                       'unit_norm', 'min_max_norm']
    for name in all_constraints:
fn = keras.constraints.get(name)
ref_fn = getattr(keras.constraints, name)()
assert fn.__class__ == ref_fn.__class__
config = keras.constraints.serialize(fn)
fn = keras.constraints.deserialize(config)
assert fn.__class__ == ref_fn.__class__
def test_max_norm(self):
array = get_example_array()
for m in get_test_values():
norm_instance = keras.constraints.max_norm(m)
normed = norm_instance(keras.backend.variable(array))
assert np.all(keras.backend.eval(normed) < m)
# a more explicit example
norm_instance = keras.constraints.max_norm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
x_normed_target = np.array(
[[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0],
[2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T
x_normed_actual = keras.backend.eval(
norm_instance(keras.backend.variable(x)))
self.assertAllClose(x_normed_actual, x_normed_target, rtol=1e-05)
def test_non_neg(self):
non_neg_instance = keras.constraints.non_neg()
normed = non_neg_instance(keras.backend.variable(get_example_array()))
assert np.all(np.min(keras.backend.eval(normed), axis=1) == 0.)
def test_unit_norm(self):
unit_norm_instance = keras.constraints.unit_norm()
normalized = unit_norm_instance(keras.backend.variable(get_example_array()))
norm_of_normalized = np.sqrt(
np.sum(keras.backend.eval(normalized)**2, axis=0))
    # Under the unit norm constraint, each column norm should equal 1.
difference = norm_of_normalized - 1.
largest_difference = np.max(np.abs(difference))
assert np.abs(largest_difference) < 10e-5
def test_min_max_norm(self):
array = get_example_array()
for m in get_test_values():
norm_instance = keras.constraints.min_max_norm(
min_value=m, max_value=m * 2)
normed = norm_instance(keras.backend.variable(array))
value = keras.backend.eval(normed)
l2 = np.sqrt(np.sum(np.square(value), axis=0))
assert not l2[l2 < m]
assert not l2[l2 > m * 2 + 1e-5]
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/constraints_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Code for model cloning, plus model-related API entries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import saving
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
# API entries importable from `keras.models`:
Model = training.Model # pylint: disable=invalid-name
Sequential = sequential.Sequential # pylint: disable=invalid-name
save_model = saving.save_model
load_model = saving.load_model
model_from_config = saving.model_from_config
model_from_yaml = saving.model_from_yaml
model_from_json = saving.model_from_json
# A `layer_fn` that preserves the layer so that clones share its weights.
def share_weights(layer):
return layer
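# Illustrative sketch (hedged; `model` stands for a hypothetical functional
# model): passing `share_weights` as `layer_fn` to the cloning helpers below
# reuses the original layers instead of creating new ones, e.g.:
#   shared_clone = _clone_functional_model(model, layer_fn=share_weights)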
def _clone_layer(layer):
return layer.__class__.from_config(layer.get_config())
def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Input layers are always cloned.
Arguments:
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
      layer_fn: callable to be applied on non-input layers in the model. By
          default it clones the layer. Another use is to preserve the layer
          so that its weights are shared. This is required when we create a
          per-replica copy of the model with a distribution strategy; we want
          the weights to be shared but the inputs to be fed separately, so we
          create new input layers.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value or `layer_fn`
argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
if not model._is_graph_network:
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'but got a subclass model instead.')
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: corresponding_tensor}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(
batch_shape=layer._batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history.layer
layer_map[layer] = newly_created_input_layer
else:
# Make sure that all input tensors come from a Keras layer.
    # If a tensor comes from an input layer, cache that input layer.
input_tensors = nest.flatten(input_tensors)
input_tensors_ = []
for i in range(len(input_tensors)):
input_tensor = input_tensors[i]
if not K.is_keras_tensor(input_tensor):
original_input_layer = model._input_layers[i]
name = original_input_layer.name
input_tensor = Input(tensor=input_tensor,
name='input_wrapper_for_' + name)
input_tensors_.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history.layer
layer_map[original_input_layer] = newly_created_input_layer
else:
input_tensors_.append(input_tensor)
input_tensors = input_tensors_
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = y
if not callable(layer_fn):
raise ValueError('Expected `layer_fn` argument to be a callable.')
new_nodes = set()
  # Iterate over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
new_layer = layer_fn(layer)
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
if all(
tensor in tensor_map for tensor in nest.flatten(node.input_tensors)):
computed_tensors = nest.map_structure(lambda t: tensor_map[t],
node.input_tensors)
# Call layer.
kwargs = node.arguments or {}
output_tensors = layer(computed_tensors, **kwargs)
# Thread-safe way to keep track of what node was created.
first_output_tensor = nest.flatten(output_tensors)[0]
new_nodes.add(
layer._inbound_nodes[first_output_tensor._keras_history.node_index])
for x, y in zip(
nest.flatten(node.output_tensors), nest.flatten(output_tensors)):
tensor_map[x] = y
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
output_tensors.append(tensor_map[x])
input_tensors = nest.pack_sequence_as(model._nested_inputs, input_tensors)
output_tensors = nest.pack_sequence_as(model._nested_outputs, output_tensors)
model = Model(input_tensors, output_tensors, name=model.name)
# Layers not directly tied to outputs of the Model, such as loss layers
# created in `add_loss`.
ancillary_layers = [
layer for layer in layer_map.values() if layer not in model.layers
]
if ancillary_layers:
nodes = set(
nest.flatten([layer._inbound_nodes for layer in ancillary_layers]))
relevant_nodes = list(nodes.intersection(new_nodes))
model._insert_layers(ancillary_layers, relevant_nodes=relevant_nodes)
return model
def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
      layer_fn: callable to be applied to non-input layers in the model. By
          default it clones the layer. Another option is to preserve the layer
          so that its weights are shared. This is required when we create a
          per-replica copy of the model with a distribution strategy: we want
          the weights to be shared but still feed inputs separately, so we
          create new input layers.
Returns:
An instance of `Sequential` reproducing the behavior
      of the original model, on top of new input tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value or `layer_fn`
argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
if not callable(layer_fn):
raise ValueError('Expected `layer_fn` argument to be a callable.')
# Use model._layers to ensure that all layers are cloned. The model's layers
# property will exclude the initial InputLayer (if it exists) in the model,
# resulting in a different Sequential model structure.
if input_tensors is None:
layers = []
for layer in model._layers:
if isinstance(layer, InputLayer):
layers.append(_clone_layer(layer))
else:
layers.append(layer_fn(layer))
return Sequential(layers=layers, name=model.name)
else:
# If input tensors are provided, the original model's InputLayer is
# overwritten with a different InputLayer.
layers = [
layer_fn(layer)
for layer in model._layers
if not isinstance(layer, InputLayer)
]
if len(generic_utils.to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
                       'exactly one tensor '
'as part of `input_tensors`.')
if isinstance(input_tensors, tuple):
input_tensors = list(input_tensors)
x = generic_utils.to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history.layer
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers, name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history.layer
return Sequential(layers=[input_layer] + layers, name=model.name)
@keras_export('keras.models.clone_model')
def clone_model(model, input_tensors=None, clone_function=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors or InputLayer objects
to build the model upon. If not provided,
placeholders will be created.
clone_function: Callable to be used to clone each layer in the target
model (except `InputLayer` instances). It takes as argument the layer
instance to be cloned, and returns the corresponding layer instance to
be used in the model copy. If unspecified, this callable defaults to
the following serialization/deserialization function:
`lambda layer: layer.__class__.from_config(layer.get_config())`.
By passing a custom callable, you can customize your copy of the
model, e.g. by wrapping certain layers of interest (you might want to
replace all `LSTM` instances with equivalent
`Bidirectional(LSTM(...))` instances, for example).
Returns:
An instance of `Model` reproducing the behavior
      of the original model, on top of new input tensors,
using newly instantiated weights. The cloned model might behave
differently from the original model if a custom clone_function
modifies the layer.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if clone_function is None:
clone_function = _clone_layer
if isinstance(model, Sequential):
return _clone_sequential_model(
model, input_tensors=input_tensors, layer_fn=clone_function)
else:
return _clone_functional_model(
model, input_tensors=input_tensors, layer_fn=clone_function)
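# Illustrative usage sketch (not part of the original module). The model and the
# `widen_dense` clone function below are hypothetical: `widen_dense` doubles the
# width of every Dense layer, while other layers fall back to the default config
# round-trip clone.
def _clone_model_usage_sketch():
  """Hypothetical example of `clone_model` with a custom `clone_function`."""
  from tensorflow.python import keras  # local import keeps the sketch self-contained
  original = keras.models.Sequential([
      keras.layers.Dense(4, activation='relu', input_shape=(8,)),
      keras.layers.Dense(1, activation='sigmoid'),
  ])
  def widen_dense(layer):
    config = layer.get_config()
    if isinstance(layer, keras.layers.Dense):
      config['units'] = 2 * config['units']
    return layer.__class__.from_config(config)
  # The clone gets newly initialized weights; its Dense layers are twice as wide.
  return clone_model(original, clone_function=widen_dense)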
# "Clone" a subclassed model by resetting all of its attributes.
def _in_place_subclassed_model_reset(model):
"""Substitute for model cloning that works for subclassed models.
Subclassed models cannot be cloned because their topology is not serializable.
To "instantiate" an identical model in a new TF graph, we reuse the original
model object, but we clear its state.
After calling this function on a model instance, you can use the model
instance as if it were a model clone (in particular you can use it in a new
graph).
This method clears the state of the input model. It is thus destructive.
However the original state can be restored fully by calling
`_in_place_subclassed_model_state_restoration`.
Args:
model: Instance of a Keras model created via subclassing.
Raises:
ValueError: In case the model uses a subclassed model as inner layer.
"""
assert not model._is_graph_network # Only makes sense for subclassed networks
# Retrieve all layers tracked by the model as well as their attribute names
attributes_cache = {}
for name in dir(model):
# Skip the check of methods in tf.Module since they basically
    # recursively query all the other attributes within the same module.
if name == 'submodules':
continue
try:
value = getattr(model, name)
except (AttributeError, ValueError, TypeError):
continue
if isinstance(value, Layer):
attributes_cache[name] = value
assert value in model.layers
if hasattr(value, 'layers') and value.layers:
raise ValueError('We do not support the use of nested layers '
'in `model_to_estimator` at this time. Found nested '
'layer: %s' % value)
elif isinstance(
value, (list, tuple)) and name not in ('layers', '_layers', 'metrics',
'_compile_metric_functions',
'_output_loss_metrics'):
# Handle case: list/tuple of layers (also tracked by the Network API).
if value and all(isinstance(val, Layer) for val in value):
raise ValueError('We do not support the use of list-of-layers '
'attributes in subclassed models used with '
'`model_to_estimator` at this time. Found list '
'model: %s' % name)
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
setattr_tracking = model._setattr_tracking
model._setattr_tracking = False
model._layers = []
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
# This would be theoretically possible to support, but would add complexity.
# Only do it if users complain.
if isinstance(layer, Network) and not layer._is_graph_network:
raise ValueError('We do not support the use of nested subclassed models '
'in `model_to_estimator` at this time. Found nested '
'model: %s' % layer)
fresh_layer = layer.__class__.from_config(config)
name = layers_to_names[layer]
setattr(model, name, fresh_layer)
model._layers.append(fresh_layer)
# Cache original model build attributes (in addition to layers)
if (not hasattr(model, '_original_attributes_cache') or
model._original_attributes_cache is None):
if model.built:
attributes_to_cache = [
'inputs',
'outputs',
'total_loss',
'optimizer',
'train_function',
'test_function',
'predict_function',
'_training_endpoints',
'_collected_trainable_weights',
'_feed_inputs',
'_feed_input_names',
'_feed_input_shapes',
]
for name in attributes_to_cache:
attributes_cache[name] = getattr(model, name)
model._original_attributes_cache = attributes_cache
_reset_build_compile_trackers(model)
model._setattr_tracking = setattr_tracking
def _reset_build_compile_trackers(model):
"""Reset state trackers for model.
Note that we do not actually zero out attributes such as optimizer,
but instead rely on the expectation that all of the attrs will be
over-written on calling build/compile/etc. This is somewhat fragile,
insofar as we check elsewhere for the presence of these attributes as
evidence of having been built/compiled/etc. Pending a better way to do this,
we reset key attributes here to allow building and compiling.
Args:
model: the model that is being reset
"""
# Reset build state
model.built = False
model.inputs = None
model.outputs = None
# Reset compile state
model._is_compiled = False # pylint:disable=protected-access
model.optimizer = None
def in_place_subclassed_model_state_restoration(model):
"""Restores the original state of a model after it was "reset".
  This undoes the action of `_in_place_subclassed_model_reset`, which is called
in `clone_and_build_model` if `in_place_reset` is set to True.
Args:
model: Instance of a Keras model created via subclassing, on which
`_in_place_subclassed_model_reset` was previously called.
"""
assert not model._is_graph_network
# Restore layers and build attributes
if (hasattr(model, '_original_attributes_cache') and
model._original_attributes_cache is not None):
# Models have sticky attribute assignment, so we want to be careful to add
# back the previous attributes and track Layers by their original names
# without adding dependencies on "utility" attributes which Models exempt
# when they're constructed.
setattr_tracking = model._setattr_tracking
model._setattr_tracking = False
model._layers = []
for name, value in model._original_attributes_cache.items():
setattr(model, name, value)
if isinstance(value, Layer):
model._layers.append(value)
model._original_attributes_cache = None
model._setattr_tracking = setattr_tracking
else:
# Restore to the state of a never-called model.
_reset_build_compile_trackers(model)
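# Illustrative sketch (not part of the original module) of the reset/restore
# workflow described above; `_TinySubclassed` is a hypothetical model.
def _subclassed_reset_restore_sketch():
  """Hypothetical example: reset a subclassed model in place, then restore it."""
  from tensorflow.python import keras  # local import keeps the sketch self-contained
  class _TinySubclassed(keras.Model):
    def __init__(self):
      super(_TinySubclassed, self).__init__()
      self.dense = keras.layers.Dense(1)
    def call(self, inputs):
      return self.dense(inputs)
  model = _TinySubclassed()
  _in_place_subclassed_model_reset(model)  # the model now acts like a fresh clone
  # ... the model could now be used in a new graph ...
  in_place_subclassed_model_state_restoration(model)  # undo the reset
  return model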
def clone_and_build_model(
model, input_tensors=None, target_tensors=None, custom_objects=None,
compile_clone=True, in_place_reset=False, optimizer_iterations=None,
optimizer_config=None):
"""Clone a `Model` and build/compile it with the same settings used before.
  This function can be run in the same graph or in a separate graph from the
model. When using a separate graph, `in_place_reset` must be `False`.
Note that, currently, the clone produced from this function may not work with
TPU DistributionStrategy. Try at your own risk.
Args:
model: `tf.keras.Model` object. Can be Functional, Sequential, or
sub-classed.
input_tensors: Optional list of input tensors to build the model upon. If
not provided, placeholders will be created.
target_tensors: Optional list of target tensors for compiling the model. If
not provided, placeholders will be created.
custom_objects: Optional dictionary mapping string names to custom classes
or functions.
compile_clone: Boolean, whether to compile model clone (default `True`).
in_place_reset: Boolean, whether to reset the model in place. Only used if
the model is a subclassed model. In the case of a subclassed model,
this argument must be set to `True` (default `False`). To restore the
original model, use the function
`in_place_subclassed_model_state_restoration(model)`.
optimizer_iterations: An iterations variable that will be incremented by the
optimizer if the clone is compiled. This argument is used when a Keras
model is cloned into an Estimator model function, because Estimators
create their own global step variable.
optimizer_config: Optimizer config dictionary returned from `get_config()`.
This argument should be defined if `clone_and_build_model` is called in
a different graph or session from the original model, and the optimizer is
an instance of `OptimizerV2`.
Returns:
Clone of the model.
Raises:
ValueError: Cloning fails in the following cases
- cloning a subclassed model with `in_place_reset` set to False.
- compiling the clone when the original model has not been compiled.
"""
# Grab optimizer now, as we reset-in-place for subclassed models, but
# want to maintain access to the original optimizer.
orig_optimizer = model.optimizer
if compile_clone and not orig_optimizer:
raise ValueError(
'Error when cloning model: compile_clone was set to True, but the '
'original model has not been compiled.')
if model._is_graph_network or isinstance(model, Sequential):
if custom_objects:
with CustomObjectScope(custom_objects):
clone = clone_model(model, input_tensors=input_tensors)
else:
clone = clone_model(model, input_tensors=input_tensors)
if all([isinstance(clone, Sequential),
not clone._is_graph_network,
getattr(model, '_build_input_shape', None) is not None]):
# Set model inputs to build the model and add input/output properties.
# TODO(kathywu): Add multiple placeholders to handle edge case where
# sequential model has multiple inputs.
clone._set_inputs(
K.placeholder(model._build_input_shape, dtype=model.inputs[0].dtype))
else:
if not in_place_reset:
raise ValueError(
'This model is a subclassed model. '
'Such a model cannot be cloned, but there is a workaround where '
'the model is reset in-place. To use this, please set the argument '
'`in_place_reset` to `True`. This will reset the attributes in the '
'original model. To restore the attributes, call '
'`in_place_subclassed_model_state_restoration(model)`.')
clone = model
_in_place_subclassed_model_reset(clone)
if input_tensors is not None:
if isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1:
input_tensors = input_tensors[0]
clone._set_inputs(input_tensors)
if compile_clone:
if isinstance(orig_optimizer, optimizers.TFOptimizer):
optimizer = optimizers.TFOptimizer(
orig_optimizer.optimizer, optimizer_iterations)
K.track_tf_optimizer(optimizer)
else:
optimizer_config = optimizer_config or orig_optimizer.get_config()
optimizer = orig_optimizer.__class__.from_config(optimizer_config)
if optimizer_iterations is not None:
optimizer.iterations = optimizer_iterations
clone.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=target_tensors)
return clone
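# Illustrative usage sketch (not part of the original module). It assumes a small
# compiled functional model; the clone is recompiled with the original settings.
def _clone_and_build_usage_sketch():
  """Hypothetical example of `clone_and_build_model` on a compiled model."""
  from tensorflow.python import keras  # local import keeps the sketch self-contained
  inputs = keras.Input(shape=(8,))
  outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
  original = keras.Model(inputs, outputs)
  original.compile(optimizer='sgd', loss='binary_crossentropy')
  # Graph-network models do not need `in_place_reset`; the clone is compiled
  # with the same loss and a fresh copy of the optimizer.
  return clone_and_build_model(original, compile_clone=True)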
| tensorflow-master | tensorflow/python/keras/models.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import profiler
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.callbacks.TensorBoard'])
class TensorBoard(callbacks.Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for histograms
computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding layers
will be saved. If set to 0, embeddings won't be computed. Data to be
visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
      embeddings_layer_names: a list of names of layers to keep an eye on. If
        None or an empty list, all the embedding layers will be watched.
      embeddings_metadata: a dictionary which maps layer names to a file name
        in which metadata for this embedding layer is saved. See the
        [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
        about the metadata file format. If the same metadata file is used for
        all embedding layers, a single string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single input)
or list of Numpy arrays (if the model has multiple inputs). Learn [more
about
embeddings](https://www.tensorflow.org/programmers_guide/embedding)
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
profile_batch=2):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and context.executing_eagerly():
logging.warning(
          UserWarning('Weight and gradient histograms not supported for eager '
'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# One profiler session is running if it is True.
self._is_profiling = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if context.executing_eagerly():
self.writer = summary_ops_v2.create_file_writer(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
summary_ops_v2.graph(K.get_graph(), step=0)
elif self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf_summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not context.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf_summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from tensorflow.python.keras.engine import training_utils # pylint: disable=g-import-not-at-top
self.embeddings_data = training_utils.standardize_input_data(
self.embeddings_data, model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
self.step = step = array_ops.placeholder(dtypes.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = array_ops.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = variables.Variable(
array_ops.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = state_ops.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = saver.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
                          'TensorBoard integration is complete.')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Arguments:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if context.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.always_record_summaries():
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary_ops_v2.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
    Performs profiling if the current batch matches `profile_batch`.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_profiling:
profiler.save(self.log_dir, profiler.stop())
self._is_profiling = False
elif (not self._is_profiling and
self._total_batches_seen == self._profile_batch - 1):
profiler.start()
self._is_profiling = True
def on_train_begin(self, logs=None):
if self._profile_batch == 1:
profiler.start()
self._is_profiling = True
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._epoch = epoch
# pylint: disable=protected-access
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
# pylint: enable=protected-access
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
if self.update_freq == 'epoch':
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
# pylint: disable=protected-access
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
# pylint: enable=protected-access
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
        # the `embeddings_data` explicitly. This design allows passing
# arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = K.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(K.learning_phase(), int):
feed_dict[K.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
if self._is_profiling:
profiler.save(self.log_dir, profiler.stop())
self._is_profiling = False
self.writer.close()
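# Illustrative usage sketch (not part of the original module). `model`, the
# training arrays and './logs' are hypothetical example inputs.
def _tensorboard_callback_usage_sketch(model, x_train, y_train):
  """Hypothetical example wiring the v1 `TensorBoard` callback into `fit`."""
  tensorboard = TensorBoard(
      log_dir='./logs',
      histogram_freq=1,  # histograms need validation data (graph mode only)
      write_graph=True,
      update_freq='epoch')
  model.fit(
      x_train,
      y_train,
      validation_split=0.2,
      epochs=2,
      callbacks=[tensorboard])
  return tensorboard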
| tensorflow-master | tensorflow/python/keras/callbacks_v1.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import callbacks_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adam
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class TestTensorBoardV1(test.TestCase):
@test_util.run_deprecated_v1
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks_v1.TensorBoard(
log_dir=temp_dir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [
callbacks_v1.TensorBoard(
log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True,
write_grads=True,
batch_size=5)
]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj, _):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
callbacks_v1.TensorBoard._init_writer = _init_writer
tsb = callbacks_v1.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks_v1.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0)
self.assertTrue(os.path.exists(tmpdir))
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
callbacks_v1.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0})
tb_cbk.on_train_end()
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {'acc': 10.0})
tb_cbk.on_train_end()
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy',
optimizer=adam.AdamOptimizer(0.01),
metrics=['accuracy'])
cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif 'epoch_' in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
tb_cbk.on_train_end()
# Batch mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
tb_cbk.on_train_end()
# Integer mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
tb_cbk.on_train_end()
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/callbacks_v1_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
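# Illustrative usage sketch (not part of the original module); the sizes below
# are arbitrary example values.
def _get_test_data_usage_sketch():
  """Hypothetical example: generate a small classification dataset."""
  (x_train, y_train), (x_test, y_test) = get_test_data(
      train_samples=20,
      test_samples=10,
      input_shape=(4,),
      num_classes=3,
      random_seed=1337)
  # x_train has shape (20, 4) and y_train holds integer labels in [0, 3).
  return (x_train, y_train), (x_test, y_test)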
@test_util.use_deterministic_cudnn
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, validate_training=True,
adapt_data=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
    expected_output: Numpy array of the expected output data.
expected_output_dtype: Data type expected for the output.
validate_training: Whether to attempt to validate training on this layer.
This might be set to False for non-differentiable layers that output
string or integer values.
adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
be tested for this layer. This is only relevant for PreprocessingLayers.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
  # test get_weights, set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
  # test instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
keras.backend.dtype(y),
expected_output_dtype,
kwargs))
# check shape inference
model = keras.models.Model(x, y)
expected_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
expected_output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
def compare_shapes(expected, actual):
for expected_dim, actual_dim in zip(expected, actual):
if expected_dim is not None and expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
compare_shapes(expected_output_shape, actual_output_shape)
compare_shapes(expected_output_signature.shape, actual_output_shape)
if expected_output_signature.dtype != actual_output.dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output_dtype='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual_output.dtype,
expected_output_signature.dtype, kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=2e-3)
# test training mode (e.g. useful for dropout tests)
# Rebuild the model to avoid the graph being reused between predict() and
  # train(). This was causing errors for layers with a Defun as their body.
# See b/120160788 for more details. This should be mitigated after 2.0.
if validate_training:
model = keras.models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
model = keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=2e-3)
# for further checks in the caller function
return actual_output
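# Illustrative usage sketch (not part of the original module). The kwargs are
# arbitrary; `layer_test` compiles a model, so the tests in this codebase
# usually wrap it in `run_eagerly_scope()` (defined below), although the scope
# is optional.
def _layer_test_usage_sketch():
  """Hypothetical example: exercise a Dense layer through `layer_test`."""
  with run_eagerly_scope(False):
    return layer_test(
        keras.layers.Dense,
        kwargs={'units': 3, 'activation': 'relu'},
        input_shape=(2, 4))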
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
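# A minimal usage sketch (illustrative only, not part of the original module):
# code under test reads the active model type via `get_model_type()` while the
# scope is entered, and the previous value is restored on exit. The helper name
# below is hypothetical and is never called at import time.
def _example_model_type_scope_usage():
  with model_type_scope('functional'):
    assert get_model_type() == 'functional'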
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
    # Restore run_eagerly to its initial value.
_thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
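# Illustrative sketch (not part of the original module): `should_run_eagerly()`
# is only meaningful inside `run_eagerly_scope()`, e.g. when compiling a model
# in a test utility. The helper name below is hypothetical and is never called
# at import time; it assumes `model` is an already-built Keras model.
def _example_run_eagerly_scope_usage(model):
  with run_eagerly_scope(True):
    model.compile('rmsprop', 'mse', run_eagerly=should_run_eagerly())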
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = keras.models.Sequential()
if input_dim:
model.add(keras.layers.Dense(num_hidden, activation='relu',
input_dim=input_dim))
else:
model.add(keras.layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(keras.layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = keras.Input(shape=(input_dim,))
outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLP, self).__init__()
self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(keras.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
class _SubclassModel(keras.Model):
"""A Keras subclass model."""
def __init__(self, layers):
super(_SubclassModel, self).__init__()
    # Note that clone and build don't support lists of layers in subclassed
    # models. Adding each layer directly here.
for i, layer in enumerate(layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(layers)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(keras.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func):
super(_SubclassModelCustomBuild, self).__init__()
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
layers = []
for layer in self._layer_generating_func():
layers.append(layer)
self.all_layers = layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(layers, input_shape=None, input_dtype=None):
"""Builds a model from a sequence of layers."""
model_type = get_model_type()
if model_type == 'subclass':
return _SubclassModel(layers)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: layers
return _SubclassModelCustomBuild(layer_generating_func)
if model_type == 'sequential':
model = keras.models.Sequential()
if input_shape:
model.add(keras.layers.InputLayer(input_shape=input_shape,
dtype=input_dtype))
for layer in layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = keras.Input(shape=input_shape, dtype=input_dtype)
outputs = inputs
for layer in layers:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
class _MultiIOSubclassModel(keras.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None):
super(_MultiIOSubclassModel, self).__init__()
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(keras.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
  model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
    A multi-io model of the type specified by `get_model_type`, built from
    the provided branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
  This is only necessary until the v2 optimizers are the default, as we are
  testing in Eager, and Eager + v1 optimizers fail tests. Once v2 is the
  default, the optimizer name strings alone should be sufficient, and this
  mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
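# Illustrative usage sketch (not part of the original module):
#   get_v2_optimizer('adam', learning_rate=0.001)  # -> adam_v2.Adam instance
#   get_v2_optimizer('not_an_optimizer')           # -> raises ValueError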
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
    # In V1 eager mode and in V2, variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
| tensorflow-master | tensorflow/python/keras/testing_utils.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as Keras can't find any activation function with
# the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name.
_TF_ACTIVATIONS_V2 = {
'softmax_v2': 'softmax',
}
@keras_export('keras.activations.softmax')
def softmax(x, axis=-1):
"""The softmax activation function transforms the outputs so that all values are in
range (0, 1) and sum to 1. It is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution. The softmax of x is calculated by
exp(x)/tf.reduce_sum(exp(x)).
Arguments:
x : Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
Returns:
Tensor, output of softmax transformation (all values are non-negative
and sum to 1).
Raises:
ValueError: In case `dim(x) == 1`.
"""
ndim = K.ndim(x)
if ndim == 2:
return nn.softmax(x)
elif ndim > 2:
e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))
s = math_ops.reduce_sum(e, axis=axis, keepdims=True)
return e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D. '
'Received input: %s' % (x,))
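# Worked example (illustrative note, not part of the original module):
# softmax([1.0, 2.0, 3.0]) ~= [0.0900, 0.2447, 0.6652]; each entry is
# exp(x_i) / sum_j exp(x_j), so the outputs are positive and sum to 1.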
@keras_export('keras.activations.elu')
def elu(x, alpha=1.0):
"""Exponential linear unit.
Arguments:
x: Input tensor.
alpha: A scalar, slope of negative section.
Returns:
The exponential linear activation: `x` if `x > 0` and
`alpha * (exp(x)-1)` if `x < 0`.
Reference:
- [Fast and Accurate Deep Network Learning by Exponential
Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
"""
return K.elu(x, alpha)
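# Worked example (illustrative note, not part of the original module): with the
# default alpha=1.0, elu(2.0) = 2.0 and elu(-1.0) = exp(-1) - 1 ~= -0.6321.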
@keras_export('keras.activations.selu')
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is:
`scale * x` if `x > 0` and `scale * alpha * (exp(x) - 1)` if `x < 0`
where `alpha` and `scale` are pre-defined constants
(`alpha = 1.67326324`
and `scale = 1.05070098`).
The SELU activation function multiplies `scale` > 1 with the
`[elu](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/activations/elu)`
(Exponential Linear Unit (ELU)) to ensure a slope larger than one
for positive net inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see [`lecun_normal` initialization]
(https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal))
and the number of inputs is "large enough"
(see references for more information).

(Courtesy: Blog on Towards DataScience at
https://towardsdatascience.com/selu-make-fnns-great-again-snn-8d61526802a9)
Example Usage:
```python3
n_classes = 10 #10-class problem
model = models.Sequential()
  model.add(Dense(64, kernel_initializer='lecun_normal', activation='selu',
                  input_shape=(28, 28, 1)))
model.add(Dense(32, kernel_initializer='lecun_normal', activation='selu'))
model.add(Dense(16, kernel_initializer='lecun_normal', activation='selu'))
model.add(Dense(n_classes, activation='softmax'))
```
Arguments:
x: A tensor or variable to compute the activation function for.
Returns:
The scaled exponential unit activation: `scale * elu(x, alpha)`.
# Note
- To be used together with the initialization "[lecun_normal]
(https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal)".
- To be used together with the dropout variant "[AlphaDropout]
(https://www.tensorflow.org/api_docs/python/tf/keras/layers/AlphaDropout)".
References:
[Self-Normalizing Neural Networks (Klambauer et al, 2017)]
(https://arxiv.org/abs/1706.02515)
"""
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * K.elu(x, alpha)
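# Worked example (illustrative note, not part of the original module):
# selu(1.0) = scale * 1.0 ~= 1.0507 and
# selu(-1.0) = scale * alpha * (exp(-1) - 1) ~= -1.1113; for very negative
# inputs selu saturates near -scale * alpha ~= -1.7581.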
@keras_export('keras.activations.softplus')
def softplus(x):
"""Softplus activation function.
Arguments:
x: Input tensor.
Returns:
The softplus activation: `log(exp(x) + 1)`.
"""
return nn.softplus(x)
@keras_export('keras.activations.softsign')
def softsign(x):
"""Softsign activation function.
Arguments:
x: Input tensor.
Returns:
      The softsign activation: `x / (abs(x) + 1)`.
"""
return nn.softsign(x)
@keras_export('keras.activations.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified Linear Unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
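# Worked examples (illustrative notes, not part of the original module):
#   relu(-3.0) = 0.0 and relu(3.0) = 3.0 with the defaults;
#   relu(-3.0, alpha=0.1) = 0.1 * (-3.0 - 0) = -0.3;
#   relu(10.0, max_value=6.0) = 6.0;
#   relu(3.0, threshold=5.0) = 0.0, while relu(7.0, threshold=5.0) = 7.0.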
@keras_export('keras.activations.tanh')
def tanh(x):
"""Hyperbolic Tangent (tanh) activation function.
For example:
```python
# Constant 1-D tensor populated with value list.
  a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
  b = tf.keras.activations.tanh(a)
  # b ~= [-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547]
```
Arguments:
x: Input tensor.
Returns:
A tensor of same shape and dtype of input `x`.
The tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) -
exp(-x))/(exp(x) + exp(-x)))`.
"""
return nn.tanh(x)
@keras_export('keras.activations.sigmoid')
def sigmoid(x):
"""Sigmoid.
Applies the sigmoid activation function. The sigmoid function is defined as
1 divided by (1 + exp(-x)). It's curve is like an "S" and is like a smoothed
version of the Heaviside (Unit Step Function) function. For small values
(<-5) the sigmoid returns a value close to zero and for larger values (>5)
the result of the function gets close to 1.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
Sigmoid activation function.
Arguments:
x: Input tensor.
Returns:
The sigmoid activation: `(1.0 / (1.0 + exp(-x)))`.
"""
return nn.sigmoid(x)
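# Worked example (illustrative note, not part of the original module):
# sigmoid(0.0) = 0.5, sigmoid(2.0) ~= 0.8808 and sigmoid(-2.0) ~= 0.1192.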
@keras_export('keras.activations.exponential')
def exponential(x):
"""Exponential activation function.
Arguments:
x: Input tensor.
Returns:
The exponential activation: `exp(x)`.
"""
return math_ops.exp(x)
@keras_export('keras.activations.hard_sigmoid')
def hard_sigmoid(x):
"""Hard sigmoid activation function.
Faster to compute than sigmoid activation.
Arguments:
x: Input tensor.
Returns:
Hard sigmoid activation:
- `0` if `x < -2.5`
- `1` if `x > 2.5`
- `0.2 * x + 0.5` if `-2.5 <= x <= 2.5`.
"""
return K.hard_sigmoid(x)
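# Worked example (illustrative note, not part of the original module):
# hard_sigmoid(-3.0) = 0.0, hard_sigmoid(0.0) = 0.5, hard_sigmoid(1.0) = 0.7
# and hard_sigmoid(3.0) = 1.0, following the piecewise definition above.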
@keras_export('keras.activations.linear')
def linear(x):
"""Linear activation function.
Arguments:
x: Input tensor.
Returns:
The linear activation: `x`.
"""
return x
@keras_export('keras.activations.serialize')
def serialize(activation):
if (hasattr(activation, '__name__')
and activation.__name__ in _TF_ACTIVATIONS_V2):
return _TF_ACTIVATIONS_V2[activation.__name__]
return serialize_keras_object(activation)
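# Illustrative note (not part of the original module): for a function whose
# __name__ is 'softmax_v2' (e.g. the v2 softmax described in the module comment
# above), serialize() returns the canonical string 'softmax' so that it can be
# deserialized later; any other callable falls through to
# serialize_keras_object.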
@keras_export('keras.activations.deserialize')
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='activation function')
@keras_export('keras.activations.get')
def get(identifier):
if identifier is None:
return linear
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
elif callable(identifier):
return identifier
elif isinstance(identifier, dict):
return deserialize_keras_object(
identifier,
printable_module_name='activation')
else:
raise TypeError(
'Could not interpret activation function identifier: {}'.format(
repr(identifier)))
| tensorflow-master | tensorflow/python/keras/activations.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import weakref
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.adam import AdamOptimizer
def _get_model(input_dim, num_hidden, output_dim):
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(keras.layers.Dense(output_dim, activation='softmax'))
return model
class KerasOptimizersTest(test.TestCase):
def _test_optimizer(self, optimizer, target=0.75):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = _get_model(x_train.shape[1], 20, y_train.shape[1])
model.compile(
loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
np.testing.assert_equal(
keras.backend.get_value(model.optimizer.iterations), 0)
history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
np.testing.assert_equal(
keras.backend.get_value(model.optimizer.iterations),
126) # 63 steps per epoch
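    # The 126 iterations asserted above come from ceil(1000 / 16) = 63 steps
    # per epoch with batch_size=16, times 2 epochs.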
self.assertGreaterEqual(history.history['acc'][-1], target)
config = keras.optimizers.serialize(optimizer)
optim = keras.optimizers.deserialize(config)
new_config = keras.optimizers.serialize(optim)
new_config['class_name'] = new_config['class_name'].lower()
new_config['config'].pop('name', None)
if 'amsgrad' not in config['config']:
new_config['config'].pop('amsgrad', None)
if 'decay' in new_config['config'] and 'schedule_decay' in config['config']:
new_config['config']['schedule_decay'] = new_config['config'].pop('decay')
if 'momentum' not in config['config']:
new_config['config'].pop('momentum', None)
if 'centered' not in config['config']:
new_config['config'].pop('centered', None)
self.assertDictEqual(config, new_config)
# Test constraints.
model = keras.models.Sequential()
dense = keras.layers.Dense(
10,
input_shape=(x_train.shape[1],),
kernel_constraint=lambda x: 0. * x + 1.,
bias_constraint=lambda x: 0. * x + 2.,
activation='relu')
model.add(dense)
model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
np.testing.assert_equal(
keras.backend.get_value(model.optimizer.iterations),
126) # Using same optimizer from before
model.train_on_batch(x_train[:10], y_train[:10])
np.testing.assert_equal(
keras.backend.get_value(model.optimizer.iterations), 127)
kernel, bias = dense.get_weights()
np.testing.assert_allclose(kernel, 1., atol=1e-3)
np.testing.assert_allclose(bias, 2., atol=1e-3)
def test_sgd(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.SGD())
def test_momentum(self):
with self.cached_session():
self._test_optimizer(
keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))
def test_rmsprop(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.RMSprop())
self._test_optimizer(keras.optimizers.RMSprop(decay=1e-3))
def test_adagrad(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.Adagrad())
self._test_optimizer(keras.optimizers.Adagrad(decay=1e-3))
def test_adadelta(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.Adadelta(), target=0.6)
# Accuracy seems dependent on the initialization. Even adding
# tf.compat.v1.Print nodes in the graph seemed to affect the
# initialization seed, and hence the accuracy.
self._test_optimizer(keras.optimizers.Adadelta(decay=1e-3), target=0.4)
def test_adam(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.Adam())
# Accuracy seems dependent on the seed initialization.
# TODO(b/121051441): fix test flakiness.
self._test_optimizer(keras.optimizers.Adam(decay=1e-3), target=0.73)
self._test_optimizer(keras.optimizers.Adam(amsgrad=True))
def test_adamax(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.Adamax())
self._test_optimizer(keras.optimizers.Adamax(decay=1e-3))
def test_nadam(self):
with self.cached_session():
self._test_optimizer(keras.optimizers.Nadam())
def test_clipnorm(self):
with self.cached_session():
self._test_optimizer(
keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=0.5))
def test_clipvalue(self):
with self.cached_session():
self._test_optimizer(
keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5))
def test_tf_optimizer(self):
optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
model = keras.models.Sequential()
model.add(keras.layers.Dense(
2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1)))
# This is possible
model.compile(loss='mean_squared_error', optimizer=optimizer)
keras.backend.track_tf_optimizer(optimizer)
model.fit(np.random.random((5, 3)),
np.random.random((5, 2)),
epochs=1,
batch_size=5,
verbose=0)
# not supported
with self.assertRaises(NotImplementedError):
_ = optimizer.weights
with self.assertRaises(NotImplementedError):
optimizer.get_config()
with self.assertRaises(NotImplementedError):
optimizer.from_config(None)
def test_optimizer_garbage_collection(self):
graph = ops.Graph()
with graph.as_default():
optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
keras.backend.track_tf_optimizer(optimizer)
optimizer_weak = weakref.ref(optimizer)
graph_weak = weakref.ref(graph)
del graph, optimizer
gc.collect()
# Check that the weak references are dead now.
self.assertIs(graph_weak(), None)
self.assertIs(optimizer_weak(), None)
def test_tf_optimizer_iterations(self):
with self.cached_session():
optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
model = keras.models.Sequential()
model.add(keras.layers.Dense(
2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1)))
model.compile(loss='mean_squared_error', optimizer=optimizer)
keras.backend.track_tf_optimizer(optimizer)
self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 0)
model.fit(np.random.random((55, 3)),
np.random.random((55, 2)),
epochs=1,
batch_size=5,
verbose=0)
self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 11)
if not context.executing_eagerly():
# TODO(kathywu): investigate why training with an array input and
# setting the argument steps_per_epoch does not work in eager mode.
model.fit(np.random.random((20, 3)),
np.random.random((20, 2)),
steps_per_epoch=8,
verbose=0)
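        # steps_per_epoch=8 adds 8 more updates to the 11 performed by the
        # previous fit, so the iteration count checked below should be 19.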
self.assertEqual(
keras.backend.get_value(model.optimizer.iterations), 19)
def test_negative_clipvalue_or_clipnorm(self):
with self.assertRaises(ValueError):
_ = keras.optimizers.SGD(lr=0.01, clipvalue=-0.5)
with self.assertRaises(ValueError):
_ = keras.optimizers.Adam(clipnorm=-2.0)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/optimizers_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the counts of times each callback method was
      run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=30,
period=100) # The period should be ignored (this test tests this).
]
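    # With 10 training samples per epoch, an integer save_freq=30 corresponds
    # to one checkpoint every 3 epochs, so the assertions below expect files
    # only for epochs 3, 6 and 9; the period=100 argument is ignored.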
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
def _run_load_weights_on_restart_test_common_iterations(self):
def get_input_datasets():
# Simple training input.
train_input = [[1]] * 16
train_label = [[0]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
class Bias(base_layer.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
initial_epochs = 3
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period ensuring the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are close, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in the epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
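      # The scheduler sets lr = 1 / (1 + epoch); the last of the 5 epochs has
      # index 4, so the final learning rate checked above is 1 / 5 = 0.2.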
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
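      # data_generator (below) yields real batches until it has produced
      # 3 * len(x_train) of them, then switches to all-NaN batches so that
      # TerminateOnNaN stops training while CSVLogger is still writing.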
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, \r\n line endings can make csv.reader yield an empty
          # row after each line. Skip empty rows.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
    ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
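# Minimal usage sketch (illustrative only; the logdir path is hypothetical):
#   summaries = list_summaries('/tmp/logs')
#   assert _ObservedSummary(logdir='/tmp/logs/train', tag='epoch_loss') in summaries.scalars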
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
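  # Example (sketch): a functional-model histogram tagged 'dense/kernel_0'
  # becomes 'kernel_0'; for a subclassed model two leading components are
  # stripped, so 'model/dense/kernel_0' also becomes 'kernel_0'.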
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
    # The trace was requested only for batch 10000, which is never reached, so
    # no trace summaries should have been written.
self.assertEmpty(summary_file.tensors)
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
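    # The pattern is filled in with str.format-style fields, e.g. batch=3 and
    # epoch=2 yield 'f.batch03epoch02.h5' (the first file written below).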
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creation times.
time.sleep(2)
f.write('foo bar')
# Ensure the files have been actually written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creation times.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
checkpoint_management.update_checkpoint_state_internal(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/callbacks_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class KerasInitializersTest(test.TestCase):
def _runner(self, init, shape, target_mean=None, target_std=None,
target_max=None, target_min=None):
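    # Note: the target_* arguments are accepted for call-site symmetry but are
    # not asserted on here; only determinism of the config round-trip is
    # checked.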
variable = keras.backend.variable(init(shape))
output = keras.backend.get_value(variable)
# Test serialization (assumes deterministic behavior).
config = init.get_config()
reconstructed_init = init.__class__.from_config(config)
variable = keras.backend.variable(reconstructed_init(shape))
output_2 = keras.backend.get_value(variable)
self.assertAllClose(output, output_2, atol=1e-4)
def test_uniform(self):
tensor_shape = (9, 6, 7)
with self.cached_session():
self._runner(
keras.initializers.RandomUniformV2(minval=-1, maxval=1, seed=124),
tensor_shape,
target_mean=0.,
target_max=1,
target_min=-1)
def test_normal(self):
tensor_shape = (8, 12, 99)
with self.cached_session():
self._runner(
keras.initializers.RandomNormalV2(mean=0, stddev=1, seed=153),
tensor_shape,
target_mean=0.,
target_std=1)
def test_truncated_normal(self):
tensor_shape = (12, 99, 7)
with self.cached_session():
self._runner(
keras.initializers.TruncatedNormalV2(mean=0, stddev=1, seed=126),
tensor_shape,
target_mean=0.,
target_max=2,
target_min=-2)
def test_constant(self):
tensor_shape = (5, 6, 4)
with self.cached_session():
self._runner(
keras.initializers.ConstantV2(2.),
tensor_shape,
target_mean=2,
target_max=2,
target_min=2)
def test_lecun_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
keras.initializers.lecun_uniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
keras.initializers.GlorotUniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
keras.initializers.he_uniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_lecun_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
keras.initializers.lecun_normalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
keras.initializers.GlorotNormalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
keras.initializers.he_normalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_orthogonal(self):
tensor_shape = (20, 20)
with self.cached_session():
self._runner(
keras.initializers.OrthogonalV2(seed=123),
tensor_shape,
target_mean=0.)
def test_identity(self):
with self.cached_session():
tensor_shape = (3, 4, 5)
with self.assertRaises(ValueError):
self._runner(
keras.initializers.IdentityV2(),
tensor_shape,
target_mean=1. / tensor_shape[0],
target_max=1.)
tensor_shape = (3, 3)
self._runner(
keras.initializers.IdentityV2(),
tensor_shape,
target_mean=1. / tensor_shape[0],
target_max=1.)
def test_zero(self):
tensor_shape = (4, 5)
with self.cached_session():
self._runner(
keras.initializers.ZerosV2(),
tensor_shape,
target_mean=0.,
target_max=0.)
def test_one(self):
tensor_shape = (4, 5)
with self.cached_session():
self._runner(
keras.initializers.OnesV2(),
tensor_shape,
target_mean=1.,
target_max=1.)
def test_default_random_uniform(self):
ru = keras.initializers.get('uniform')
self.assertEqual(ru.minval, -0.05)
self.assertEqual(ru.maxval, 0.05)
def test_default_random_normal(self):
rn = keras.initializers.get('normal')
self.assertEqual(rn.mean, 0.0)
self.assertEqual(rn.stddev, 0.05)
def test_default_truncated_normal(self):
tn = keras.initializers.get('truncated_normal')
self.assertEqual(tn.mean, 0.0)
self.assertEqual(tn.stddev, 0.05)
def test_initializer_v2_get(self):
tf2_force_enabled = tf2._force_enable # pylint: disable=protected-access
try:
tf2.enable()
rn = keras.initializers.get('random_normal')
self.assertIn('init_ops_v2', rn.__class__.__module__)
finally:
tf2._force_enable = tf2_force_enabled # pylint: disable=protected-access
def test_custom_initializer_saving(self):
def my_initializer(shape, dtype=None):
return array_ops.ones(shape, dtype=dtype)
inputs = keras.Input((10,))
outputs = keras.layers.Dense(1, kernel_initializer=my_initializer)(inputs)
model = keras.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(), custom_objects={'my_initializer': my_initializer})
self.assertEqual(model2.layers[1].kernel_initializer, my_initializer)
@test_util.run_v2_only
def test_load_external_variance_scaling_v2(self):
external_serialized_json = {
'class_name': 'VarianceScaling',
'config': {
'distribution': 'normal',
'mode': 'fan_avg',
'scale': 1.0,
'seed': None
}
}
initializer = keras.initializers.deserialize(external_serialized_json)
self.assertEqual(initializer.distribution, 'truncated_normal')
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/initializers_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(keras_parameterized.TestCase,
parameterized.TestCase):
def create_model(self, kernel_regularizer=None, activity_regularizer=None):
model = keras.models.Sequential()
model.add(keras.layers.Dense(NUM_CLASSES,
kernel_regularizer=kernel_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(DATA_DIM,)))
return model
def get_data(self):
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(DATA_DIM,),
num_classes=NUM_CLASSES)
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
return (x_train, y_train), (x_test, y_test)
def create_multi_input_model_from(self, layer1, layer2):
input_1 = keras.layers.Input(shape=(DATA_DIM,))
input_2 = keras.layers.Input(shape=(DATA_DIM,))
out1 = layer1(input_1)
out2 = layer2(input_2)
out = keras.layers.Average()([out1, out2])
model = keras.models.Model([input_1, input_2], out)
model.add_loss(keras.backend.mean(out2))
model.add_loss(math_ops.reduce_sum(input_1))
return model
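  # The two add_loss() calls above contribute two unconditional losses to every
  # model built with this helper; the loss-count assertions below include them.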
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_kernel_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(kernel_regularizer=regularizer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
('l2_zero', keras.regularizers.l2(0.)),
])
def test_activity_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(activity_regularizer=regularizer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.losses), 1 if context.executing_eagerly() else 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_zero_regularization(self):
# Verifies that training with zero regularization works.
x, y = np.ones((10, 10)), np.ones((10, 3))
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=5, epochs=1)
def test_custom_regularizer_saving(self):
def my_regularizer(weights):
return math_ops.reduce_sum(math_ops.abs(weights))
inputs = keras.Input((10,))
outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs)
model = keras.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(), custom_objects={'my_regularizer': my_regularizer})
self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_layer(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
model = self.create_multi_input_model_from(dense_layer, dense_layer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
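    # Expected: 1 kernel-regularizer loss + 2 activity-regularizer losses (the
    # shared layer is called twice) + 2 add_loss() terms from the helper = 5.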
self.assertEqual(len(model.losses), 5)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_model(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor))
model = self.create_multi_input_model_from(dummy_model, dummy_model)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.losses), 6)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_layer_in_different_models(self, regularizer):
shared_dense = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
models = []
for _ in range(2):
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
unshared_dense = keras.layers.Dense(
NUM_CLASSES, kernel_regularizer=regularizer)
out = unshared_dense(shared_dense(input_tensor))
models.append(keras.models.Model(input_tensor, out))
model = self.create_multi_input_model_from(
layer1=models[0], layer2=models[1])
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.losses), 14)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/regularizers_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
Detailed documentation and user guides are available at
[keras.io](https://keras.io).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.keras import activations
from tensorflow.python.keras import applications
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import callbacks_v1
from tensorflow.python.keras import constraints
from tensorflow.python.keras import datasets
from tensorflow.python.keras import estimator
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import ops
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import preprocessing
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import utils
from tensorflow.python.keras import wrappers
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.models import Sequential
from tensorflow.python.util.tf_export import keras_export
if tf2.enabled():
__version__ = '2.3.0-tf'
else:
__version__ = '2.2.4-tf'
keras_export('keras.__version__').export_constant(__name__, '__version__')
del absolute_import
del division
del print_function
| tensorflow-master | tensorflow/python/keras/__init__.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import Model
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import util as trackable_utils
@test_util.run_all_in_graph_and_eager_modes
class KerasSumTest(test.TestCase):
def test_sum(self):
m = metrics.Sum(name='my_sum')
# check config
self.assertEqual(m.name, 'my_sum')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, dtypes.float32)
self.assertEqual(len(m.variables), 1)
self.evaluate(variables.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
# check reset_states()
m.reset_states()
self.assertEqual(self.evaluate(m.total), 0)
def test_sum_with_sample_weight(self):
m = metrics.Sum(dtype=dtypes.float64)
self.assertEqual(m.dtype, dtypes.float64)
self.evaluate(variables.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50)
self.assertEqual(self.evaluate(m.total), 50)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52., 4) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52., 4)
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1)
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1)
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1)
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2)
# result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2
self.assertAlmostEqual(result, 63.75, 2)
self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)
def test_sum_graph_with_placeholder(self):
with context.graph_mode(), self.cached_session() as sess:
m = metrics.Sum()
v = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
self.evaluate(variables.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(result, 50)
self.assertEqual(self.evaluate(m.total), 50)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(result, 52., 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52., 2)
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Sum()
checkpoint = trackable_utils.Checkpoint(sum=m)
self.evaluate(variables.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint sum object (= 300)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(600., self.evaluate(m.result()))
# restore to a different checkpoint sum object
restore_sum = metrics.Sum()
restore_checkpoint = trackable_utils.Checkpoint(sum=restore_sum)
status = restore_checkpoint.restore(save_path)
restore_update = restore_sum(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(600., self.evaluate(restore_sum.result()))
@keras_parameterized.run_all_keras_modes
class KerasMeanTest(keras_parameterized.TestCase):
# TODO(b/120949004): Re-enable garbage collection check
# @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_mean(self):
m = metrics.Mean(name='my_mean')
# check config
self.assertEqual(m.name, 'my_mean')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, dtypes.float32)
self.assertEqual(len(m.variables), 2)
self.evaluate(variables.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
self.assertEqual(self.evaluate(m.count), 3)
# check reset_states()
m.reset_states()
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# Check save and restore config
m2 = metrics.Mean.from_config(m.get_config())
self.assertEqual(m2.name, 'my_mean')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, dtypes.float32)
self.assertEqual(len(m2.variables), 2)
def test_mean_with_sample_weight(self):
m = metrics.Mean(dtype=dtypes.float64)
self.assertEqual(m.dtype, dtypes.float64)
self.evaluate(variables.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50 / 0.5)
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52 / 1.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6
self.assertEqual(result, 10.45)
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
def test_mean_graph_with_placeholder(self):
with context.graph_mode(), self.cached_session() as sess:
m = metrics.Mean()
v = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
self.evaluate(variables.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
self.assertEqual(result, 50 / 0.5)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
self.assertAlmostEqual(result, 52 / 1.7, 2)
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Mean()
checkpoint = trackable_utils.Checkpoint(mean=m)
self.evaluate(variables.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint mean object
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(200., self.evaluate(m.result()))
# restore to a different checkpoint mean object
restore_mean = metrics.Mean()
restore_checkpoint = trackable_utils.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(200., self.evaluate(restore_mean.result()))
self.assertEqual(3, self.evaluate(restore_mean.count))
def test_multiple_instances(self):
m = metrics.Mean()
m2 = metrics.Mean()
self.assertEqual(m.name, 'mean')
self.assertEqual(m2.name, 'mean')
self.assertEqual([v.name for v in m.variables],
testing_utils.get_expected_metric_variable_names(
['total', 'count']))
self.assertEqual([v.name for v in m2.variables],
testing_utils.get_expected_metric_variable_names(
['total', 'count'], name_suffix='_1'))
self.evaluate(variables.variables_initializer(m.variables))
self.evaluate(variables.variables_initializer(m2.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
self.assertEqual(self.evaluate(m2([63, 10])), 36.5)
self.assertEqual(self.evaluate(m2.total), 73)
self.assertEqual(self.evaluate(m2.count), 2)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
@test_util.run_all_in_graph_and_eager_modes
class KerasAccuracyTest(test.TestCase):
def test_accuracy(self):
acc_obj = metrics.Accuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 4/4
# Check save and restore config
a2 = metrics.Accuracy.from_config(acc_obj.get_config())
self.assertEqual(a2.name, 'my_acc')
self.assertTrue(a2.stateful)
self.assertEqual(len(a2.variables), 2)
self.assertEqual(a2.dtype, dtypes.float32)
# check with sample_weight
result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_accuracy_ragged(self):
acc_obj = metrics.Accuracy(name='my_acc')
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = ragged_factory_ops.constant([[1], [2], [3], [4]])
rt2 = ragged_factory_ops.constant([[1], [2], [3], [4]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 4/4
# check with sample_weight
rt1 = ragged_factory_ops.constant([[2], [1]])
rt2 = ragged_factory_ops.constant([[2], [0]])
sw_ragged = ragged_factory_ops.constant([[0.5], [0.2]])
result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_binary_accuracy(self):
acc_obj = metrics.BinaryAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_pred squeeze
update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertAlmostEqual(result, 0.75, 2) # 3/4
# check y_true squeeze
result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4/6
# check with sample_weight
result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
def test_binary_accuracy_ragged(self):
acc_obj = metrics.BinaryAccuracy(name='my_acc')
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = ragged_factory_ops.constant([[1], [0]])
rt2 = ragged_factory_ops.constant([[1], [0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
    # check that y_true squeeze is only supported for dense tensors; ragged
    # tensors with different ranks raise an error instead.
rt1 = ragged_factory_ops.constant([[[1], [1]]])
rt2 = ragged_factory_ops.constant([[1], [0]])
with self.assertRaises(ValueError):
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
def test_binary_accuracy_threshold(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(variables.variables_initializer(acc_obj.variables))
result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_binary_accuracy_threshold_ragged(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(variables.variables_initializer(acc_obj.variables))
rt1 = ragged_factory_ops.constant([[1], [1], [0], [0]])
rt2 = ragged_factory_ops.constant([[0.9], [0.6], [0.4], [0.8]])
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_categorical_accuracy(self):
acc_obj = metrics.CategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_categorical_accuracy_ragged(self):
acc_obj = metrics.CategoricalAccuracy(name='my_acc')
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = ragged_factory_ops.constant([[0, 0, 1], [0, 1, 0]])
rt2 = ragged_factory_ops.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
rt1 = ragged_factory_ops.constant([[0, 0, 1], [0, 1, 0]])
rt2 = ragged_factory_ops.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]])
sample_weight = ragged_factory_ops.constant([[0.5], [0.2]])
with self.assertRaises(errors_impl.InvalidArgumentError):
result_t = acc_obj(rt1, rt2, sample_weight)
result = self.evaluate(result_t)
def test_sparse_categorical_accuracy(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[2], [1]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_ragged(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# verify that correct value is returned
rt1 = ragged_factory_ops.constant([[2], [1]])
rt2 = ragged_factory_ops.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
with self.assertRaises(errors_impl.InvalidArgumentError):
# sparse_categorical_accuracy is not supported for composite/ragged
# tensors.
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
def test_sparse_categorical_accuracy_mismatched_dims(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
with context.graph_mode(), self.cached_session() as sess:
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
self.evaluate(variables.variables_initializer(acc_obj.variables))
t = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
result_t = acc_obj(t, p, w)
result = sess.run(
result_t,
feed_dict=({
t: [2, 1],
p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
w: [[0.5], [0.2]]
}))
self.assertAlmostEqual(result, 0.71, 2) # 0.5/0.7
@test_util.run_all_in_graph_and_eager_modes
class CosineSimilarityTest(test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = constant_op.constant(self.np_y_true)
self.y_pred = constant_op.constant(self.np_y_pred)
def test_config(self):
cosine_obj = metrics.CosineSimilarity(
axis=2, name='my_cos', dtype=dtypes.int32)
self.assertEqual(cosine_obj.name, 'my_cos')
self.assertEqual(cosine_obj._dtype, dtypes.int32)
# Check save and restore config
cosine_obj2 = metrics.CosineSimilarity.from_config(cosine_obj.get_config())
self.assertEqual(cosine_obj2.name, 'my_cos')
self.assertEqual(cosine_obj2._dtype, dtypes.int32)
def test_unweighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(variables.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_weighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(variables.variables_initializer(cosine_obj.variables))
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true,
self.y_pred,
sample_weight=constant_op.constant(sample_weight))
expected_loss = np.sum(
self.expected_loss * sample_weight) / np.sum(sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = metrics.CosineSimilarity(axis=1)
self.evaluate(variables.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=dtypes.int32)
self.assertEqual(mae_obj.name, 'my_mae')
self.assertEqual(mae_obj._dtype, dtypes.int32)
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, 'my_mae')
self.assertEqual(mae_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(variables.variables_initializer(mae_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mae_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(variables.variables_initializer(mae_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name='my_mape', dtype=dtypes.int32)
self.assertEqual(mape_obj.name, 'my_mape')
self.assertEqual(mape_obj._dtype, dtypes.int32)
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config())
self.assertEqual(mape_obj2.name, 'my_mape')
self.assertEqual(mape_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(variables.variables_initializer(mape_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mape_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mape_obj.result()
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(variables.variables_initializer(mape_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
def test_config(self):
mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=dtypes.int32)
self.assertEqual(mse_obj.name, 'my_mse')
self.assertEqual(mse_obj._dtype, dtypes.int32)
# Check save and restore config
mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
self.assertEqual(mse_obj2.name, 'my_mse')
self.assertEqual(mse_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(variables.variables_initializer(mse_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(variables.variables_initializer(mse_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name='my_msle', dtype=dtypes.int32)
self.assertEqual(msle_obj.name, 'my_msle')
self.assertEqual(msle_obj._dtype, dtypes.int32)
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config())
self.assertEqual(msle_obj2.name, 'my_msle')
self.assertEqual(msle_obj2._dtype, dtypes.int32)
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(variables.variables_initializer(msle_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = msle_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(variables.variables_initializer(msle_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
def test_config(self):
hinge_obj = metrics.Hinge(name='hinge', dtype=dtypes.int32)
self.assertEqual(hinge_obj.name, 'hinge')
self.assertEqual(hinge_obj._dtype, dtypes.int32)
# Check save and restore config
hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())
self.assertEqual(hinge_obj2.name, 'hinge')
self.assertEqual(hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(variables.variables_initializer(hinge_obj.variables))
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# reduced metric = (0.6 + 0.4125) / 2
update_op = hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = hinge_obj.result()
self.assertAllClose(0.506, result, atol=1e-3)
def test_weighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(variables.variables_initializer(hinge_obj.variables))
y_true = constant_op.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
sample_weight = constant_op.constant([1.5, 2.])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted metric = [0.6 * 1.5, 0.4125 * 2]
# reduced metric = (0.6 * 1.5 + 0.4125 * 2) / (1.5 + 2)
result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.493, self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
def test_config(self):
sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=dtypes.int32)
self.assertEqual(sq_hinge_obj.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj._dtype, dtypes.int32)
# Check save and restore config
sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())
self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# reduced metric = (0.485 + 0.2431) / 2
update_op = sq_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = sq_hinge_obj.result()
self.assertAllClose(0.364, result, atol=1e-3)
def test_weighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
y_true = constant_op.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
sample_weight = constant_op.constant([1.5, 2.])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted metric = [0.485 * 1.5, 0.2431 * 2]
# reduced metric = (0.485 * 1.5 + 0.2431 * 2) / (1.5 + 2)
result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.347, self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
def test_config(self):
cat_hinge_obj = metrics.CategoricalHinge(
name='cat_hinge', dtype=dtypes.int32)
self.assertEqual(cat_hinge_obj.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj._dtype, dtypes.int32)
# Check save and restore config
cat_hinge_obj2 = metrics.CategoricalHinge.from_config(
cat_hinge_obj.get_config())
self.assertEqual(cat_hinge_obj2.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(variables.variables_initializer(cat_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cat_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cat_hinge_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(variables.variables_initializer(cat_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class RootMeanSquaredErrorTest(test.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=dtypes.int32)
self.assertEqual(rmse_obj.name, 'rmse')
self.assertEqual(rmse_obj._dtype, dtypes.int32)
rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config())
self.assertEqual(rmse_obj2.name, 'rmse')
self.assertEqual(rmse_obj2._dtype, dtypes.int32)
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(variables.variables_initializer(rmse_obj.variables))
y_true = constant_op.constant((2, 4, 6))
y_pred = constant_op.constant((1, 3, 2))
update_op = rmse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(math.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(variables.variables_initializer(rmse_obj.variables))
y_true = constant_op.constant((2, 4, 6, 8))
y_pred = constant_op.constant((1, 3, 2, 3))
sample_weight = constant_op.constant((0, 1, 0, 1))
result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class TopKCategoricalAccuracyTest(test.TestCase):
def test_config(self):
a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=dtypes.int32)
self.assertEqual(a_obj.name, 'topkca')
self.assertEqual(a_obj._dtype, dtypes.int32)
a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
self.assertEqual(a_obj2.name, 'topkca')
self.assertEqual(a_obj2._dtype, dtypes.int32)
def test_correctness(self):
a_obj = metrics.TopKCategoricalAccuracy()
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([[0, 0, 1], [0, 1, 0]])
y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both samples match
# With `k` < 5.
a_obj = metrics.TopKCategoricalAccuracy(k=1)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_true = constant_op.constant([[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0]])
y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.TopKCategoricalAccuracy(k=6)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.TopKCategoricalAccuracy(k=2)
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
y_pred = constant_op.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = constant_op.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class SparseTopKCategoricalAccuracyTest(test.TestCase):
def test_config(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(
name='stopkca', dtype=dtypes.int32)
self.assertEqual(a_obj.name, 'stopkca')
self.assertEqual(a_obj._dtype, dtypes.int32)
a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
a_obj.get_config())
self.assertEqual(a_obj2.name, 'stopkca')
self.assertEqual(a_obj2._dtype, dtypes.int32)
def test_correctness(self):
a_obj = metrics.SparseTopKCategoricalAccuracy()
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([2, 1])
y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both samples match
# With `k` < 5.
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(k=2)
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([1, 0, 2])
y_pred = constant_op.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = constant_op.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class LogCoshErrorTest(test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
def test_config(self):
logcosh_obj = metrics.LogCoshError(name='logcosh', dtype=dtypes.int32)
self.assertEqual(logcosh_obj.name, 'logcosh')
self.assertEqual(logcosh_obj._dtype, dtypes.int32)
def test_unweighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(variables.variables_initializer(logcosh_obj.variables))
update_op = logcosh_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = logcosh_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(variables.variables_initializer(logcosh_obj.variables))
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
result = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class PoissonTest(test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred))
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
def test_config(self):
poisson_obj = metrics.Poisson(name='poisson', dtype=dtypes.int32)
self.assertEqual(poisson_obj.name, 'poisson')
self.assertEqual(poisson_obj._dtype, dtypes.int32)
poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config())
self.assertEqual(poisson_obj2.name, 'poisson')
self.assertEqual(poisson_obj2._dtype, dtypes.int32)
def test_unweighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(variables.variables_initializer(poisson_obj.variables))
update_op = poisson_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = poisson_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(variables.variables_initializer(poisson_obj.variables))
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
result = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class KLDivergenceTest(test.TestCase):
def setup(self):
y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))
self.batch_size = 2
self.expected_results = np.multiply(y_true, np.log(y_true / y_pred))
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
def test_config(self):
k_obj = metrics.KLDivergence(name='kld', dtype=dtypes.int32)
self.assertEqual(k_obj.name, 'kld')
self.assertEqual(k_obj._dtype, dtypes.int32)
k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config())
self.assertEqual(k_obj2.name, 'kld')
self.assertEqual(k_obj2._dtype, dtypes.int32)
def test_unweighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(variables.variables_initializer(k_obj.variables))
update_op = k_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = k_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(variables.variables_initializer(k_obj.variables))
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / (1.2 + 3.4)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class MeanRelativeErrorTest(test.TestCase):
def test_config(self):
normalizer = constant_op.constant([1, 3], dtype=dtypes.float32)
mre_obj = metrics.MeanRelativeError(normalizer=normalizer, name='mre')
self.assertEqual(mre_obj.name, 'mre')
self.assertArrayNear(self.evaluate(mre_obj.normalizer), [1, 3], 1e-1)
mre_obj2 = metrics.MeanRelativeError.from_config(mre_obj.get_config())
self.assertEqual(mre_obj2.name, 'mre')
self.assertArrayNear(self.evaluate(mre_obj2.normalizer), [1, 3], 1e-1)
def test_unweighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_y_pred - np_y_true), np_y_true))
y_pred = constant_op.constant(np_y_pred, shape=(1, 4), dtype=dtypes.float32)
y_true = constant_op.constant(np_y_true, shape=(1, 4))
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(variables.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_weighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
sample_weight = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32)
rel_errors = np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)
expected_error = np.sum(rel_errors * sample_weight)
y_pred = constant_op.constant(np_y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(np_y_true)
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(variables.variables_initializer(mre_obj.variables))
result = mre_obj(
y_true, y_pred, sample_weight=constant_op.constant(sample_weight))
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_zero_normalizer(self):
y_pred = constant_op.constant([2, 4], dtype=dtypes.float32)
y_true = constant_op.constant([1, 3])
mre_obj = metrics.MeanRelativeError(normalizer=array_ops.zeros_like(y_true))
self.evaluate(variables.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertEqual(self.evaluate(result), 0)
@test_util.run_all_in_graph_and_eager_modes
class MeanIoUTest(test.TestCase):
def test_config(self):
m_obj = metrics.MeanIoU(num_classes=2, name='mean_iou')
self.assertEqual(m_obj.name, 'mean_iou')
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, 'mean_iou')
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_pred = constant_op.constant([0, 1, 0, 1], dtype=dtypes.float32)
y_true = constant_op.constant([0, 0, 1, 1])
sample_weight = constant_op.constant([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = constant_op.constant([[0, 1], [0, 1]], dtype=dtypes.float32)
y_true = constant_op.constant([[0, 0], [1, 1]])
sample_weight = constant_op.constant([[0.2, 0.3], [0.4, 0.1]])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
self.assertAllClose(self.evaluate(m_obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = constant_op.constant([1], dtype=dtypes.float32)
y_true = constant_op.constant([1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(variables.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 1 / (1 + 1 - 1)) / 1
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
class MeanTensorTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_config(self):
m = metrics.MeanTensor(name='mean_by_element')
# check config
self.assertEqual(m.name, 'mean_by_element')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, dtypes.float32)
self.assertEqual(len(m.variables), 0)
with self.assertRaisesRegexp(ValueError, 'does not have any result yet'):
m.result()
self.evaluate(m([[3], [5], [3]]))
self.assertAllEqual(m._shape, [3, 1])
m2 = metrics.MeanTensor.from_config(m.get_config())
self.assertEqual(m2.name, 'mean_by_element')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, dtypes.float32)
self.assertEqual(len(m2.variables), 0)
@test_util.run_in_graph_and_eager_modes
def test_unweighted(self):
m = metrics.MeanTensor(dtype=dtypes.float64)
# check __call__()
self.assertAllClose(self.evaluate(m([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5])
self.assertAllClose(self.evaluate(m.total), [101, 45])
self.assertAllClose(self.evaluate(m.count), [2, 2])
# check reset_states()
m.reset_states()
self.assertAllClose(self.evaluate(m.total), [0, 0])
self.assertAllClose(self.evaluate(m.count), [0, 0])
@test_util.run_in_graph_and_eager_modes
def test_weighted(self):
m = metrics.MeanTensor(dtype=dtypes.float64)
self.assertEqual(m.dtype, dtypes.float64)
# check scalar weight
result_t = m([100, 30], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [100, 30])
self.assertAllClose(self.evaluate(m.total), [50, 15])
self.assertAllClose(self.evaluate(m.count), [0.5, 0.5])
# check non-scalar weights whose rank matches the values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2)
self.assertAllClose(self.evaluate(m.total), [51, 16])
self.assertAllClose(self.evaluate(m.count), [1.5, 0.7])
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2])
self.assertAllClose(self.evaluate(m.total), [51.5, 17])
self.assertAllClose(self.evaluate(m.count), [2, 1.2])
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4])
self.assertAllClose(self.evaluate(m.total), [52.5, 18])
self.assertAllClose(self.evaluate(m.count), [3, 1.4])
# check weights expand
m = metrics.MeanTensor(dtype=dtypes.float64)
self.evaluate(variables.variables_initializer(m.variables))
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAllClose(self.evaluate(result_t), [[1], [5]])
self.assertAllClose(self.evaluate(m.total), [[1], [1]])
self.assertAllClose(self.evaluate(m.count), [[1], [0.2]])
@test_util.run_in_graph_and_eager_modes
def test_invalid_value_shape(self):
m = metrics.MeanTensor(dtype=dtypes.float64)
m([1])
with self.assertRaisesRegexp(
ValueError, 'MeanTensor input values must always have the same shape'):
m([1, 5])
@test_util.run_in_graph_and_eager_modes
def test_build_in_tf_function(self):
"""Ensure that variables are created correctly in a tf function."""
m = metrics.MeanTensor(dtype=dtypes.float64)
@eager_function.defun
def call_metric(x):
return m(x)
self.assertAllClose(self.evaluate(call_metric([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21])
def test_in_keras_model(self):
with context.eager_mode():
class ModelWithMetric(Model):
def __init__(self):
super(ModelWithMetric, self).__init__()
self.dense1 = layers.Dense(
3, activation='relu', kernel_initializer='ones')
self.dense2 = layers.Dense(
1, activation='sigmoid', kernel_initializer='ones')
self.mean_tensor = metrics.MeanTensor()
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
self.mean_tensor(self.dense1.kernel)
return x
model = ModelWithMetric()
model.compile(
loss='mae',
optimizer='rmsprop',
run_eagerly=True)
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y, batch_size=50)
self.assertAllClose(self.evaluate(model.mean_tensor.result()),
np.ones((4, 3)))
self.assertAllClose(self.evaluate(model.mean_tensor.total),
np.full((4, 3), 2))
self.assertAllClose(self.evaluate(model.mean_tensor.count),
np.full((4, 3), 2))
model.evaluate(x, y, batch_size=25)
self.assertAllClose(self.evaluate(model.mean_tensor.result()),
np.ones((4, 3)))
self.assertAllClose(self.evaluate(model.mean_tensor.total),
np.full((4, 3), 4))
self.assertAllClose(self.evaluate(model.mean_tensor.count),
np.full((4, 3), 4))
@test_util.run_all_in_graph_and_eager_modes
class BinaryCrossentropyTest(test.TestCase):
def test_config(self):
bce_obj = metrics.BinaryCrossentropy(
name='bce', dtype=dtypes.int32, label_smoothing=0.2)
self.assertEqual(bce_obj.name, 'bce')
self.assertEqual(bce_obj._dtype, dtypes.int32)
old_config = bce_obj.get_config()
self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)
# Check save and restore config
bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config)
self.assertEqual(bce_obj2.name, 'bce')
self.assertEqual(bce_obj2._dtype, dtypes.int32)
new_config = bce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(variables.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
result = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Reduced metric = 7.665 / 2
self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3)
def test_unweighted_with_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(variables.variables_initializer(bce_obj.variables))
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
y_pred = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
result = bce_obj(y_true, y_pred)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced metric = (0 + 66.666) / 2
self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3)
def test_weighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(variables.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = constant_op.constant([1.5, 2.])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Weighted metric = [7.665 * 1.5, 0]
# Reduced metric = 7.665 * 1.5 / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3)
def test_weighted_from_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(variables.variables_initializer(bce_obj.variables))
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
y_pred = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
sample_weight = constant_op.constant([2., 2.5])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted metric = [0, 66.666 * 2.5]
# Reduced metric = 66.666 * 2.5 / (2 + 2.5)
self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3)
def test_label_smoothing(self):
logits = constant_op.constant(((100., -100., -100.)))
y_true = constant_op.constant(((1, 0, 1)))
label_smoothing = 0.1
# Metric: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
# After label smoothing, label 1 becomes 1 - 0.5L
# label 0 becomes 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = metrics.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
self.evaluate(variables.variables_initializer(bce_obj.variables))
result = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalCrossentropyTest(test.TestCase):
def test_config(self):
cce_obj = metrics.CategoricalCrossentropy(
name='cce', dtype=dtypes.int32, label_smoothing=0.2)
self.assertEqual(cce_obj.name, 'cce')
self.assertEqual(cce_obj._dtype, dtypes.int32)
old_config = cce_obj.get_config()
self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)
# Check save and restore config
cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config)
self.assertEqual(cce_obj2.name, 'cce')
self.assertEqual(cce_obj2._dtype, dtypes.int32)
new_config = cce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(variables.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = cce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Reduced metric = (0.051 + 2.302) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(variables.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = cce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(variables.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = constant_op.constant([1.5, 2.])
result = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Weighted metric = [0.051 * 1.5, 2.302 * 2.]
# Reduced metric = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(variables.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = constant_op.constant([1.5, 2.])
result = cce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_label_smoothing(self):
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
label_smoothing = 0.1
# Label smoothing: z' = z * (1 - L) + L/n,
# where L = label smoothing value and n = num classes
# Label value 1 becomes: 1 - L + L/n
# Label value 0 becomes: L/n
# y_true with label_smoothing = [[0.0333, 0.9333, 0.0333],
# [0.0333, 0.0333, 0.9333]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[-0.26641, -0.00042, -0.29971],
# [-0.23316, -0.00006, -6.53479]]
# xent = [0.56654, 6.76801]
# Reduced xent = (0.56654 + 6.76801) / 2
cce_obj = metrics.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
self.evaluate(variables.variables_initializer(cce_obj.variables))
loss = cce_obj(y_true, logits)
self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class SparseCategoricalCrossentropyTest(test.TestCase):
def test_config(self):
scce_obj = metrics.SparseCategoricalCrossentropy(
name='scce', dtype=dtypes.int32)
self.assertEqual(scce_obj.name, 'scce')
self.assertEqual(scce_obj.dtype, dtypes.int32)
old_config = scce_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config
scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(old_config)
self.assertEqual(scce_obj2.name, 'scce')
self.assertEqual(scce_obj2.dtype, dtypes.int32)
new_config = scce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(variables.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(variables.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = scce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(variables.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = constant_op.constant([1.5, 2.])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Weighted xent = [0.051 * 1.5, 2.302 * 2.]
# Reduced xent = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(variables.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = constant_op.constant([1.5, 2.])
result = scce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_axis(self):
scce_obj = metrics.SparseCategoricalCrossentropy(axis=0)
self.evaluate(variables.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# logits = log(y`) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 0], [1, 0], [0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# sum(exp(logits)) = [1, 1]
# softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# log(softmax) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
class BinaryTruePositives(metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, dtypes.bool)
y_pred = math_ops.cast(y_pred, dtypes.bool)
values = math_ops.logical_and(
math_ops.equal(y_true, True), math_ops.equal(y_pred, True))
values = math_ops.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, dtype=self.dtype)
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
values = math_ops.multiply(values, sample_weight)
self.true_positives.assign_add(math_ops.reduce_sum(values))
def result(self):
return self.true_positives
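# A minimal standalone usage sketch for the custom metric above (illustrative
# only, assuming eager execution; the example values are arbitrary):
#
#   m = BinaryTruePositives()
#   m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
#   m.result()  # -> 2.0 true positives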
@test_util.run_all_in_graph_and_eager_modes
class CustomMetricsTest(test.TestCase):
def test_config(self):
btp_obj = BinaryTruePositives(name='btp', dtype=dtypes.int32)
self.assertEqual(btp_obj.name, 'btp')
self.assertEqual(btp_obj.dtype, dtypes.int32)
# Check save and restore config
btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config())
self.assertEqual(btp_obj2.name, 'btp')
self.assertEqual(btp_obj2.dtype, dtypes.int32)
def test_unweighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(variables.variables_initializer(btp_obj.variables))
y_true = constant_op.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = constant_op.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
update_op = btp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = btp_obj.result()
self.assertEqual(7, self.evaluate(result))
def test_weighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(variables.variables_initializer(btp_obj.variables))
y_true = constant_op.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = constant_op.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
sample_weight = constant_op.constant([[1.], [1.5], [2.], [2.5]])
result = btp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(12, self.evaluate(result))
def _get_model(compile_metrics):
model_layers = [
layers.Dense(3, activation='relu', kernel_initializer='ones'),
layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss='mae',
metrics=compile_metrics,
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ResetStatesTest(keras_parameterized.TestCase):
def test_reset_states_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
def test_reset_states_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
def test_reset_states_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
def test_reset_states_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
def test_reset_states_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
def test_reset_states_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
def test_reset_states_sensitivity_at_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_states_specificity_at_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_states_auc(self):
auc_obj = metrics.AUC(num_thresholds=3)
model = _get_model([auc_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)
def test_reset_states_auc_manual_thresholds(self):
auc_obj = metrics.AUC(thresholds=[0.5])
model = _get_model([auc_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)
def test_reset_states_mean_iou(self):
m_obj = metrics.MeanIoU(num_classes=2)
model = _get_model([m_obj])
x = np.asarray([[0, 0, 0, 0], [1, 1, 1, 1], [1, 0, 1, 0], [0, 1, 0, 1]],
dtype=np.float32)
y = np.asarray([[0], [1], [1], [1]], dtype=np.float32)
model.evaluate(x, y)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
model.evaluate(x, y)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/metrics_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.regularizers.Regularizer')
class Regularizer(object):
"""Regularizer base class.
"""
def __call__(self, x):
return 0.
@classmethod
def from_config(cls, config):
return cls(**config)
@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
"""Regularizer for L1 and L2 regularization.
Arguments:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
"""
def __init__(self, l1=0., l2=0.): # pylint: disable=redefined-outer-name
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
def __call__(self, x):
if not self.l1 and not self.l2:
return K.constant(0.)
regularization = 0.
if self.l1:
regularization += self.l1 * math_ops.reduce_sum(math_ops.abs(x))
if self.l2:
regularization += self.l2 * math_ops.reduce_sum(math_ops.square(x))
return regularization
def get_config(self):
return {'l1': float(self.l1), 'l2': float(self.l2)}
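# Illustrative sketch, not part of the original module: how the combined
# penalty implemented by L1L2 above is computed for a small weight matrix.
# The helper name is an assumption made for this example only.
def _example_l1l2_penalty():
  # Penalty = l1 * sum(|w|) + l2 * sum(w^2)
  #         = 0.01 * (1 + 2 + 3 + 4) + 0.01 * (1 + 4 + 9 + 16)
  #         = 0.1 + 0.3 = 0.4
  w = K.constant([[1., -2.], [3., -4.]])
  return L1L2(l1=0.01, l2=0.01)(w)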
# Aliases.
@keras_export('keras.regularizers.l1')
def l1(l=0.01):
return L1L2(l1=l)
@keras_export('keras.regularizers.l2')
def l2(l=0.01):
return L1L2(l2=l)
@keras_export('keras.regularizers.l1_l2')
def l1_l2(l1=0.01, l2=0.01): # pylint: disable=redefined-outer-name
return L1L2(l1=l1, l2=l2)
@keras_export('keras.regularizers.serialize')
def serialize(regularizer):
return serialize_keras_object(regularizer)
@keras_export('keras.regularizers.deserialize')
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='regularizer')
@keras_export('keras.regularizers.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
identifier = str(identifier)
# We have to special-case functions that return classes.
# TODO(omalleyt): Turn these into classes or class aliases.
special_cases = ['l1', 'l2', 'l1_l2']
if identifier in special_cases:
# Treat like a class.
return deserialize({'class_name': identifier, 'config': {}})
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret regularizer identifier:', identifier)
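# Illustrative usage sketch, not part of the original module: `get` accepts
# strings (including the special-cased 'l1', 'l2' and 'l1_l2' aliases), config
# dicts, and callables. The helper name is an assumption for this example.
def _example_regularizer_lookup():
  by_alias = get('l1_l2')                                # L1L2 with defaults
  by_config = get({'class_name': 'L1L2', 'config': {'l1': 0.0, 'l2': 0.01}})
  passthrough = get(L1L2(l2=0.01))                       # returned unchanged
  return by_alias, by_config, passthrough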
| tensorflow-master | tensorflow/python/keras/regularizers.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests metrics correctness using Keras model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.losses import loss_reduction
from tensorflow.python.platform import test
def get_multi_io_model():
inp_1 = layers.Input(shape=(1,), name='input_1')
inp_2 = layers.Input(shape=(1,), name='input_2')
x = layers.Dense(3, kernel_initializer='ones', trainable=False)
out_1 = layers.Dense(
1, kernel_initializer='ones', name='output_1', trainable=False)
out_2 = layers.Dense(
1, kernel_initializer='ones', name='output_2', trainable=False)
branch_a = [inp_1, x, out_1]
branch_b = [inp_2, x, out_2]
return testing_utils.get_multi_io_model(branch_a, branch_b)
def custom_generator_multi_io():
batch_size = 2
num_samples = 4
inputs = np.asarray([[1.], [2.], [3.], [4.]])
targets = np.asarray([[2.], [4.], [6.], [8.]])
w1 = np.asarray([2., 3., 4., 5.])
w2 = np.asarray([3.5, 2.5, 1.5, 0.5])
i = 0
while True:
batch_index = i * batch_size % num_samples
i += 1
start = batch_index
end = start + batch_size
x = [inputs[start:end], inputs[start:end]]
y = [targets[start:end], targets[start:end]]
w = [w1[start:end], w2[start:end]]
yield x, y, w
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):
def _get_compiled_multi_io_model(self):
model = get_multi_io_model()
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
weighted_metrics=[
metrics.MeanSquaredError(name='mean_squared_error_2')
],
run_eagerly=testing_utils.should_run_eagerly())
return model
def _custom_generator(self):
batch_size = 2
num_samples = 4
inputs = np.asarray([[1.], [2.], [3.], [4.]])
targets = np.asarray([[2.], [4.], [6.], [8.]])
w1 = np.asarray([2., 3., 4., 5.])
w2 = np.asarray([3.5, 2.5, 1.5, 0.5])
i = 0
while True:
batch_index = i * batch_size % num_samples
i += 1
start = batch_index
end = start + batch_size
x = [inputs[start:end], inputs[start:end]]
y = [targets[start:end], targets[start:end]]
w = [w1[start:end], w2[start:end]]
yield x, y, w
def setUp(self):
super(TestMetricsCorrectnessMultiIO, self).setUp()
self.x = np.asarray([[1.], [2.], [3.], [4.]])
self.y = np.asarray([[2.], [4.], [6.], [8.]])
self.weights_1 = np.asarray([2., 3., 4., 5.])
self.weights_2 = np.asarray([3.5, 2.5, 1.5, 0.5])
# y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
# Metric `output_1`, `output_2`:
# Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30,
# Count = 2 + 2
# Result = 7.5
# Weighted metric `output_1`:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130
# Count = (2 + 3) + (4 + 5)
# Result = 9.2857141
# Weighted metric `output_2`:
# Total = ((3 - 2)^2 * 3.5 + (6 - 4)^2 * 2.5) +
# ((9 - 6)^2 * 1.5 + (12 - 8)^2 * 0.5)
# = 35
# Count = (3.5 + 2.5) + (1.5 + 0.5)
# Result = 4.375
# Loss `output_1`:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130
# Count = 2 + 2
# Result = 32.5
# Loss `output_2`:
# Total = ((3 - 2)^2 * 3.5 + (6 - 4)^2 * 2.5) +
# ((9 - 6)^2 * 1.5 + (12 - 8)^2 * 0.5)
# = 35
# Count = 2 + 2
# Result = 8.75
# Total loss = 32.5 + 8.75 = 41.25
wmse = 'mean_squared_error_2'
if not tf2.enabled():
wmse = 'weighted_' + wmse
self.expected_fit_result = {
'output_1_mean_squared_error': [7.5, 7.5],
'output_2_mean_squared_error': [7.5, 7.5],
'output_1_' + wmse: [9.286, 9.286],
'output_2_' + wmse: [4.375, 4.375],
'loss': [41.25, 41.25],
'output_1_loss': [32.5, 32.5],
'output_2_loss': [8.75, 8.75],
}
# In the order: 'loss', 'output_1_loss', 'output_2_loss',
# 'output_1_mean_squared_error', 'output_1_mean_squared_error_2',
# 'output_2_mean_squared_error', 'output_2_mean_squared_error_2'
self.expected_batch_result = [41.25, 32.5, 8.75, 7.5, 9.286, 7.5, 4.375]
def test_fit(self):
model = self._get_compiled_multi_io_model()
history = model.fit([self.x, self.x], [self.y, self.y],
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval(self):
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate([self.x, self.x], [self.y, self.y],
batch_size=2,
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
})
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
    # Verify that the metric value is the same with arbitrary weights and
    # batch sizes.
x = np.random.random((50, 1))
y = np.random.random((50, 1))
w = np.random.random((50,))
mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w], batch_size=5)[3]
mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],
batch_size=10)[3]
self.assertAllClose(mse1, mse2, 1e-3)
def test_train_on_batch(self):
model = self._get_compiled_multi_io_model()
result = model.train_on_batch([self.x, self.x], [self.y, self.y],
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
})
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_test_on_batch(self):
model = self._get_compiled_multi_io_model()
result = model.test_on_batch([self.x, self.x], [self.y, self.y],
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
})
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_fit_generator(self):
model = self._get_compiled_multi_io_model()
history = model.fit_generator(
custom_generator_multi_io(), steps_per_epoch=2, epochs=2)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval_generator(self):
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate_generator(custom_generator_multi_io(), steps=2)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):
def _get_model(self):
x = layers.Dense(3, kernel_initializer='ones', trainable=False)
out = layers.Dense(
1, kernel_initializer='ones', name='output', trainable=False)
model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
weighted_metrics=[
metrics.MeanSquaredError(name='mean_squared_error_2')
],
run_eagerly=testing_utils.should_run_eagerly())
return model
def _custom_generator(self):
batch_size = 2
num_samples = 4
x = np.asarray([[1.], [2.], [3.], [4.]])
y = np.asarray([[2.], [4.], [6.], [8.]])
w = np.asarray([2., 3., 4., 5.])
i = 0
while True:
batch_index = i * batch_size % num_samples
i += 1
start = batch_index
end = start + batch_size
yield x[start:end], y[start:end], w[start:end]
def setUp(self):
super(TestMetricsCorrectnessSingleIO, self).setUp()
self.x = np.asarray([[1.], [2.], [3.], [4.]])
self.y = np.asarray([[2.], [4.], [6.], [8.]])
self.weights = np.asarray([2., 3., 4., 5.])
# y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
# Metric:
# Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30,
# Count = 2 + 2
# Result = 7.5
# Weighted metric:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130
# Count = (2 + 3) + (4 + 5)
# Result = 9.2857141
# Total loss:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130,
# Count = 2 + 2
# Result = 32.5
wmse = 'mean_squared_error_2'
if not tf2.enabled():
wmse = 'weighted_' + wmse
self.expected_fit_result = {
'mean_squared_error': [7.5, 7.5],
wmse: [9.286, 9.286],
'loss': [32.5, 32.5]
}
# In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2'
self.expected_batch_result = [32.5, 7.5, 9.286]
def test_fit(self):
model = self._get_model()
history = model.fit(
self.x,
self.y,
sample_weight=self.weights,
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval(self):
model = self._get_model()
eval_result = model.evaluate(
self.x, self.y, batch_size=2, sample_weight=self.weights)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
    # Verify that the metric value is the same with arbitrary weights and
    # batch sizes.
x = np.random.random((50, 1))
y = np.random.random((50, 1))
w = np.random.random((50,))
mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]
mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]
self.assertAllClose(mse1, mse2, 1e-3)
def test_train_on_batch(self):
model = self._get_model()
result = model.train_on_batch(self.x, self.y, sample_weight=self.weights)
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_test_on_batch(self):
model = self._get_model()
result = model.test_on_batch(self.x, self.y, sample_weight=self.weights)
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_fit_generator(self):
model = self._get_model()
history = model.fit_generator(
self._custom_generator(), steps_per_epoch=2, epochs=2)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval_generator(self):
model = self._get_model()
eval_result = model.evaluate_generator(self._custom_generator(), steps=2)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
@parameterized.parameters([
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,
loss_reduction.ReductionV2.AUTO,
loss_reduction.ReductionV2.SUM
])
class TestOutputLossMetrics(keras_parameterized.TestCase):
def _get_compiled_multi_io_model(self, loss):
model = get_multi_io_model()
model.compile(
optimizer='rmsprop',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly())
return model
def setUp(self):
super(TestOutputLossMetrics, self).setUp()
self.x = np.asarray([[1.], [2.], [3.], [4.]])
self.y = np.asarray([[2.], [4.], [6.], [8.]])
self.weights_1 = np.asarray([2., 3., 4., 5.])
self.weights_2 = np.asarray([3.5, 2.5, 1.5, 0.5])
# y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
# Loss `output_1`:
# Per-sample weighted losses
    #   Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3] = [2, 12]
    #   Batch 2 = [(9 - 6)^2 * 4, (12 - 8)^2 * 5] = [36, 80]
# Result (reduction=SUM) = ((2 + 12) + (36 + 80))/2 = 65
# Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 130 / 4 = 32.5
# Loss `output_2`:
# Per-sample weighted losses
    #   Batch 1 = [(3 - 2)^2 * 3.5, (6 - 4)^2 * 2.5] = [3.5, 10]
    #   Batch 2 = [(9 - 6)^2 * 1.5, (12 - 8)^2 * 0.5] = [13.5, 8]
# Result (reduction=SUM) = ((3.5 + 10) + (13.5 + 8))/2 = 17.5
# Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 35 / 4 = 8.75
    # When reduction is 'NONE', the loss value passed to the optimizer is a
    # vector loss, but what is reported is a scalar: the average of all the
    # values in all the batch vectors.
# Total loss = Output_loss_1 + Output_loss_2
sum_over_batch_size_fit_result = {
'loss': [41.25, 41.25],
'output_1_loss': [32.5, 32.5],
'output_2_loss': [8.75, 8.75],
}
self.expected_fit_result = {
loss_reduction.ReductionV2.NONE:
sum_over_batch_size_fit_result,
loss_reduction.ReductionV2.SUM: {
'loss': [82.5, 82.5],
'output_1_loss': [65, 65],
'output_2_loss': [17.5, 17.5],
},
loss_reduction.ReductionV2.AUTO:
sum_over_batch_size_fit_result,
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE:
sum_over_batch_size_fit_result,
}
# In the order: 'loss', 'output_1_loss', 'output_2_loss',
self.expected_batch_result = {
loss_reduction.ReductionV2.NONE: [41.25, 32.5, 8.75],
loss_reduction.ReductionV2.SUM: [82.5, 65, 17.5],
loss_reduction.ReductionV2.AUTO: [41.25, 32.5, 8.75],
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [41.25, 32.5, 8.75],
}
def test_fit(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
history = model.fit([self.x, self.x], [self.y, self.y],
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result[reduction].items():
self.assertAllClose(history.history[key], value)
def test_eval(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
eval_result = model.evaluate([self.x, self.x], [self.y, self.y],
batch_size=2,
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
})
self.assertAllClose(eval_result, self.expected_batch_result[reduction])
def test_train_on_batch(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
result = model.train_on_batch([self.x, self.x], [self.y, self.y],
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
})
expected_values = self.expected_batch_result[reduction]
if reduction == loss_reduction.ReductionV2.SUM:
# We are taking all the data as one batch, so undo the averaging here.
expected_values = [x * 2 for x in self.expected_batch_result[reduction]]
self.assertAllClose(result, expected_values)
def test_test_on_batch(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
result = model.test_on_batch([self.x, self.x], [self.y, self.y],
sample_weight={
'output_1': self.weights_1,
'output_2': self.weights_2,
})
expected_values = self.expected_batch_result[reduction]
if reduction == loss_reduction.ReductionV2.SUM:
# We are taking all the data as one batch, so undo the averaging here.
expected_values = [x * 2 for x in self.expected_batch_result[reduction]]
self.assertAllClose(result, expected_values)
def test_fit_generator(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
history = model.fit_generator(
custom_generator_multi_io(), steps_per_epoch=2, epochs=2)
for key, value in self.expected_fit_result[reduction].items():
self.assertAllClose(history.history[key], value)
def test_eval_generator(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
eval_result = model.evaluate_generator(custom_generator_multi_io(), steps=2)
self.assertAllClose(eval_result, self.expected_batch_result[reduction])
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/metrics_correctness_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util.tf_export import keras_export
class Optimizer(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
# checks that clipnorm >= 0 and clipvalue >= 0
if kwargs[k] < 0:
raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
grads = K.gradients(loss, params)
if None in grads:
raise ValueError('An operation has `None` for gradient. '
'Please make sure that all of your ops have a '
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'K.argmax, K.round, K.eval.')
if hasattr(self, 'clipnorm'):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, 'clipvalue'):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
Arguments:
weights: a list of Numpy arrays. The number of arrays and their shape
must match number of the dimensions of the weights of the optimizer
(i.e. it should match the output of `get_weights`).
Raises:
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
if len(params) != len(weights):
raise ValueError('Length of the specified weight list (' +
str(len(weights)) +
') does not match the number of weights '
'of the optimizer (' + str(len(params)) + ')')
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Optimizer weight shape ' + str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
Returns:
A list of numpy arrays.
"""
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
Arguments:
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter that accelerates SGD in the relevant
direction and dampens oscillations.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
# momentum
shapes = [K.int_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(state_ops.assign(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov
}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
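# Illustrative sketch, not part of the original module: the time-based decay
# applied in SGD.get_updates above. With lr=0.01 and decay=1e-3, the effective
# rate after t updates is lr / (1 + decay * t). The helper name is an
# assumption for this example only.
def _example_sgd_decayed_lr(lr=0.01, decay=1e-3, t=100):
  return lr * (1. / (1. + decay * t))  # 0.01 / 1.1 ~= 0.00909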
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
Arguments:
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
super(RMSprop, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
"""Adagrad optimizer.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
It is recommended to leave the parameters of this optimizer
at their default values.
  Arguments:
lr: float >= 0. Initial learning rate.
epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
  References:
- [Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + math_ops.square(g) # update accumulator
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
"""Adadelta optimizer.
Adadelta is a more robust extension of Adagrad
that adapts learning rates based on a moving window of gradient updates,
instead of accumulating all past gradients. This way, Adadelta continues
learning even when many updates have been done. Compared to Adagrad, in the
original version of Adadelta you don't have to set an initial learning
rate. In this version, initial learning rate and decay factor can
be set, as in most other Keras optimizers.
It is recommended to leave the parameters of this optimizer
at their default values.
  Arguments:
lr: float >= 0. Initial learning rate, defaults to 1.
It is recommended to leave it at the default value.
rho: float >= 0. Adadelta decay factor, corresponding to fraction of
gradient to keep at each time step.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Initial learning rate decay.
  References:
- [Adadelta - an adaptive learning rate
method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - lr * update
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
self.updates.append(state_ops.assign(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and Beyond".
"""
def __init__(self,
lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
amsgrad=False,
**kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
lr_t = lr * (
K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
(1. - math_ops.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
if self.amsgrad:
vhat_t = math_ops.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(state_ops.assign(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad
}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
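# Illustrative sketch, not part of the original module: the bias-corrected
# step size computed in Adam.get_updates above,
# lr_t = lr * sqrt(1 - beta_2^t) / (1 - beta_1^t). The helper name is an
# assumption for this example only.
def _example_adam_bias_corrected_lr(lr=0.001, beta_1=0.9, beta_2=0.999, t=1.):
  return lr * ((1. - beta_2 ** t) ** 0.5) / (1. - beta_1 ** t)  # ~3.16e-4 at t=1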
class Adamax(Optimizer):
"""Adamax optimizer from Adam paper's Section 7.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
**kwargs):
super(Adamax, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. /
(1. +
self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
# zero init of 1st moment
ms = [K.zeros(shape) for shape in shapes]
# zero init of exponentially weighted infinity norm
us = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(u, u_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
schedule_decay=0.004,
**kwargs):
super(Nadam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.m_schedule = K.variable(1., name='m_schedule')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
t = math_ops.cast(self.iterations, K.floatx())
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations, self.m_schedule] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
# the following equations given in [1]
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
m_t_bar = (1. -
momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon,
'schedule_decay': self.schedule_decay
}
base_config = super(Nadam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, trackable.Trackable):
"""Wrapper class for native TensorFlow optimizers."""
def __init__(self, optimizer, iterations=None): # pylint: disable=super-init-not-called
self.optimizer = optimizer
self._track_trackable(optimizer, name='optimizer')
if iterations is None:
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
else:
self.iterations = iterations
self._track_trackable(self.iterations, name='global_step')
def apply_gradients(self, grads):
self.optimizer.apply_gradients(grads, global_step=self.iterations)
def get_grads(self, loss, params):
return self.optimizer.compute_gradients(loss, params)
def get_updates(self, loss, params):
if distribution_strategy_context.has_strategy():
self.updates = []
if not params:
        # After the model vars have been created, get_updates is called a
        # second time with params as an empty list. This ensures that we call
        # compute_gradients without an explicit variable list.
grads = self.optimizer.compute_gradients(loss)
else:
grads = self.optimizer.compute_gradients(loss, params)
global_step = training_util.get_global_step()
opt_update = self.optimizer.apply_gradients(grads, global_step)
else:
if not params:
self.updates = [state_ops.assign_add(self.iterations, 1)]
return self.updates
# Updates list starts out empty because the iterations variable is
# incremented in optimizer.apply_gradients()
self.updates = []
grads = self.optimizer.compute_gradients(loss, params)
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
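# Illustrative sketch, not part of the original module: wrapping a native
# TensorFlow optimizer so it can drive Keras training; `get()` below performs
# the same wrapping automatically. The import and helper name are assumptions
# for this example only.
def _example_wrap_tf_optimizer():
  from tensorflow.python.training import gradient_descent
  native = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  return TFOptimizer(native)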
# Aliases.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
return serialize_keras_object(optimizer)
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
Arguments:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras Optimizer instance.
"""
all_classes = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD,
'ftrl': ftrl.Ftrl
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Arguments:
identifier: Optimizer identifier, one of
- String: name of an optimizer
        - Dictionary: configuration dictionary.
        - Keras Optimizer instance (it will be returned unchanged).
        - TensorFlow Optimizer instance (it will be wrapped as a Keras
            Optimizer).
Returns:
A Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
return identifier
# Wrap TF optimizer instances
elif isinstance(identifier, tf_optimizer_module.Optimizer):
opt = TFOptimizer(identifier)
K.track_tf_optimizer(opt)
return opt
elif isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
else:
raise ValueError('Could not interpret optimizer identifier:', identifier)
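# Illustrative usage sketch, not part of the original module: `get` resolves
# strings and config dicts to the v2 optimizer classes listed in `deserialize`
# above, and passes existing optimizer instances through. The helper name is
# an assumption for this example only.
def _example_optimizer_lookup():
  by_name = get('adam')                                 # adam_v2.Adam defaults
  by_config = get({'class_name': 'sgd', 'config': {}})  # gradient_descent_v2.SGD
  return by_name, by_config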
| tensorflow-master | tensorflow/python/keras/optimizers.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BackendConfigTest(test.TestCase):
def test_backend(self):
self.assertEqual(keras.backend.backend(), 'tensorflow')
  def test_epsilon(self):
epsilon = 1e-2
keras.backend_config.set_epsilon(epsilon)
self.assertEqual(keras.backend_config.epsilon(), epsilon)
keras.backend_config.set_epsilon(1e-7)
self.assertEqual(keras.backend_config.epsilon(), 1e-7)
def test_floatx(self):
floatx = 'float64'
keras.backend_config.set_floatx(floatx)
self.assertEqual(keras.backend_config.floatx(), floatx)
keras.backend_config.set_floatx('float32')
self.assertEqual(keras.backend_config.floatx(), 'float32')
def test_image_data_format(self):
image_data_format = 'channels_first'
keras.backend_config.set_image_data_format(image_data_format)
self.assertEqual(keras.backend_config.image_data_format(),
image_data_format)
keras.backend_config.set_image_data_format('channels_last')
self.assertEqual(keras.backend_config.image_data_format(), 'channels_last')
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/backend_config_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for exporting TensorFlow ops under tf.keras.*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=bad-continuation
keras_export(v1=["keras.initializers.Initializer"])(
init_ops.Initializer)
keras_export(v1=["keras.initializers.Zeros", "keras.initializers.zeros"])(
init_ops.Zeros)
keras_export(v1=["keras.initializers.Ones", "keras.initializers.ones"])(
init_ops.Ones)
keras_export(v1=["keras.initializers.Constant", "keras.initializers.constant"])(
init_ops.Constant)
keras_export(v1=["keras.initializers.VarianceScaling"])(
init_ops.VarianceScaling)
keras_export(v1=["keras.initializers.Orthogonal",
"keras.initializers.orthogonal"])(
init_ops.Orthogonal)
keras_export(v1=["keras.initializers.Identity",
"keras.initializers.identity"])(
init_ops.Identity)
keras_export(v1=["keras.initializers.glorot_uniform"])(
init_ops.GlorotUniform)
keras_export(v1=["keras.initializers.glorot_normal"])(
init_ops.GlorotNormal)
keras_export(v1=["keras.initializers.lecun_normal"])(
init_ops.lecun_normal)
keras_export(v1=["keras.initializers.lecun_uniform"])(
init_ops.lecun_uniform)
keras_export(v1=["keras.initializers.he_normal"])(
init_ops.he_normal)
keras_export(v1=["keras.initializers.he_uniform"])(
init_ops.he_uniform)
keras_export("keras.initializers.Initializer", v1=[])(
init_ops_v2.Initializer)
keras_export(
"keras.initializers.Zeros", "keras.initializers.zeros", v1=[])(
init_ops_v2.Zeros)
keras_export(
"keras.initializers.Ones", "keras.initializers.ones", v1=[])(
init_ops_v2.Ones)
keras_export(
"keras.initializers.Constant", "keras.initializers.constant", v1=[])(
init_ops_v2.Constant)
keras_export("keras.initializers.VarianceScaling", v1=[])(
init_ops_v2.VarianceScaling)
keras_export(
"keras.initializers.Orthogonal", "keras.initializers.orthogonal", v1=[])(
init_ops_v2.Orthogonal)
keras_export(
"keras.initializers.Identity", "keras.initializers.identity", v1=[])(
init_ops_v2.Identity)
keras_export(
"keras.initializers.GlorotUniform",
"keras.initializers.glorot_uniform",
v1=[])(
init_ops_v2.GlorotUniform)
keras_export(
"keras.initializers.GlorotNormal",
"keras.initializers.glorot_normal",
v1=[])(
init_ops_v2.GlorotNormal)
keras_export("keras.initializers.lecun_normal", v1=[])(
init_ops_v2.lecun_normal)
keras_export("keras.initializers.lecun_uniform", v1=[])(
init_ops_v2.lecun_uniform)
keras_export("keras.initializers.he_normal", v1=[])(
init_ops_v2.he_normal)
keras_export("keras.initializers.he_uniform", v1=[])(
init_ops_v2.he_uniform)
keras_export("keras.initializers.RandomNormal", v1=[])(
init_ops_v2.RandomNormal)
keras_export("keras.initializers.RandomUniform", v1=[])(
init_ops_v2.RandomUniform)
keras_export("keras.initializers.TruncatedNormal", v1=[])(
init_ops_v2.TruncatedNormal)
# pylint: enable=bad-continuation
keras_export(v1=["keras.backend.name_scope"])(ops.name_scope)
| tensorflow-master | tensorflow/python/keras/ops.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer serialization / deserialization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python import tf2
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import init_ops_v2
# These imports are brought in so that keras.initializers.deserialize
# has them available in module_objects.
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import GlorotNormal
from tensorflow.python.ops.init_ops import GlorotUniform
from tensorflow.python.ops.init_ops import he_normal # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import he_uniform # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Identity
from tensorflow.python.ops.init_ops import Initializer # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import lecun_normal # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import lecun_uniform # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal as TFRandomNormal
from tensorflow.python.ops.init_ops import RandomUniform as TFRandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal as TFTruncatedNormal
from tensorflow.python.ops.init_ops import VarianceScaling # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Zeros
# pylint: disable=unused-import, disable=line-too-long
from tensorflow.python.ops.init_ops_v2 import Constant as ConstantV2
from tensorflow.python.ops.init_ops_v2 import GlorotNormal as GlorotNormalV2
from tensorflow.python.ops.init_ops_v2 import GlorotUniform as GlorotUniformV2
from tensorflow.python.ops.init_ops_v2 import he_normal as he_normalV2
from tensorflow.python.ops.init_ops_v2 import he_uniform as he_uniformV2
from tensorflow.python.ops.init_ops_v2 import Identity as IdentityV2
from tensorflow.python.ops.init_ops_v2 import Initializer as InitializerV2
from tensorflow.python.ops.init_ops_v2 import lecun_normal as lecun_normalV2
from tensorflow.python.ops.init_ops_v2 import lecun_uniform as lecun_uniformV2
from tensorflow.python.ops.init_ops_v2 import Ones as OnesV2
from tensorflow.python.ops.init_ops_v2 import Orthogonal as OrthogonalV2
from tensorflow.python.ops.init_ops_v2 import RandomNormal as RandomNormalV2
from tensorflow.python.ops.init_ops_v2 import RandomUniform as RandomUniformV2
from tensorflow.python.ops.init_ops_v2 import TruncatedNormal as TruncatedNormalV2
from tensorflow.python.ops.init_ops_v2 import VarianceScaling as VarianceScalingV2
from tensorflow.python.ops.init_ops_v2 import Zeros as ZerosV2
# pylint: enable=unused-import, enable=line-too-long
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.initializers.TruncatedNormal',
'keras.initializers.truncated_normal'])
class TruncatedNormal(TFTruncatedNormal):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate. Defaults to 0.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate. Defaults to 0.05.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
A TruncatedNormal instance.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(TruncatedNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
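# A minimal usage sketch (illustrative only; the shape and seed below are
# arbitrary assumptions, and the helper is not used by the library code):
def _truncated_normal_usage_sketch():
  """Illustrative helper showing how the V1 initializer is called."""
  init = TruncatedNormal(mean=0.0, stddev=0.05, seed=42)
  # Calling the initializer yields values drawn from a normal distribution and
  # re-drawn until they fall within two standard deviations of the mean.
  return init(shape=(3, 3))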
@keras_export(v1=['keras.initializers.RandomUniform',
'keras.initializers.uniform',
'keras.initializers.random_uniform'])
class RandomUniform(TFRandomUniform):
"""Initializer that generates tensors with a uniform distribution.
Args:
    minval: A Python scalar or a scalar tensor. Lower bound of the range of
      random values to generate. Defaults to -0.05.
    maxval: A Python scalar or a scalar tensor. Upper bound of the range of
      random values to generate. Defaults to 0.05.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type.
Returns:
A RandomUniform instance.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None,
dtype=dtypes.float32):
super(RandomUniform, self).__init__(
minval=minval, maxval=maxval, seed=seed, dtype=dtype)
@keras_export(v1=['keras.initializers.RandomNormal',
'keras.initializers.normal',
'keras.initializers.random_normal'])
class RandomNormal(TFRandomNormal):
"""Initializer that generates tensors with a normal distribution.
Args:
    mean: A Python scalar or a scalar tensor. Mean of the random values to
      generate. Defaults to 0.
    stddev: A Python scalar or a scalar tensor. Standard deviation of the random
      values to generate. Defaults to 0.05.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
    A RandomNormal instance.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None, dtype=dtypes.float32):
super(RandomNormal, self).__init__(
mean=mean, stddev=stddev, seed=seed, dtype=dtype)
# Compatibility aliases
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
# Utility functions
@keras_export('keras.initializers.serialize')
def serialize(initializer):
return serialize_keras_object(initializer)
@keras_export('keras.initializers.deserialize')
def deserialize(config, custom_objects=None):
"""Return an `Initializer` object from its config."""
if tf2.enabled():
# Class names are the same for V1 and V2 but the V2 classes
# are aliased in this file so we need to grab them directly
# from `init_ops_v2`.
module_objects = {
obj_name: getattr(init_ops_v2, obj_name)
for obj_name in dir(init_ops_v2)
}
else:
module_objects = globals()
return deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='initializer')
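# A minimal usage sketch for `deserialize` (illustrative only; the config dict
# below is an assumption and would normally come from `serialize`):
def _deserialize_usage_sketch():
  """Illustrative helper, not used by the library code above."""
  config = {'class_name': 'TruncatedNormal',
            'config': {'mean': 0.0, 'stddev': 0.02}}
  # With `tf2.enabled()` the class name is looked up in `init_ops_v2`;
  # otherwise it resolves against this module's globals (the V1 classes).
  return deserialize(config)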
@keras_export('keras.initializers.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
identifier = str(identifier)
# We have to special-case functions that return classes.
# TODO(omalleyt): Turn these into classes or class aliases.
special_cases = ['he_normal', 'he_uniform', 'lecun_normal', 'lecun_uniform']
if identifier in special_cases:
# Treat like a class.
return deserialize({'class_name': identifier, 'config': {}})
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret initializer identifier: ' +
str(identifier))
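# A minimal sketch of the three identifier forms accepted by `get`
# (illustrative only; the concrete values are arbitrary assumptions):
def _get_usage_sketch():
  """Illustrative helper, not used by the library code above."""
  from_config = get({'class_name': 'RandomNormal',
                     'config': {'mean': 0.0, 'stddev': 0.05}})
  from_string = get('GlorotUniform')  # looked up by name via `deserialize`
  from_callable = get(Zeros())        # already-callable objects pass through
  return from_config, from_string, from_callable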
# pylint: enable=invalid-name
| tensorflow-master | tensorflow/python/keras/initializers.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import metrics
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
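# An illustrative NumPy cross-check (not exercised by the tests above; the
# helper name and the fixed 0.5 threshold are assumptions) showing how the
# expected values 7. and 14. in `FalsePositivesTest` can be derived by hand.
def _manual_false_positive_count(y_true, y_pred, sample_weight=None):
  """Illustrative only: false positives for 0/1 arrays at threshold 0.5."""
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  fp = (y_pred > 0.5) & (y_true < 0.5)
  if sample_weight is None:
    return fp.sum()  # 7. for the unweighted case above
  # Per-example weights are applied across the prediction axis, which is what
  # the expected value 14. in `test_weighted` implies.
  weights = np.reshape(np.asarray(sample_weight, dtype=float), (-1, 1))
  return (fp * weights).sum()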
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1. / 3, self.evaluate(result))
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
self.evaluate(
p_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_unweighted_class_id(self):
p_obj = metrics.Precision(class_id=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_class_id(self):
p_obj = metrics.Precision(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
self.evaluate(
r_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = r_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_unweighted_class_id(self):
r_obj = metrics.Recall(class_id=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_class_id(self):
r_obj = metrics.Recall(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([1, 1, 1, 0, 1], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.25, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
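# An illustrative cross-check of the `top_k` behaviour exercised above (the
# helper is an assumption for illustration, not part of the test classes).
# With `top_k=3`, only the three highest-scoring entries of an example count
# as predicted positives.
def _manual_top_k_recall(y_true, y_pred, top_k):
  """Illustrative only: recall when predictions are limited to the top-k."""
  y_true = np.asarray(y_true, dtype=bool).reshape(-1)
  y_pred = np.asarray(y_pred, dtype=float).reshape(-1)
  predicted_positive = np.zeros(y_true.shape, dtype=bool)
  predicted_positive[np.argsort(-y_pred)[:top_k]] = True
  tp = np.sum(predicted_positive & y_true)
  fn = np.sum(~predicted_positive & y_true)
  # For `test_unweighted_top_k` above this gives 1 / (1 + 1) = 0.5.
  return tp / (tp + fn)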
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
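# An illustrative brute-force cross-check (the helper and its 200-point
# threshold grid are assumptions, not the metric's exact algorithm). For the
# `test_unweighted_high_specificity` data, any threshold between 0.3 and 0.4
# keeps 4 of 5 negatives (specificity 0.8) and 4 of 5 positives, matching the
# sensitivity of 0.8 that the test asserts.
def _manual_sensitivity_at_specificity(labels, preds, target_specificity):
  """Illustrative only: best sensitivity subject to a specificity floor."""
  labels = np.asarray(labels, dtype=bool)
  preds = np.asarray(preds, dtype=float)
  best_sensitivity = 0.0
  for threshold in np.linspace(0.0, 1.0, 200):
    positive = preds > threshold
    tn = np.sum(~positive & ~labels)
    fp = np.sum(positive & ~labels)
    if tn / max(tn + fp, 1) >= target_specificity:
      tp = np.sum(positive & labels)
      fn = np.sum(~positive & labels)
      best_sensitivity = max(best_sensitivity, tp / max(tp + fn, 1))
  return best_sensitivity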
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class AUCTest(test.TestCase):
def setup(self):
self.num_thresholds = 3
self.y_pred = constant_op.constant([0, 0.5, 0.3, 0.9], dtype=dtypes.float32)
self.y_true = constant_op.constant([0, 0, 1, 1])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
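    # The counts above can be reproduced directly with plain NumPy (an
    # illustrative sketch, not executed by the tests), e.g. unweighted:
    #   preds = np.array([0, 0.5, 0.3, 0.9])
    #   labels = np.array([0, 0, 1, 1])
    #   thresholds = np.array([0 - 1e-7, 0.5, 1 + 1e-7])[:, None]
    #   positive = preds > thresholds
    #   tp = np.sum(positive & (labels == 1), axis=1)  # -> [2, 1, 0]
    #   fp = np.sum(positive & (labels == 0), axis=1)  # -> [2, 0, 0]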
def test_config(self):
auc_obj = metrics.AUC(
num_thresholds=100,
curve='PR',
summation_method='majoring',
name='auc_1')
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_config_manual_thresholds(self):
auc_obj = metrics.AUC(
num_thresholds=None,
curve='PR',
summation_method='majoring',
name='auc_1',
thresholds=[0.3, 0.5])
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 4)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 4)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_value_is_idempotent(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=3)
self.evaluate(variables.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)
def test_unweighted_all_correct(self):
self.setup()
auc_obj = metrics.AUC()
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_true)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_manual_thresholds(self):
self.setup()
# Verify that when specified, thresholds are used instead of num_thresholds.
auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.7855 * 1 + 0.2855 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (1 * 1 + 0.571 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.571 * 1 + 0 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (1 * 0.429 + 1 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (0.7 * 0.429 + 0 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
    # auc = (slope / Total Pos) * [dTP + intercept * log(Pb/Pa)]
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# P = tp + fp = [10, 4, 0]
# dTP = [7-4, 4-0] = [3, 4]
# dP = [10-4, 4-0] = [6, 4]
# slope = dTP/dP = [0.5, 1]
    # intercept = TPa - (slope * Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
# (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
expected_result = (2.416/7 + 4/7)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
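    # An illustrative numeric check of the first interval's contribution
    # (assumes plain NumPy; not executed by the test):
    #   slope, intercept, d_tp, p_b, p_a = 0.5, 2.0, 3.0, 10.0, 4.0
    #   first = slope * (d_tp + intercept * np.log(p_b / p_a))  # ~= 2.416
    #   auc = (first + 4.0) / 7.0                               # ~= 0.917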
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=1)
def test_invalid_curve(self):
with self.assertRaisesRegexp(ValueError,
'Invalid AUC curve value "Invalid".'):
metrics.AUC(curve='Invalid')
def test_invalid_summation_method(self):
with self.assertRaisesRegexp(
ValueError, 'Invalid AUC summation method value "Invalid".'):
metrics.AUC(summation_method='Invalid')
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/metrics_confusion_matrix_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import csv
import io
import json
import os
import re
import tempfile
import time
import numpy as np
import six
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util.tf_export import keras_export
try:
import requests
except ImportError:
requests = None
def configure_callbacks(callbacks,
model,
do_validation=False,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
verbose=1,
count_mode='steps',
mode=ModeKeys.TRAIN):
"""Configures callbacks for use in various training loops.
Arguments:
callbacks: List of Callbacks.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
      epochs: Number of epochs to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
Returns:
Instance of CallbackList used to control all Callbacks.
"""
# Check if callbacks have already been configured.
if isinstance(callbacks, CallbackList):
return callbacks
if not callbacks:
callbacks = []
# Add additional callbacks during training.
if mode == ModeKeys.TRAIN:
model.history = History()
callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
callbacks.append(ProgbarLogger(count_mode))
callback_list = CallbackList(callbacks)
# Set callback model
callback_model = model._get_callback_model() # pylint: disable=protected-access
callback_list.set_model(callback_model)
set_callback_parameters(
callback_list,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=samples,
verbose=verbose,
mode=mode)
callback_list.model.stop_training = False
return callback_list
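# A minimal sketch of how a training loop typically drives this helper
# (illustrative only; `model` is assumed to be a compiled Keras model and the
# numbers are arbitrary; the sketch is not used by the library code):
def _configure_callbacks_sketch(model, user_callbacks):
  """Illustrative helper: build a CallbackList for one training run."""
  callback_list = configure_callbacks(
      user_callbacks,
      model,
      do_validation=True,
      epochs=5,
      steps_per_epoch=100,
      verbose=1,
      mode=ModeKeys.TRAIN)
  # The resulting CallbackList is then driven like a single callback, e.g.
  # `callback_list.on_train_begin()` followed by per-epoch/per-batch hooks.
  return callback_list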
def set_callback_parameters(callback_list,
model,
do_validation=False,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
verbose=1,
mode=ModeKeys.TRAIN):
"""Sets callback parameters.
Arguments:
callback_list: CallbackList instance.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
      epochs: Number of epochs to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
"""
for cbk in callback_list:
if isinstance(cbk, (BaseLogger, ProgbarLogger)):
cbk.stateful_metrics = model.metrics_names[1:] # Exclude `loss`
# Set callback parameters
callback_metrics = []
# When we have deferred build scenario with iterator input, we will compile
# when we standardize first batch of data.
if mode != ModeKeys.PREDICT and hasattr(model, 'metrics_names'):
callback_metrics = copy.copy(model.metrics_names)
if do_validation:
callback_metrics += ['val_' + n for n in model.metrics_names]
callback_params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
}
callback_list.set_params(callback_params)
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, 'next') or hasattr(data, '__next__') or isinstance(
data, (Sequence, iterator_ops.Iterator, iterator_ops.IteratorV2)))
def make_logs(model, logs, outputs, mode, prefix=''):
"""Computes logs for sending to `on_batch_end` methods."""
if mode in {ModeKeys.TRAIN, ModeKeys.TEST}:
if hasattr(model, 'metrics_names'):
for label, output in zip(model.metrics_names, outputs):
logs[prefix + label] = output
else:
logs['outputs'] = outputs
return logs
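# A minimal sketch of the dict `make_logs` builds (illustrative only; it
# assumes a model whose `metrics_names` are ['loss', 'acc'] and is not used by
# the library code):
def _make_logs_sketch(model):
  """Illustrative helper: pairs per-batch outputs with metric names."""
  outputs = [0.3, 0.9]  # e.g. [loss_value, accuracy_value] for one batch
  logs = make_logs(model, {}, outputs, ModeKeys.TRAIN)
  # -> {'loss': 0.3, 'acc': 0.9}; with prefix='val_' the same call produces
  #    'val_loss' and 'val_acc' keys instead.
  return logs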
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
self.params = {}
self.model = None
self._reset_batch_timing()
def _reset_batch_timing(self):
self._delta_t_batch = 0.
self._delta_ts = collections.defaultdict(
lambda: collections.deque([], maxlen=self.queue_length))
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
self.params = params
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
self.model = model
for callback in self.callbacks:
callback.set_model(model)
def _call_batch_hook(self, mode, hook, batch, logs=None):
"""Helper function for all batch_{begin | end} methods."""
if not self.callbacks:
return
hook_name = 'on_{mode}_batch_{hook}'.format(mode=mode, hook=hook)
if hook == 'begin':
self._t_enter_batch = time.time()
if hook == 'end':
# Batch is ending, calculate batch time.
self._delta_t_batch = time.time() - self._t_enter_batch
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
batch_hook = getattr(callback, hook_name)
batch_hook(batch, logs)
self._delta_ts[hook_name].append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts[hook_name])
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning(
'Method (%s) is slow compared '
'to the batch update (%f). Check your callbacks.', hook_name,
delta_t_median)
def _call_begin_hook(self, mode):
"""Helper function for on_{train|test|predict}_begin methods."""
if mode == ModeKeys.TRAIN:
self.on_train_begin()
elif mode == ModeKeys.TEST:
self.on_test_begin()
else:
self.on_predict_begin()
def _call_end_hook(self, mode):
"""Helper function for on_{train|test|predict}_end methods."""
if mode == ModeKeys.TRAIN:
self.on_train_end()
elif mode == ModeKeys.TEST:
self.on_test_end()
else:
self.on_predict_end()
def on_batch_begin(self, batch, logs=None):
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_batch_end(self, batch, logs=None):
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
"""Calls the `on_epoch_begin` methods of its callbacks.
This function should only be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._reset_batch_timing()
def on_epoch_end(self, epoch, logs=None):
"""Calls the `on_epoch_end` methods of its callbacks.
This function should only be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_train_batch_begin(self, batch, logs=None):
"""Calls the `on_train_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Calls the `on_train_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Calls the `on_test_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)
def on_test_batch_end(self, batch, logs=None):
"""Calls the `on_test_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
def on_predict_batch_begin(self, batch, logs=None):
"""Calls the `on_predict_batch_begin` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)
def on_predict_batch_end(self, batch, logs=None):
"""Calls the `on_predict_batch_end` methods of its callbacks.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
def on_train_begin(self, logs=None):
"""Calls the `on_train_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Calls the `on_train_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_train_end(logs)
def on_test_begin(self, logs=None):
"""Calls the `on_test_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_test_begin(logs)
def on_test_end(self, logs=None):
"""Calls the `on_test_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_test_end(logs)
def on_predict_begin(self, logs=None):
"""Calls the 'on_predict_begin` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_predict_begin(logs)
def on_predict_end(self, logs=None):
"""Calls the `on_predict_end` methods of its callbacks.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
for callback in self.callbacks:
callback.on_predict_end(logs)
def __iter__(self):
return iter(self.callbacks)
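# Illustrative sketch (not part of the original module): a minimal manual
# driving of a `CallbackList`, mirroring what `model.fit` does internally.
# The `model` and `params` arguments are hypothetical placeholders supplied
# by the caller.
def _example_drive_callback_list(model, params):
  history = History()
  callback_list = CallbackList([history])
  callback_list.set_model(model)
  callback_list.set_params(params)
  callback_list.on_train_begin()
  for epoch in range(2):
    callback_list.on_epoch_begin(epoch)
    callback_list.on_batch_begin(0, logs={'batch': 0, 'size': 32})
    callback_list.on_batch_end(0, logs={'loss': 0.5, 'size': 32})
    callback_list.on_epoch_end(epoch, logs={'loss': 0.5})
  callback_list.on_train_end()
  return history.history  # e.g. {'loss': [0.5, 0.5]}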
@keras_export('keras.callbacks.Callback')
class Callback(object):
"""Abstract base class used to build new callbacks.
Attributes:
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
validation_data: Deprecated. Do not use.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Model` class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
self.model = None
# Whether this Callback should only run on the chief worker in a
# Multi-Worker setting.
# TODO(omalleyt): Make this attr public once solution is stable.
self._chief_worker_only = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Arguments:
epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Has keys `batch` and `size` representing the current batch
number and the size of the batch.
"""
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dict. Metric results for this batch.
"""
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_train_end(self, logs=None):
"""Called at the end of training.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_begin(self, logs=None):
"""Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_test_end(self, logs=None):
"""Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_begin(self, logs=None):
"""Called at the beginning of prediction.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def on_predict_end(self, logs=None):
"""Called at the end of prediction.
Subclasses should override for any actions to run.
Arguments:
logs: dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
Arguments:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
All others will be averaged in `on_epoch_end`.
"""
def __init__(self, stateful_metrics=None):
super(BaseLogger, self).__init__()
self.stateful_metrics = set(stateful_metrics or [])
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen` calculation.
num_steps = logs.get('num_steps', 1)
self.seen += batch_size * num_steps
for k, v in logs.items():
if k in self.stateful_metrics:
self.totals[k] = v
else:
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
if k in self.stateful_metrics:
logs[k] = self.totals[k]
else:
logs[k] = self.totals[k] / self.seen
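# Illustrative sketch (hypothetical numbers): with two batches of sizes 32 and
# 16 reporting 'loss' values 0.4 and 0.1, `totals['loss']` accumulates to
# 0.4 * 32 + 0.1 * 16 = 14.4 and `seen` reaches 48, so `on_epoch_end` writes
# logs['loss'] = 14.4 / 48 = 0.3. A metric listed in `stateful_metrics` is
# instead reported with its most recent batch value.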
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered.
"""
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seen or steps (batches) seen.
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is.
All others will be averaged over time (e.g. loss, etc).
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples', stateful_metrics=None):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
self.stateful_metrics = set(stateful_metrics or [])
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
if self.use_steps:
self.target = self.params['steps']
else:
self.target = self.params['samples']
if self.verbose:
if self.epochs > 1:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
self.progbar = Progbar(
target=self.target,
verbose=self.verbose,
stateful_metrics=self.stateful_metrics,
unit_name='step' if self.use_steps else 'sample')
def on_batch_begin(self, batch, logs=None):
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen` calculation.
num_steps = logs.get('num_steps', 1)
if self.use_steps:
self.seen += num_steps
else:
self.seen += batch_size * num_steps
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and (self.target is None or self.seen < self.target):
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values)
@keras_export('keras.callbacks.History')
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
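# Illustrative sketch (not part of the original module): `model.fit` returns
# this callback, so per-epoch results can be read back after training, e.g.
# `history = model.fit(...)` followed by `history.history['loss']`, which is a
# list with one entry per epoch (plus 'val_'-prefixed keys when validating).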
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
  which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`, the latest best model according
to the quantity monitored will not be overwritten.
mode: one of {auto, min, max}. If `save_best_only=True`, the decision to
overwrite the current save file is made based on either the maximization
or the minimization of the monitored quantity. For `val_acc`, this
should be `max`, for `val_loss` this should be `min`, etc. In `auto`
mode, the direction is automatically inferred from the name of the
monitored quantity.
save_weights_only: if True, then only the model's weights will be saved
(`model.save_weights(filepath)`), else the full model is saved
(`model.save(filepath)`).
    save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
      the model after each epoch. When using an integer, the callback saves
      the model at the end of the batch at which this many samples have been
      seen since the last save. Note that if the saving isn't aligned to
      epochs, the monitored metric may potentially be less reliable (it could
      reflect as little as 1 batch, since the metrics get reset every epoch).
      Defaults to `'epoch'`.
**kwargs: Additional arguments for backwards compatibility. Possible key
is `period`.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
**kwargs):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.epochs_since_last_save = 0
self._samples_seen_since_last_saving = 0
# Deprecated field `load_weights_on_restart` is for loading the checkpoint
# file from `filepath` at the start of `model.fit()`
# TODO(rchao): Remove the arg during next breaking release.
if 'load_weights_on_restart' in kwargs:
self.load_weights_on_restart = kwargs['load_weights_on_restart']
logging.warning('`load_weights_on_restart` argument is deprecated. '
'Please use `model.load_weights()` for loading weights '
'before the start of `model.fit()`.')
else:
self.load_weights_on_restart = False
# Deprecated field `period` is for the number of epochs between which
# the model is saved.
if 'period' in kwargs:
self.period = kwargs['period']
logging.warning('`period` argument is deprecated. Please use `save_freq` '
'to specify the frequency in number of samples seen.')
else:
self.period = 1
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
def set_model(self, model):
self.model = model
# Use name matching rather than `isinstance` to avoid circular dependencies.
if (not self.save_weights_only and
not model._is_graph_network and # pylint: disable=protected-access
model.__class__.__name__ != 'Sequential'):
self.save_weights_only = True
def on_train_begin(self, logs=None):
if multi_worker_util.in_multi_worker_mode():
# pylint: disable=protected-access
# MultiWorkerTrainingState is used to manage the training state needed
# for preemption-recovery of a worker in multi-worker training.
self.model._training_state = (
training_state.MultiWorkerTrainingState(self.model, self.filepath))
self._training_state = self.model._training_state
if self._training_state.restore():
# If the training state needs to be and is successfully restored,
# it is recovering from a previous failure (or preemption). In such
# case, do not load the weights from user specified file path.
return
# If this is not multi worker training, restoring is not needed, or
# restoring failed, check if it should load weights on restart.
if self.load_weights_on_restart:
if (not multi_worker_util.in_multi_worker_mode()
or multi_worker_util.should_load_checkpoint()):
filepath_to_load = (
self._get_most_recently_modified_file_matching_pattern(
self.filepath))
if (filepath_to_load is not None and
training_state.checkpoint_exists(filepath_to_load)):
try:
# `filepath` may contain placeholders such as `{epoch:02d}`, and
# thus it attempts to load the most recently modified file with file
# name matching the pattern.
self.model.load_weights(filepath_to_load)
except (IOError, ValueError) as e:
raise ValueError('Error loading file from {}. Reason: {}'.format(
filepath_to_load, e))
def on_train_end(self, logs=None):
if multi_worker_util.in_multi_worker_mode():
# In multi-worker training, on successful exit of training, delete the
# training state backup file that was saved for the purpose of worker
# recovery.
self._training_state.delete_backup()
# Restore the training state so the model is ready for next (possible)
# multi worker training.
del self._training_state
del self.model._training_state
def on_batch_end(self, batch, logs=None):
logs = logs or {}
if isinstance(self.save_freq, int):
self._samples_seen_since_last_saving += logs.get('size', 1)
if self._samples_seen_since_last_saving >= self.save_freq:
self._save_model(epoch=self._current_epoch, logs=logs)
self._samples_seen_since_last_saving = 0
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
if self.save_freq == 'epoch':
if multi_worker_util.in_multi_worker_mode():
# Exclude training state variables in user-requested checkpoint file.
with self._training_state.untrack_vars():
self._save_model(epoch=epoch, logs=logs)
else:
self._save_model(epoch=epoch, logs=logs)
if multi_worker_util.in_multi_worker_mode():
# For multi-worker training, back up the weights and current training
# state for possible future recovery.
# TODO(rchao): Call `back_up` at finer period such as N steps.
self._training_state.back_up(epoch)
def _save_model(self, epoch, logs):
"""Saves the model.
Arguments:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
logs = logs or {}
if isinstance(self.save_freq,
int) or self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self._get_file_path(epoch, logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.', self.monitor)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
self._maybe_remove_file()
def _get_file_path(self, epoch, logs):
"""Returns the file path for checkpoint."""
if not multi_worker_util.in_multi_worker_mode(
) or multi_worker_util.should_save_checkpoint():
return self.filepath.format(epoch=epoch + 1, **logs)
else:
# If this is multi-worker training, and this worker should not
# save checkpoint, we use a temp filepath to store a dummy checkpoint, so
# it writes to a file that will be removed at the end of `_save_model()`
# call. This is because the SyncOnReadVariable needs to be synced across
# all the workers in order to be read, and all workers need to initiate
# that.
self._temp_file_dir = tempfile.mkdtemp()
extension = os.path.splitext(self.filepath)[1]
return os.path.join(self._temp_file_dir, 'temp' + extension)
def _maybe_remove_file(self):
# Remove the checkpoint directory in multi-worker training where this worker
# should not checkpoint. It is a dummy directory previously saved for sync
# distributed training.
if multi_worker_util.in_multi_worker_mode(
) and not multi_worker_util.should_save_checkpoint():
file_io.delete_recursively(self._temp_file_dir)
del self._temp_file_dir
def _get_most_recently_modified_file_matching_pattern(self, pattern):
"""Returns the most recently modified filepath matching pattern.
Pattern may contain python formatting placeholder. If
`tf.train.latest_checkpoint()` does not return None, use that; otherwise,
check for most recently modified one that matches the pattern.
In the rare case where there are more than one pattern-matching file having
the same modified time that is most recent among all, return the filepath
that is largest (by `>` operator, lexicographically using the numeric
equivalents). This provides a tie-breaker when multiple files are most
recent. Note that a larger `filepath` can sometimes indicate a later time of
modification (for instance, when epoch/batch is used as formatting option),
but not necessarily (when accuracy or loss is used). The tie-breaker is
    put in the logic as a best effort to return the most recent file, and to
    avoid a nondeterministic result.
Modified time of a file is obtained with `os.path.getmtime()`.
This utility function is best demonstrated via an example:
```python
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
# Write something to each of the files
self.assertEqual(
_get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
```
Arguments:
pattern: The file pattern that may optionally contain python placeholder
such as `{epoch:02d}`.
Returns:
The most recently modified file's full filepath matching `pattern`. If
`pattern` does not contain any placeholder, this returns the filepath
      that exactly matches `pattern`. Returns `None` if no match is found.
"""
dir_name = os.path.dirname(pattern)
base_name = os.path.basename(pattern)
base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
# If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
# use that as it is more robust than `os.path.getmtime()`.
latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
if latest_tf_checkpoint is not None and re.match(
base_name_regex, os.path.basename(latest_tf_checkpoint)):
return latest_tf_checkpoint
latest_mod_time = 0
file_path_with_latest_mod_time = None
n_file_with_latest_mod_time = 0
file_path_with_largest_file_name = None
if file_io.file_exists(dir_name):
for file_name in os.listdir(dir_name):
# Only consider if `file_name` matches the pattern.
if re.match(base_name_regex, file_name):
file_path = os.path.join(dir_name, file_name)
mod_time = os.path.getmtime(file_path)
if (file_path_with_largest_file_name is None or
file_path > file_path_with_largest_file_name):
file_path_with_largest_file_name = file_path
if mod_time > latest_mod_time:
latest_mod_time = mod_time
file_path_with_latest_mod_time = file_path
# In the case a file with later modified time is found, reset
# the counter for the number of files with latest modified time.
n_file_with_latest_mod_time = 1
elif mod_time == latest_mod_time:
# In the case a file has modified time tied with the most recent,
# increment the counter for the number of files with latest modified
# time by 1.
n_file_with_latest_mod_time += 1
if n_file_with_latest_mod_time == 1:
# Return the sole file that has most recent modified time.
return file_path_with_latest_mod_time
else:
# If there are more than one file having latest modified time, return
# the file path with the largest file name.
return file_path_with_largest_file_name
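# Illustrative sketch (not part of the original module): how the `filepath`
# placeholders documented above are resolved by `_get_file_path`. The epoch
# index and `val_loss` value are hypothetical.
def _example_checkpoint_name():
  filepath = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
  logs = {'val_loss': 0.1234}
  # Mirrors `self.filepath.format(epoch=epoch + 1, **logs)` for epoch index 4.
  return filepath.format(epoch=4 + 1, **logs)  # -> 'weights.05-0.12.hdf5'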
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
      to qualify as an improvement, i.e. an absolute
      change of less than min_delta will count as no
      improvement.
patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
restore_best_weights: Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used.
Example:
```python
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
# This callback will stop the training when there is no improvement in
# the validation loss for three consecutive epochs.
model.fit(data, labels, epochs=100, callbacks=[callback],
validation_data=(val_data, val_labels))
```
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.restore_best_weights = restore_best_weights
self.best_weights = None
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning('Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return monitor_value
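# Illustrative sketch (hypothetical numbers): with monitor='val_loss' the
# constructor above flips `min_delta` to a negative value, so the improvement
# test `monitor_op(current - min_delta, best)` becomes
# np.less(current + abs(min_delta), best). For min_delta=0.01, best=0.50 and
# current=0.495 that is np.less(0.505, 0.50) -> False, so the epoch still
# counts toward `patience` even though the loss decreased slightly.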
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
If send_as_json is set to True, the content type of the request will be
application/json. Otherwise the serialized JSON will be sent within a form.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
The field is used only if the payload is sent within a form
(i.e. send_as_json is set to False).
headers: Dictionary; optional custom HTTP headers.
send_as_json: Boolean; whether the request should be
sent as application/json.
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None,
send_as_json=False):
super(RemoteMonitor, self).__init__()
self.root = root
self.path = path
self.field = field
self.headers = headers
self.send_as_json = send_as_json
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
if self.send_as_json:
requests.post(self.root + self.path, json=send, headers=self.headers)
else:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
```python
# This function keeps the learning rate at 0.001 for the first ten epochs
# and decreases it exponentially after that.
def scheduler(epoch):
if epoch < 10:
return 0.001
else:
return 0.001 * tf.math.exp(0.1 * (10 - epoch))
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
model.fit(data, labels, epochs=100, callbacks=[callback],
validation_data=(val_data, val_labels))
```
"""
def __init__(self, schedule, verbose=0):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
try: # new API
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.schedule(epoch, lr)
except TypeError: # Support for old API for backward compatibility
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nEpoch %05d: LearningRateScheduler reducing learning '
'rate to %s.' % (epoch + 1, lr))
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling. Must run in TensorFlow eager mode.
embeddings_freq: frequency (in epochs) at which embedding layers will
be visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved. See the
[details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
      about the metadata file format. If the same metadata file is to be
      used for all embedding layers, a single string can be passed.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='logs',
histogram_freq=0,
write_graph=True,
write_images=False,
update_freq='epoch',
profile_batch=2,
embeddings_freq=0,
embeddings_metadata=None,
**kwargs):
super(TensorBoard, self).__init__()
self._validate_kwargs(kwargs)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.write_graph = write_graph
self.write_images = write_images
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self.embeddings_freq = embeddings_freq
self.embeddings_metadata = embeddings_metadata
self._samples_seen = 0
self._samples_seen_at_last_write = 0
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
# A collection of file writers currently in use, to be closed when
# training ends for this callback. Writers are keyed by the
# directory name under the root logdir: e.g., "train" or
# "validation".
self._writers = {}
self._train_run_name = 'train'
self._validation_run_name = 'validation'
self._profile_batch = profile_batch
# True when a trace is running.
self._is_tracing = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _validate_kwargs(self, kwargs):
"""Handle arguments were supported in V1."""
if kwargs.get('write_grads', False):
logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
'for the `TensorBoard` Callback.')
if kwargs.get('batch_size', False):
logging.warning('`batch_size` is no longer needed in the '
'`TensorBoard` Callback and will be ignored '
'in TensorFlow 2.0.')
if kwargs.get('embeddings_layer_names', False):
logging.warning('`embeddings_layer_names` is not supported in '
'TensorFlow 2.0. Instead, all `Embedding` layers '
'will be visualized.')
if kwargs.get('embeddings_data', False):
logging.warning('`embeddings_data` is not supported in TensorFlow '
'2.0. Instead, all `Embedding` variables will be '
'visualized.')
unrecognized_kwargs = set(kwargs.keys()) - {
'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
}
# Only allow kwargs that were supported in V1.
if unrecognized_kwargs:
raise ValueError('Unrecognized arguments in `TensorBoard` '
'Callback: ' + str(unrecognized_kwargs))
def set_model(self, model):
"""Sets Keras model and writes graph if specified."""
self.model = model
with context.eager_mode():
self._close_writers()
if self.write_graph:
with self._get_writer(self._train_run_name).as_default():
with summary_ops_v2.always_record_summaries():
if not model.run_eagerly:
summary_ops_v2.graph(K.get_graph(), step=0)
summary_writable = (
self.model._is_graph_network or # pylint: disable=protected-access
self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access
if summary_writable:
summary_ops_v2.keras_model('keras', self.model, step=0)
if self.embeddings_freq:
self._configure_embeddings()
def _configure_embeddings(self):
"""Configure the Projector for embeddings."""
# TODO(omalleyt): Add integration tests.
from tensorflow.python.keras.layers import embeddings
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
'TensorBoard integration is complete."')
config = projector.ProjectorConfig()
for layer in self.model.layers:
if isinstance(layer, embeddings.Embedding):
embedding = config.embeddings.add()
embedding.tensor_name = layer.embeddings.name
if self.embeddings_metadata is not None:
if isinstance(self.embeddings_metadata, str):
embedding.metadata_path = self.embeddings_metadata
else:
            if layer.name in self.embeddings_metadata:
embedding.metadata_path = self.embeddings_metadata.pop(layer.name)
if self.embeddings_metadata:
raise ValueError('Unrecognized `Embedding` layer names passed to '
'`keras.callbacks.TensorBoard` `embeddings_metadata` '
'argument: ' + str(self.embeddings_metadata.keys()))
class DummyWriter(object):
"""Dummy writer to conform to `Projector` API."""
def __init__(self, logdir):
self.logdir = logdir
def get_logdir(self):
return self.logdir
writer = DummyWriter(self.log_dir)
projector.visualize_embeddings(writer, config)
def _close_writers(self):
"""Close all remaining open file writers owned by this callback.
If there are no such file writers, this is a no-op.
"""
with context.eager_mode():
for writer in six.itervalues(self._writers):
writer.close()
self._writers.clear()
def _get_writer(self, writer_name):
"""Get a summary writer for the given subdirectory under the logdir.
A writer will be created if it does not yet exist.
Arguments:
writer_name: The name of the directory for which to create or
retrieve a writer. Should be either `self._train_run_name` or
`self._validation_run_name`.
Returns:
A `SummaryWriter` object.
"""
if writer_name not in self._writers:
path = os.path.join(self.log_dir, writer_name)
writer = summary_ops_v2.create_file_writer_v2(path)
self._writers[writer_name] = writer
return self._writers[writer_name]
def on_train_begin(self, logs=None):
if self._profile_batch == 1:
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
    Performs profiling if the current batch matches `profile_batch`.
Arguments:
batch: Integer, index of batch within the current epoch.
logs: Dict. Metric results for this batch.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
self._log_metrics(logs, prefix='batch_', step=self._total_batches_seen)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_tracing:
self._log_trace()
elif (not self._is_tracing and
self._total_batches_seen == self._profile_batch - 1):
self._enable_trace()
def on_epoch_end(self, epoch, logs=None):
"""Runs metrics and histogram summaries at epoch end."""
step = epoch if self.update_freq == 'epoch' else self._samples_seen
self._log_metrics(logs, prefix='epoch_', step=step)
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._log_weights(epoch)
if self.embeddings_freq and epoch % self.embeddings_freq == 0:
self._log_embeddings(epoch)
def on_train_end(self, logs=None):
if self._is_tracing:
self._log_trace()
self._close_writers()
def _enable_trace(self):
if context.executing_eagerly():
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
def _log_trace(self):
if context.executing_eagerly():
with self._get_writer(self._train_run_name).as_default(), \
summary_ops_v2.always_record_summaries():
# TODO(b/126388999): Remove step info in the summary name.
summary_ops_v2.trace_export(
name='batch_%d' % self._total_batches_seen,
step=self._total_batches_seen,
profiler_outdir=os.path.join(self.log_dir, 'train'))
self._is_tracing = False
def _log_metrics(self, logs, prefix, step):
"""Writes metrics out as custom scalar summaries.
Arguments:
logs: Dict. Keys are scalar summary names, values are NumPy scalars.
prefix: String. The prefix to apply to the scalar summary names.
step: Int. The global step to use for TensorBoard.
"""
if logs is None:
logs = {}
# Group metrics by the name of their associated file writer. Values
# are lists of metrics, as (name, scalar_value) pairs.
logs_by_writer = {
self._train_run_name: [],
self._validation_run_name: [],
}
validation_prefix = 'val_'
for (name, value) in logs.items():
if name in ('batch', 'size', 'num_steps'):
# Scrub non-metric items.
continue
if name.startswith(validation_prefix):
name = name[len(validation_prefix):]
writer_name = self._validation_run_name
else:
writer_name = self._train_run_name
name = prefix + name # assign batch or epoch prefix
logs_by_writer[writer_name].append((name, value))
with context.eager_mode():
with summary_ops_v2.always_record_summaries():
for writer_name in logs_by_writer:
these_logs = logs_by_writer[writer_name]
if not these_logs:
# Don't create a "validation" events file if we don't
# actually have any validation data.
continue
writer = self._get_writer(writer_name)
with writer.as_default():
for (name, value) in these_logs:
summary_ops_v2.scalar(name, value, step=step)
def _log_weights(self, epoch):
"""Logs the weights of the Model to TensorBoard."""
writer = self._get_writer(self._train_run_name)
with context.eager_mode(), \
writer.as_default(), \
summary_ops_v2.always_record_summaries():
for layer in self.model.layers:
for weight in layer.weights:
weight_name = weight.name.replace(':', '_')
with ops.init_scope():
weight = K.get_value(weight)
summary_ops_v2.histogram(weight_name, weight, step=epoch)
if self.write_images:
self._log_weight_as_image(weight, weight_name, epoch)
writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
"""Logs a weight as a TensorBoard image."""
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 1: # Bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
elif len(shape) == 2: # Dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # ConvNet case
if K.image_data_format() == 'channels_last':
# Switch to channels_first to display every kernel as a separate
# image.
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
shape = K.int_shape(w_img)
# Not possible to handle 3D convnets etc.
if len(shape) == 4 and shape[-1] in [1, 3, 4]:
summary_ops_v2.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(self.log_dir, 'train',
'keras_embedding.ckpt-{}'.format(epoch))
self.model.save_weights(embeddings_ckpt)
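# Illustrative sketch (hypothetical values): with log_dir='logs' this callback
# keeps one writer per run under 'logs/train' and 'logs/validation'. With
# update_freq=1000 and batches of 250 samples, `on_batch_end` writes 'batch_*'
# scalars once every 4 batches; with update_freq='epoch' only `on_epoch_end`
# writes 'epoch_*' scalars.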
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will be reduced. new_lr = lr *
factor
patience: number of epochs with no improvement after which learning rate
will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode, lr will be reduced when the
quantity monitored has stopped decreasing; in `max` mode it will be
reduced when the quantity monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred from the name of the
monitored quantity.
min_delta: threshold for measuring the new optimum, to only focus on
significant changes.
cooldown: number of epochs to wait before resuming normal operation after
lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
min_delta=1e-4,
cooldown=0,
min_lr=0,
**kwargs):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
      raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')
if 'epsilon' in kwargs:
min_delta = kwargs.pop('epsilon')
logging.warning('`epsilon` argument is deprecated and '
'will be removed, use `min_delta` instead.')
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.', self.mode)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
logging.warning('Reduce LR on plateau conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
'rate to %s.' % (epoch + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
return self.cooldown_counter > 0
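# Illustrative sketch (hypothetical numbers): with factor=0.2, min_lr=1e-5 and
# a current learning rate of 1e-3, once `wait` reaches `patience` the new rate
# is max(1e-3 * 0.2, 1e-5) = 2e-4, and the callback then waits `cooldown`
# epochs before counting non-improving epochs again.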
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
      training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename,
mode + self.file_flags,
**self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
r"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
  at the appropriate time. Note that the callbacks expect positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| tensorflow-master | tensorflow/python/keras/callbacks.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import scipy.sparse
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def compare_single_input_op_to_numpy(keras_op,
np_op,
input_shape,
dtype='float32',
negative_values=True,
keras_args=None,
keras_kwargs=None,
np_args=None,
np_kwargs=None):
keras_args = keras_args or []
keras_kwargs = keras_kwargs or {}
np_args = np_args or []
np_kwargs = np_kwargs or {}
inputs = 2. * np.random.random(input_shape)
if negative_values:
inputs -= 1.
keras_output = keras_op(keras.backend.variable(inputs, dtype=dtype),
*keras_args, **keras_kwargs)
keras_output = keras.backend.eval(keras_output)
np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs)
try:
np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
except AssertionError:
raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '
'Expected ' + str(np_output) + ' but got ' +
str(keras_output))
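# Illustrative usage (not part of the original file): the helper above is
# typically called with a backend op and its NumPy counterpart, e.g.
#
#   compare_single_input_op_to_numpy(keras.backend.square, np.square,
#                                    input_shape=(4, 7))
#
# which evaluates both ops on the same random inputs and asserts element-wise
# closeness within atol=1e-4.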
def compare_two_inputs_op_to_numpy(keras_op,
np_op,
input_shape_a,
input_shape_b,
dtype='float32',
keras_args=None,
keras_kwargs=None,
np_args=None,
np_kwargs=None):
keras_args = keras_args or []
keras_kwargs = keras_kwargs or {}
np_args = np_args or []
np_kwargs = np_kwargs or {}
input_a = np.random.random(input_shape_a)
input_b = np.random.random(input_shape_b)
keras_output = keras_op(keras.backend.variable(input_a, dtype=dtype),
keras.backend.variable(input_b, dtype=dtype),
*keras_args, **keras_kwargs)
keras_output = keras.backend.eval(keras_output)
np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),
*np_args, **np_kwargs)
try:
np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
except AssertionError:
raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '
'Expected ' + str(np_output) + ' but got ' +
str(keras_output))
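# Illustrative usage (not part of the original file), mirroring the helper
# above for two-input ops, e.g.
#
#   compare_two_inputs_op_to_numpy(keras.backend.maximum, np.maximum,
#                                  input_shape_a=(4, 7), input_shape_b=(4, 7))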
class BackendResetTest(test.TestCase, parameterized.TestCase):
@test_util.run_all_in_graph_and_eager_modes
def test_new_config(self):
# User defined jit setting
config.set_optimizer_jit(False)
sess = keras.backend.get_session()
default_config = context.context().config
self.assertEqual(
sess._config.graph_options.optimizer_options.global_jit_level,
default_config.graph_options.optimizer_options.global_jit_level)
keras.backend.clear_session()
# New session has the same jit setting
sess = keras.backend.get_session()
default_config = context.context().config
self.assertEqual(
sess._config.graph_options.optimizer_options.global_jit_level,
default_config.graph_options.optimizer_options.global_jit_level)
keras.backend.clear_session()
# Change respected
config.set_optimizer_jit(True)
sess = keras.backend.get_session()
default_config = context.context().config
self.assertEqual(
sess._config.graph_options.optimizer_options.global_jit_level,
default_config.graph_options.optimizer_options.global_jit_level)
keras.backend.clear_session()
# We can't use the normal parameterized decorator because the test session
# will block graph clearing.
@parameterized.named_parameters(('_v1', context.graph_mode),
('_v2', context.eager_mode))
def test_new_graph(self, test_context):
with test_context():
g_old = keras.backend.get_graph()
keras.backend.clear_session()
g = keras.backend.get_graph()
assert g_old is not g
@test_util.run_all_in_graph_and_eager_modes
class BackendUtilsTest(test.TestCase):
def test_backend(self):
self.assertEqual(keras.backend.backend(), 'tensorflow')
def test_get_reset_uids(self):
self.assertEqual(keras.backend.get_uid('foo'), 1)
self.assertEqual(keras.backend.get_uid('foo'), 2)
keras.backend.reset_uids()
self.assertEqual(keras.backend.get_uid('foo'), 1)
def test_learning_phase(self):
with self.cached_session() as sess:
with self.assertRaises(ValueError):
keras.backend.set_learning_phase(2)
# Test running with a learning-phase-consuming layer
with keras.backend.learning_phase_scope(0):
x = keras.Input((3,))
y = keras.layers.BatchNormalization()(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
sess.run(y, feed_dict={x: np.random.random((2, 3))})
def test_learning_phase_name(self):
with ops.name_scope('test_scope'):
# Test that outer name scopes do not affect the learning phase's name.
lp = keras.backend.symbolic_learning_phase()
self.assertEqual(lp.name, 'keras_learning_phase:0')
def test_learning_phase_scope(self):
initial_learning_phase = keras.backend.learning_phase()
with keras.backend.learning_phase_scope(1):
self.assertEqual(keras.backend.learning_phase(), 1)
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
with keras.backend.learning_phase_scope(0):
self.assertEqual(keras.backend.learning_phase(), 0)
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
with self.assertRaises(ValueError):
with keras.backend.learning_phase_scope(None):
pass
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
new_learning_phase = 0
keras.backend.set_learning_phase(new_learning_phase)
self.assertEqual(keras.backend.learning_phase(), new_learning_phase)
with keras.backend.learning_phase_scope(1):
self.assertEqual(keras.backend.learning_phase(), 1)
self.assertEqual(keras.backend.learning_phase(), new_learning_phase)
def test_learning_phase_scope_in_graph(self):
initial_learning_phase_outside_graph = keras.backend.learning_phase()
with keras.backend.get_graph().as_default():
initial_learning_phase_in_graph = keras.backend.learning_phase()
self.assertEqual(keras.backend.learning_phase(),
initial_learning_phase_outside_graph)
with keras.backend.learning_phase_scope(1):
self.assertEqual(keras.backend.learning_phase(), 1)
self.assertEqual(keras.backend.learning_phase(),
initial_learning_phase_outside_graph)
with keras.backend.get_graph().as_default():
self.assertEqual(keras.backend.learning_phase(),
initial_learning_phase_in_graph)
self.assertEqual(keras.backend.learning_phase(),
initial_learning_phase_outside_graph)
def test_int_shape(self):
x = keras.backend.ones(shape=(3, 4))
self.assertEqual(keras.backend.int_shape(x), (3, 4))
if not context.executing_eagerly():
x = keras.backend.placeholder(shape=(None, 4))
self.assertEqual(keras.backend.int_shape(x), (None, 4))
def test_in_train_phase(self):
y1 = keras.backend.variable(1)
y2 = keras.backend.variable(2)
if context.executing_eagerly():
with keras.backend.learning_phase_scope(0):
y_val_test = keras.backend.in_train_phase(y1, y2).numpy()
with keras.backend.learning_phase_scope(1):
y_val_train = keras.backend.in_train_phase(y1, y2).numpy()
else:
y = keras.backend.in_train_phase(y1, y2)
f = keras.backend.function([keras.backend.learning_phase()], [y])
y_val_test = f([0])[0]
y_val_train = f([1])[0]
self.assertAllClose(y_val_test, 2)
self.assertAllClose(y_val_train, 1)
def test_is_keras_tensor(self):
x = keras.backend.variable(1)
self.assertEqual(keras.backend.is_keras_tensor(x), False)
x = keras.Input(shape=(1,))
self.assertEqual(keras.backend.is_keras_tensor(x), True)
with self.assertRaises(ValueError):
keras.backend.is_keras_tensor(0)
def test_stop_gradient(self):
x = keras.backend.variable(1)
y = keras.backend.stop_gradient(x)
if not context.executing_eagerly():
self.assertEqual(y.op.name[:12], 'StopGradient')
xs = [keras.backend.variable(1) for _ in range(3)]
ys = keras.backend.stop_gradient(xs)
if not context.executing_eagerly():
for y in ys:
self.assertEqual(y.op.name[:12], 'StopGradient')
def test_placeholder(self):
x = keras.backend.placeholder(shape=(3, 4))
self.assertEqual(x.shape.as_list(), [3, 4])
x = keras.backend.placeholder(shape=(3, 4), sparse=True)
self.assertEqual(x.shape.as_list(), [3, 4])
def test_is_placeholder(self):
x = keras.backend.placeholder(shape=(1,))
self.assertEqual(keras.backend.is_placeholder(x), True)
x = keras.backend.variable(1)
self.assertEqual(keras.backend.is_placeholder(x), False)
def test_print_tensor(self):
# It does not seem possible to use `mock` (or any other method) to capture
# stdout when the op runs inside a graph or graph function, so the
# correctness of the printed output cannot be asserted here.
# The message is printed correctly in practice.
x = keras.backend.placeholder(shape=())
y = keras.backend.print_tensor(x, 'eager=%s' % context.executing_eagerly())
f = keras.backend.function(x, y)
f(0)
@test_util.run_all_in_graph_and_eager_modes
class BackendVariableTest(test.TestCase):
def test_zeros(self):
x = keras.backend.zeros((3, 4))
val = keras.backend.eval(x)
self.assertAllClose(val, np.zeros((3, 4)))
def test_ones(self):
x = keras.backend.ones((3, 4))
val = keras.backend.eval(x)
self.assertAllClose(val, np.ones((3, 4)))
def test_eye(self):
x = keras.backend.eye(4)
val = keras.backend.eval(x)
self.assertAllClose(val, np.eye(4))
def test_zeros_like(self):
x = keras.backend.zeros((3, 4))
y = keras.backend.zeros_like(x)
val = keras.backend.eval(y)
self.assertAllClose(val, np.zeros((3, 4)))
def test_ones_like(self):
x = keras.backend.zeros((3, 4))
y = keras.backend.ones_like(x)
val = keras.backend.eval(y)
self.assertAllClose(val, np.ones((3, 4)))
def test_random_uniform_variable(self):
x = keras.backend.random_uniform_variable((30, 20), low=1, high=2, seed=0)
val = keras.backend.eval(x)
self.assertAllClose(val.mean(), 1.5, atol=1e-1)
self.assertAllClose(val.max(), 2., atol=1e-1)
self.assertAllClose(val.min(), 1., atol=1e-1)
def test_random_normal_variable(self):
x = keras.backend.random_normal_variable((30, 20), 1., 0.5, seed=0)
val = keras.backend.eval(x)
self.assertAllClose(val.mean(), 1., atol=1e-1)
self.assertAllClose(val.std(), 0.5, atol=1e-1)
def test_count_params(self):
x = keras.backend.zeros((4, 5))
val = keras.backend.count_params(x)
self.assertAllClose(val, 20)
def test_constant(self):
ref_val = np.random.random((3, 4)).astype('float32')
x = keras.backend.constant(ref_val)
val = keras.backend.eval(x)
self.assertAllClose(val, ref_val)
def test_sparse_variable(self):
val = scipy.sparse.eye(10)
x = keras.backend.variable(val)
self.assertTrue(isinstance(x, sparse_tensor.SparseTensor))
y = keras.backend.to_dense(x)
self.assertFalse(keras.backend.is_sparse(y))
@test_util.run_all_in_graph_and_eager_modes
class BackendLinearAlgebraTest(test.TestCase):
def test_dot(self):
x = keras.backend.ones(shape=(2, 3))
y = keras.backend.ones(shape=(3, 4))
xy = keras.backend.dot(x, y)
self.assertEqual(xy.shape.as_list(), [2, 4])
x = keras.backend.ones(shape=(32, 28, 3))
y = keras.backend.ones(shape=(3, 4))
xy = keras.backend.dot(x, y)
self.assertEqual(xy.shape.as_list(), [32, 28, 4])
def test_batch_dot(self):
x = keras.backend.ones(shape=(32, 20, 1))
y = keras.backend.ones(shape=(32, 30, 20))
xy = keras.backend.batch_dot(x, y, axes=[1, 2])
self.assertEqual(xy.shape.as_list(), [32, 1, 30])
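# For reference (sketch, not in the original file): with axes=[1, 2] the
# batch_dot above contracts axis 1 of x (size 20) against axis 2 of y
# (size 20), roughly equivalent to
#   np.einsum('bki,bjk->bij', x_val, y_val)  # -> shape (32, 1, 30)
# for NumPy arrays x_val and y_val of the same shapes.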
# TODO(fchollet): insufficiently tested.
def test_reduction_ops(self):
ops_to_test = [
(keras.backend.max, np.max),
(keras.backend.min, np.min),
(keras.backend.sum, np.sum),
(keras.backend.prod, np.prod),
(keras.backend.var, np.var),
(keras.backend.std, np.std),
(keras.backend.mean, np.mean),
(keras.backend.argmin, np.argmin),
(keras.backend.argmax, np.argmax),
]
for keras_op, np_op in ops_to_test:
compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
keras_kwargs={'axis': 1},
np_kwargs={'axis': 1})
compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
keras_kwargs={'axis': -1},
np_kwargs={'axis': -1})
if 'keepdims' in tf_inspect.getargspec(keras_op).args:
compare_single_input_op_to_numpy(keras_op, np_op,
input_shape=(4, 7, 5),
keras_kwargs={'axis': 1,
'keepdims': True},
np_kwargs={'axis': 1,
'keepdims': True})
def test_elementwise_ops(self):
ops_to_test = [
(keras.backend.square, np.square),
(keras.backend.abs, np.abs),
(keras.backend.round, np.round),
(keras.backend.sign, np.sign),
(keras.backend.sin, np.sin),
(keras.backend.cos, np.cos),
(keras.backend.exp, np.exp),
]
for keras_op, np_op in ops_to_test:
compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7))
ops_to_test = [
(keras.backend.sqrt, np.sqrt),
(keras.backend.log, np.log),
]
for keras_op, np_op in ops_to_test:
compare_single_input_op_to_numpy(keras_op, np_op,
input_shape=(4, 7),
negative_values=False)
compare_single_input_op_to_numpy(
keras.backend.clip, np.clip,
input_shape=(6, 4),
keras_kwargs={'min_value': 0.1, 'max_value': 2.4},
np_kwargs={'a_min': 0.1, 'a_max': 2.4})
compare_single_input_op_to_numpy(
keras.backend.pow, np.power,
input_shape=(6, 4),
keras_args=[3],
np_args=[3])
def test_two_tensor_ops(self):
ops_to_test = [
(keras.backend.equal, np.equal),
(keras.backend.not_equal, np.not_equal),
(keras.backend.greater, np.greater),
(keras.backend.greater_equal, np.greater_equal),
(keras.backend.less, np.less),
(keras.backend.less_equal, np.less_equal),
(keras.backend.maximum, np.maximum),
(keras.backend.minimum, np.minimum),
]
for keras_op, np_op in ops_to_test:
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 7),
input_shape_b=(4, 7))
def test_relu(self):
x = ops.convert_to_tensor([[-4, 0], [2, 7]], 'float32')
# standard relu
relu_op = keras.backend.relu(x)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
# alpha (leaky relu used)
relu_op = keras.backend.relu(x, alpha=0.5)
if not context.executing_eagerly():
self.assertTrue('LeakyRelu' in relu_op.name)
self.assertAllClose(keras.backend.eval(relu_op), [[-2, 0], [2, 7]])
# max_value < some elements
relu_op = keras.backend.relu(x, max_value=5)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 5]])
# nn.relu6 used
relu_op = keras.backend.relu(x, max_value=6)
if not context.executing_eagerly():
self.assertTrue('Relu6' in relu_op.name) # uses tf.nn.relu6
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 6]])
# max value > 6
relu_op = keras.backend.relu(x, max_value=10)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
# max value is float
relu_op = keras.backend.relu(x, max_value=4.3)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 4.3]])
# max value == 0
relu_op = keras.backend.relu(x, max_value=0)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 0]])
# alpha and max_value
relu_op = keras.backend.relu(x, alpha=0.25, max_value=3)
self.assertAllClose(keras.backend.eval(relu_op), [[-1, 0], [2, 3]])
# threshold
relu_op = keras.backend.relu(x, threshold=3)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 7]])
# threshold is float
relu_op = keras.backend.relu(x, threshold=1.5)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
# threshold is negative
relu_op = keras.backend.relu(x, threshold=-5)
self.assertAllClose(keras.backend.eval(relu_op), [[-4, 0], [2, 7]])
# threshold and max_value
relu_op = keras.backend.relu(x, threshold=3, max_value=5)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 5]])
# threshold and alpha
relu_op = keras.backend.relu(x, alpha=0.25, threshold=4)
self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 7]])
# threshold, alpha, and max_value
relu_op = keras.backend.relu(x, alpha=0.25, threshold=4, max_value=5)
self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 5]])
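# Hedged NumPy reference (not part of the original file) for the relu
# semantics exercised in test_relu above; the helper name is made up for
# illustration. It reproduces the values asserted in that test: inputs below
# `threshold` map to alpha * (x - threshold), the rest are clipped at
# `max_value`.
def _np_relu_reference(x, alpha=0., max_value=None, threshold=0.):
  x = np.asarray(x, dtype=np.float32)
  kept = x if max_value is None else np.minimum(x, max_value)
  return np.where(x >= threshold, kept, alpha * (x - threshold))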
@test_util.run_all_in_graph_and_eager_modes
class BackendShapeOpsTest(test.TestCase):
def test_reshape(self):
compare_single_input_op_to_numpy(keras.backend.reshape, np.reshape,
input_shape=(4, 7),
keras_args=[(2, 14)],
np_args=[(2, 14)])
def test_concatenate(self):
a = keras.backend.variable(np.ones((1, 2, 3)))
b = keras.backend.variable(np.ones((1, 2, 2)))
y = keras.backend.concatenate([a, b], axis=-1)
self.assertEqual(y.shape.as_list(), [1, 2, 5])
def test_permute_dimensions(self):
compare_single_input_op_to_numpy(keras.backend.permute_dimensions,
np.transpose,
input_shape=(4, 7),
keras_args=[(1, 0)],
np_args=[(1, 0)])
def test_resize_images(self):
height_factor = 2
width_factor = 2
data_format = 'channels_last'
x = keras.backend.variable(np.ones((1, 2, 2, 3)))
y = keras.backend.resize_images(x,
height_factor,
width_factor,
data_format)
self.assertEqual(y.shape.as_list(), [1, 4, 4, 3])
data_format = 'channels_first'
x = keras.backend.variable(np.ones((1, 3, 2, 2)))
y = keras.backend.resize_images(x,
height_factor,
width_factor,
data_format)
self.assertEqual(y.shape.as_list(), [1, 3, 4, 4])
# Invalid use:
with self.assertRaises(ValueError):
keras.backend.resize_images(x,
height_factor,
width_factor,
data_format='unknown')
def test_resize_volumes(self):
height_factor = 2
width_factor = 2
depth_factor = 2
data_format = 'channels_last'
x = keras.backend.variable(np.ones((1, 2, 2, 2, 3)))
y = keras.backend.resize_volumes(x,
depth_factor,
height_factor,
width_factor,
data_format)
self.assertEqual(y.shape.as_list(), [1, 4, 4, 4, 3])
data_format = 'channels_first'
x = keras.backend.variable(np.ones((1, 3, 2, 2, 2)))
y = keras.backend.resize_volumes(x,
depth_factor,
height_factor,
width_factor,
data_format)
self.assertEqual(y.shape.as_list(), [1, 3, 4, 4, 4])
# Invalid use:
with self.assertRaises(ValueError):
keras.backend.resize_volumes(x,
depth_factor,
height_factor,
width_factor,
data_format='unknown')
def test_repeat_elements(self):
x = keras.backend.variable(np.ones((1, 3, 2)))
y = keras.backend.repeat_elements(x, 3, axis=1)
self.assertEqual(y.shape.as_list(), [1, 9, 2])
# Use with a dynamic axis:
if not context.executing_eagerly():
x = keras.backend.placeholder(shape=(2, None, 2))
y = keras.backend.repeat_elements(x, 3, axis=1)
self.assertEqual(y.shape.as_list(), [2, None, 2])
def test_repeat(self):
x = keras.backend.variable(np.ones((1, 3)))
y = keras.backend.repeat(x, 2)
self.assertEqual(y.shape.as_list(), [1, 2, 3])
def test_flatten(self):
compare_single_input_op_to_numpy(keras.backend.flatten,
np.reshape,
input_shape=(4, 7, 6),
np_args=[(4 * 7 * 6,)])
def test_batch_flatten(self):
compare_single_input_op_to_numpy(keras.backend.batch_flatten,
np.reshape,
input_shape=(4, 7, 6),
np_args=[(4, 7 * 6)])
def test_temporal_padding(self):
def ref_op(x, padding):
shape = list(x.shape)
shape[1] += padding[0] + padding[1]
y = np.zeros(tuple(shape))
y[:, padding[0]:-padding[1], :] = x
return y
compare_single_input_op_to_numpy(keras.backend.temporal_padding,
ref_op,
input_shape=(4, 7, 6),
keras_args=[(2, 3)],
np_args=[(2, 3)])
def test_spatial_2d_padding(self):
def ref_op(x, padding, data_format='channels_last'):
shape = list(x.shape)
if data_format == 'channels_last':
shape[1] += padding[0][0] + padding[0][1]
shape[2] += padding[1][0] + padding[1][1]
y = np.zeros(tuple(shape))
y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x
else:
shape[2] += padding[0][0] + padding[0][1]
shape[3] += padding[1][0] + padding[1][1]
y = np.zeros(tuple(shape))
y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x
return y
compare_single_input_op_to_numpy(
keras.backend.spatial_2d_padding,
ref_op,
input_shape=(2, 3, 2, 3),
keras_args=[((2, 3), (1, 2))],
keras_kwargs={'data_format': 'channels_last'},
np_args=[((2, 3), (1, 2))],
np_kwargs={'data_format': 'channels_last'})
compare_single_input_op_to_numpy(
keras.backend.spatial_2d_padding,
ref_op,
input_shape=(2, 3, 2, 3),
keras_args=[((2, 3), (1, 2))],
keras_kwargs={'data_format': 'channels_first'},
np_args=[((2, 3), (1, 2))],
np_kwargs={'data_format': 'channels_first'})
def test_spatial_3d_padding(self):
def ref_op(x, padding, data_format='channels_last'):
shape = list(x.shape)
if data_format == 'channels_last':
shape[1] += padding[0][0] + padding[0][1]
shape[2] += padding[1][0] + padding[1][1]
shape[3] += padding[2][0] + padding[2][1]
y = np.zeros(tuple(shape))
y[:,
padding[0][0]:-padding[0][1],
padding[1][0]:-padding[1][1],
padding[2][0]:-padding[2][1],
:] = x
else:
shape[2] += padding[0][0] + padding[0][1]
shape[3] += padding[1][0] + padding[1][1]
shape[4] += padding[2][0] + padding[2][1]
y = np.zeros(tuple(shape))
y[:, :,
padding[0][0]:-padding[0][1],
padding[1][0]:-padding[1][1],
padding[2][0]:-padding[2][1]] = x
return y
compare_single_input_op_to_numpy(
keras.backend.spatial_3d_padding,
ref_op,
input_shape=(2, 3, 2, 3, 2),
keras_args=[((2, 3), (1, 2), (2, 3))],
keras_kwargs={'data_format': 'channels_last'},
np_args=[((2, 3), (1, 2), (2, 3))],
np_kwargs={'data_format': 'channels_last'})
compare_single_input_op_to_numpy(
keras.backend.spatial_3d_padding,
ref_op,
input_shape=(2, 3, 2, 3, 2),
keras_args=[((2, 3), (1, 2), (2, 3))],
keras_kwargs={'data_format': 'channels_first'},
np_args=[((2, 3), (1, 2), (2, 3))],
np_kwargs={'data_format': 'channels_first'})
@test_util.run_all_in_graph_and_eager_modes
class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
def test_bias_add(self):
keras_op = keras.backend.bias_add
np_op = np.add
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 2, 7),
input_shape_b=(7,))
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
x = keras.backend.variable((3, 4))
b = keras.backend.variable((3, 4))
keras.backend.bias_add(x, b)
with self.assertRaises(ValueError):
x = keras.backend.variable((3, 4))
b = keras.backend.variable((4,))
keras.backend.bias_add(x, b, data_format='unknown')
def test_bias_add_channels_first(self):
def keras_op(x, b):
return keras.backend.bias_add(x, b, data_format='channels_first')
def np_op(x, b):
if x.ndim == 3:
b = b.reshape((1, b.shape[0], 1))
if x.ndim == 4:
b = b.reshape((1, b.shape[0], 1, 1))
return x + b
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 7),
input_shape_b=(3,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 7),
input_shape_b=(3,))
def test_pool2d(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_first',
pool_mode='max')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_first',
pool_mode='avg')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 9, 9, 3])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 3])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 3])
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2, 2), strides=(2, 2))
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2, 2))
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other')
def test_pool3d(self):
val = np.random.random((10, 3, 10, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_first',
pool_mode='max')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_first',
pool_mode='avg')
self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 9, 9, 9, 3])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 3])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(2, 2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 3])
def test_conv1d(self):
val = np.random.random((10, 4, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 5])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 5])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(2,),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5])
def test_local_conv_channels_dim(self):
filters = 3
batch_size = 2
for input_shape in [(3, 5), (2, 3, 5), (2, 5, 3, 4)]:
channels_in = input_shape[0]
input_spatial_shape = input_shape[1:]
dim = len(input_spatial_shape)
inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
inputs_cf = keras.backend.variable(inputs)
for kernel_size in [1, 2]:
for stride in [1, 2]:
kernel_sizes = (kernel_size,) * dim
strides = (stride,) * dim
output_shape = tuple([(i - kernel_size + stride) // stride
for i in input_spatial_shape])
kernel_shape = (np.prod(output_shape),
np.prod(kernel_sizes) * channels_in,
filters)
kernel = np.random.normal(
0,
1,
output_shape + (channels_in, np.prod(kernel_sizes), filters)
)
kernel_cf = np.reshape(kernel, kernel_shape)
kernel_cf = keras.backend.variable(kernel_cf)
conv_cf = keras.backend.local_conv(inputs_cf,
kernel_cf,
kernel_sizes,
strides,
output_shape,
'channels_first')
inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) +
[1])
inputs_cl = keras.backend.variable(inputs_cl)
kernel_cl = np.reshape(
np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]),
kernel_shape
)
kernel_cl = keras.backend.variable(kernel_cl)
conv_cl = keras.backend.local_conv(inputs_cl,
kernel_cl,
kernel_sizes,
strides,
output_shape,
'channels_last')
conv_cf = keras.backend.eval(conv_cf)
conv_cl = keras.backend.eval(conv_cl)
self.assertAllCloseAccordingToType(
conv_cf,
np.transpose(conv_cl,
[0, dim + 1] + list(range(1, dim + 1))),
atol=1e-5
)
@parameterized.named_parameters(
('local_conv1d', (5, 6), (3,), (1,), (3,)),
('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3)))
def test_local_conv_1d_and_2d(self,
input_shape,
kernel_sizes,
strides,
output_shape):
filters = 3
batch_size = 2
inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
inputs = keras.backend.variable(inputs)
kernel = np.random.normal(0, 1, (np.prod(output_shape),
np.prod(kernel_sizes) * input_shape[-1],
filters))
kernel = keras.backend.variable(kernel)
local_conv = keras.backend.local_conv(inputs,
kernel,
kernel_sizes,
strides,
output_shape,
'channels_last')
if len(output_shape) == 1:
local_conv_dim = keras.backend.local_conv1d(inputs,
kernel,
kernel_sizes,
strides,
'channels_last')
else:
local_conv_dim = keras.backend.local_conv2d(inputs,
kernel,
kernel_sizes,
strides,
output_shape,
'channels_last')
local_conv = keras.backend.eval(local_conv)
local_conv_dim = keras.backend.eval(local_conv_dim)
self.assertAllCloseAccordingToType(local_conv, local_conv_dim)
def test_conv2d(self):
kernel_val = np.random.random((3, 3, 4, 5))
k = keras.backend.variable(kernel_val)
# Test channels_first
val = np.random.random((10, 4, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k,
padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])
# Test channels_last
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])
# Test same padding
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k,
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])
# Test dilation_rate
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, dilation_rate=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])
# Test strides
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])
# Test invalid arguments
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2, 2))
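# Shape arithmetic behind the assertions above (sketch, not in the original
# file): 'valid' convolution gives out = (in - kernel) // stride + 1, e.g.
# (10 - 3) // 1 + 1 = 8, while 'same' padding gives out = ceil(in / stride),
# e.g. ceil(10 / 2) = 5; dilation with 'same' padding leaves the spatial size
# unchanged.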
def test_conv2d_transpose(self):
input_size = (7, 8)
kernel_size = (3, 3)
input_depth = 6
filters = 6
batch_size = 2
kernel_val = np.random.random(kernel_size + (input_depth, filters))
k = keras.backend.variable(kernel_val)
# Test channels_first
input_val = np.random.random((batch_size, input_depth) + input_size)
x = keras.backend.variable(input_val)
y = keras.backend.conv2d_transpose(x, k, (batch_size, filters) + input_size,
padding='same',
data_format='channels_first')
self.assertEqual(
tuple(y.shape.as_list()), (batch_size, filters) + input_size)
# Test channels_last
input_val = np.random.random((batch_size,) + input_size + (input_depth,))
x = keras.backend.variable(input_val)
y = keras.backend.conv2d_transpose(
x, k, (batch_size,) + input_size + (filters,),
padding='same',
data_format='channels_last')
self.assertEqual(
tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))
# Test dilation_rate
y = keras.backend.conv2d_transpose(
x, k, (batch_size,) + input_size + (filters,),
padding='same',
data_format='channels_last',
dilation_rate=(2, 2))
self.assertEqual(
tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))
# Test batch size of None in output_shape
y = keras.backend.conv2d_transpose(x, k, (None,) + input_size + (filters,),
padding='same',
data_format='channels_last')
self.assertEqual(
tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))
# Test invalid values
with self.assertRaises(ValueError):
y = keras.backend.conv2d_transpose(x, k, (2, 2, 8, 9),
padding='other',
data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv2d_transpose(x, k, (2, 2, 8, 9),
data_format='other')
def test_separable_conv2d(self):
val = np.random.random((10, 4, 10, 10))
x = keras.backend.variable(val)
depthwise_kernel_val = np.random.random((3, 3, 4, 1))
pointwise_kernel_val = np.random.random((1, 1, 4, 5))
dk = keras.backend.variable(depthwise_kernel_val)
pk = keras.backend.variable(pointwise_kernel_val)
y = keras.backend.separable_conv2d(
x, dk, pk, padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(
x, dk, pk, (2, 2), padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(
x, dk, pk, (2, 2), data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(x, dk, pk, (2, 2, 2))
def test_conv3d(self):
val = np.random.random((10, 4, 10, 10, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 3, 3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv3d(x, k,
padding='valid', data_format='channels_first')
self.assertEqual(y.shape.as_list(), [10, 5, 8, 8, 8])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(1, 1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 8, 8, 8, 5])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(1, 1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 5])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(2, 2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2))
def test_rnn(self):
# implement a simple RNN
num_samples = 4
input_dim = 5
output_dim = 3
timesteps = 6
input_val = np.random.random(
(num_samples, timesteps, input_dim)).astype(np.float32)
init_state_val = np.random.random(
(num_samples, output_dim)).astype(np.float32)
w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
np_mask = np.random.randint(2, size=(num_samples, timesteps))
def rnn_step_fn():
w_i = keras.backend.variable(w_i_val)
w_o = keras.backend.variable(w_o_val)
def step_function(x, states):
assert len(states) == 1
prev_output = states[0]
output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
return output, [output]
return step_function
# test default setup
last_output_list = [[], [], [], [], [], []]
outputs_list = [[], [], [], [], [], []]
state_list = [[], [], [], [], [], []]
rnn_fn = rnn_step_fn()
inputs = keras.backend.variable(input_val)
initial_states = [keras.backend.variable(init_state_val)]
mask = keras.backend.variable(np_mask)
kwargs_list = [
{'go_backwards': False, 'mask': None},
{'go_backwards': False, 'mask': None, 'unroll': True},
{'go_backwards': True, 'mask': None},
{'go_backwards': True, 'mask': None, 'unroll': True},
{'go_backwards': False, 'mask': mask},
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
# check static shape inference
self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])
self.assertEqual(outputs.shape.as_list(),
[num_samples, timesteps, output_dim])
for state in new_states:
self.assertEqual(state.shape.as_list(), [num_samples, output_dim])
last_output_list[i].append(keras.backend.eval(last_output))
outputs_list[i].append(keras.backend.eval(outputs))
self.assertLen(new_states, 1)
state_list[i].append(keras.backend.eval(new_states[0]))
def assert_list_pairwise(z_list, atol=1e-05):
for (z1, z2) in zip(z_list[1:], z_list[:-1]):
self.assertAllClose(z1, z2, atol=atol)
assert_list_pairwise(last_output_list[0], atol=1e-04)
assert_list_pairwise(outputs_list[0], atol=1e-04)
assert_list_pairwise(state_list[0], atol=1e-04)
assert_list_pairwise(last_output_list[2], atol=1e-04)
assert_list_pairwise(outputs_list[2], atol=1e-04)
assert_list_pairwise(state_list[2], atol=1e-04)
for l, u_l in zip(last_output_list[0], last_output_list[1]):
self.assertAllClose(l, u_l, atol=1e-04)
for o, u_o in zip(outputs_list[0], outputs_list[1]):
self.assertAllClose(o, u_o, atol=1e-04)
for s, u_s in zip(state_list[0], state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
self.assertAllClose(b_l, b_u_l, atol=1e-04)
for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
self.assertAllClose(b_o, b_u_o, atol=1e-04)
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
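# For reference (sketch, not in the original file), the unmasked recurrence
# exercised in test_rnn above is simply h_t = x_t . w_i + h_{t-1} . w_o, i.e.
# in NumPy:
#   h = init_state_val
#   for t in range(timesteps):
#     h = np.dot(input_val[:, t, :], w_i_val) + np.dot(h, w_o_val)
# with `outputs` stacking the successive h_t along the time axis and
# `last_output` equal to the final h.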
def test_rnn_additional_states(self):
# implement a simple RNN
num_samples = 4
input_dim = 5
output_dim = 3
timesteps = 6
input_val = np.random.random(
(num_samples, timesteps, input_dim)).astype(np.float32)
init_state_val = np.random.random(
(num_samples, output_dim)).astype(np.float32)
w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
np_mask = np.random.randint(2, size=(num_samples, timesteps))
def rnn_step_fn():
w_i = keras.backend.variable(w_i_val)
w_o = keras.backend.variable(w_o_val)
def step_function(x, states):
assert len(states) == 2
prev_output = states[0]
output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
return output, [output,
keras.backend.concatenate([output, output], axis=-1)]
return step_function
# test default setup
last_output_list = [[], [], [], [], [], []]
outputs_list = [[], [], [], [], [], []]
state_list = [[], [], [], [], [], []]
additional_state_list = [[], [], [], [], [], []]
rnn_fn = rnn_step_fn()
inputs = keras.backend.variable(input_val)
initial_states = [
keras.backend.variable(init_state_val),
ops.convert_to_tensor(
np.concatenate([init_state_val, init_state_val], axis=-1))
]
mask = keras.backend.variable(np_mask)
kwargs_list = [
{'go_backwards': False, 'mask': None},
{'go_backwards': False, 'mask': None, 'unroll': True},
{'go_backwards': True, 'mask': None},
{'go_backwards': True, 'mask': None, 'unroll': True},
{'go_backwards': False, 'mask': mask},
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
# check static shape inference
self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])
self.assertEqual(outputs.shape.as_list(),
[num_samples, timesteps, output_dim])
# for state in new_states:
# self.assertEqual(state.shape.as_list(),
# [num_samples, output_dim])
self.assertEqual(new_states[0].shape.as_list(), [num_samples, output_dim])
self.assertEqual(new_states[1].shape.as_list(),
[num_samples, 2 * output_dim])
last_output_list[i].append(keras.backend.eval(last_output))
outputs_list[i].append(keras.backend.eval(outputs))
self.assertLen(new_states, 2)
state_list[i].append(keras.backend.eval(new_states[0]))
additional_state_list[i].append(keras.backend.eval(new_states[1]))
def assert_list_pairwise(z_list, atol=1e-05):
for (z1, z2) in zip(z_list[1:], z_list[:-1]):
self.assertAllClose(z1, z2, atol=atol)
assert_list_pairwise(last_output_list[0], atol=1e-04)
assert_list_pairwise(outputs_list[0], atol=1e-04)
assert_list_pairwise(state_list[0], atol=1e-04)
assert_list_pairwise(additional_state_list[0], atol=1e-04)
assert_list_pairwise(last_output_list[2], atol=1e-04)
assert_list_pairwise(outputs_list[2], atol=1e-04)
assert_list_pairwise(state_list[2], atol=1e-04)
assert_list_pairwise(additional_state_list[2], atol=1e-04)
for l, u_l in zip(last_output_list[0], last_output_list[1]):
self.assertAllClose(l, u_l, atol=1e-04)
for o, u_o in zip(outputs_list[0], outputs_list[1]):
self.assertAllClose(o, u_o, atol=1e-04)
for s, u_s in zip(state_list[0], state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for s, u_s in zip(additional_state_list[0], additional_state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
self.assertAllClose(b_l, b_u_l, atol=1e-04)
for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
self.assertAllClose(b_o, b_u_o, atol=1e-04)
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
for s, u_s in zip(additional_state_list[2], additional_state_list[3]):
self.assertAllClose(s, u_s, atol=1e-04)
def test_rnn_output_and_state_masking_independent(self):
num_samples = 2
num_timesteps = 4
state_and_io_size = 2
mask_last_num_timesteps = 2 # for second sample only
# a step function that just outputs inputs,
# but increments states +1 per timestep
def step_function(inputs, states):
return inputs, [s + 1 for s in states]
inputs_vals = np.random.random((num_samples, num_timesteps,
state_and_io_size))
initial_state_vals = np.random.random((num_samples, state_and_io_size))
# mask the last two timesteps for the second sample only
mask_vals = np.ones((num_samples, num_timesteps))
mask_vals[1, -mask_last_num_timesteps:] = 0
# outputs expected to be same as inputs for the first sample
expected_outputs = inputs_vals.copy()
# but for the second sample all outputs in masked region should be the same
# as last output before masked region
expected_outputs[1, -mask_last_num_timesteps:] = \
expected_outputs[1, -(mask_last_num_timesteps + 1)]
expected_last_state = initial_state_vals.copy()
# first state should be incremented for every timestep (no masking)
expected_last_state[0] += num_timesteps
# second state should not be incremented for last two timesteps
expected_last_state[1] += (num_timesteps - mask_last_num_timesteps)
# verify the same expected outputs for `unroll=True` and `unroll=False`
inputs = keras.backend.variable(inputs_vals)
initial_states = [keras.backend.variable(initial_state_vals)]
mask = keras.backend.variable(mask_vals)
for unroll in [True, False]:
_, outputs, last_states = keras.backend.rnn(
step_function,
inputs,
initial_states,
mask=mask,
unroll=unroll,
input_length=num_timesteps if unroll else None)
self.assertAllClose(keras.backend.eval(outputs), expected_outputs)
self.assertAllClose(
keras.backend.eval(last_states[0]), expected_last_state)
def test_rnn_output_num_dim_larger_than_2_masking(self):
num_samples = 3
num_timesteps = 4
num_features = 5
def step_function(inputs, states):
outputs = keras.backend.tile(keras.backend.expand_dims(inputs), [1, 1, 2])
return outputs, [keras.backend.identity(s) for s in states]
# Note: the step function cannot simply return `states` unchanged; doing so
# can fail with:
#   tensorflow/python/ops/resource_variable_ops.py, line 824, in set_shape
#   NotImplementedError: ResourceVariable does not implement set_shape()
inputs_vals = np.random.random((num_samples, num_timesteps, num_features))
initial_state_vals = np.random.random((num_samples, 6))
mask_vals = np.ones((num_samples, num_timesteps))
mask_vals[-1, -1] = 0 # final timestep masked for last sample
expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1)
# for the last sample, the final timestep (in masked region) should be the
# same as the second to final output (before masked region)
expected_outputs[-1, -1] = expected_outputs[-1, -2]
inputs = keras.backend.variable(inputs_vals)
initial_states = [keras.backend.variable(initial_state_vals)]
mask = keras.backend.variable(mask_vals)
for unroll in [True, False]:
_, outputs, _ = keras.backend.rnn(
step_function,
inputs,
initial_states,
mask=mask,
unroll=unroll,
input_length=num_timesteps if unroll else None)
self.assertAllClose(keras.backend.eval(outputs), expected_outputs)
def test_rnn_state_num_dim_larger_than_2_masking(self):
num_samples = 3
num_timesteps = 4
def step_function(inputs, states):
return inputs, [s + 1 for s in states]
inputs_vals = np.random.random((num_samples, num_timesteps, 5))
initial_state_vals = np.random.random((num_samples, 6, 7))
mask_vals = np.ones((num_samples, num_timesteps))
mask_vals[0, -2:] = 0 # final two timesteps masked for first sample
expected_last_state = initial_state_vals.copy()
expected_last_state[0] += (num_timesteps - 2)
expected_last_state[1:] += num_timesteps
inputs = keras.backend.variable(inputs_vals)
initial_states = [keras.backend.variable(initial_state_vals)]
mask = keras.backend.variable(mask_vals)
for unroll in [True, False]:
_, _, last_states = keras.backend.rnn(
step_function,
inputs,
initial_states,
mask=mask,
unroll=unroll,
input_length=num_timesteps if unroll else None)
self.assertAllClose(
keras.backend.eval(last_states[0]), expected_last_state)
def test_batch_normalization(self):
g_val = np.random.random((3,))
b_val = np.random.random((3,))
gamma = keras.backend.variable(g_val)
beta = keras.backend.variable(b_val)
# 3D NHC case
val = np.random.random((10, 5, 3))
x = keras.backend.variable(val)
mean, var = nn.moments(x, (0, 1), None, None, False)
normed = keras.backend.batch_normalization(
x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 5, 3])
# 4D NHWC case
val = np.random.random((10, 5, 5, 3))
x = keras.backend.variable(val)
mean, var = nn.moments(x, (0, 1, 2), None, None, False)
normed = keras.backend.batch_normalization(
x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 5, 5, 3])
# 4D NCHW case
if not context.executing_eagerly():
# Eager CPU kernel for NCHW does not exist.
val = np.random.random((10, 3, 5, 5))
x = keras.backend.variable(val)
mean, var = nn.moments(x, (0, 2, 3), None, None, False)
normed = keras.backend.batch_normalization(
x, mean, var, beta, gamma, axis=1, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 5, 5])
def test_normalize_batch_in_training(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
reduction_axes = (0, 2, 3)
g_val = np.random.random((3,))
b_val = np.random.random((3,))
gamma = keras.backend.variable(g_val)
beta = keras.backend.variable(b_val)
normed, mean, var = keras.backend.normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])
self.assertEqual(mean.shape.as_list(), [
3,
])
self.assertEqual(var.shape.as_list(), [
3,
])
# case: gamma=None
gamma = None
normed, mean, var = keras.backend.normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])
self.assertEqual(mean.shape.as_list(), [
3,
])
self.assertEqual(var.shape.as_list(), [
3,
])
# case: beta=None
beta = None
normed, mean, var = keras.backend.normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=1e-3)
self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])
self.assertEqual(mean.shape.as_list(), [
3,
])
self.assertEqual(var.shape.as_list(), [
3,
])
def test_dropout(self):
inputs = array_ops.ones((200, 200))
outputs = keras.backend.dropout(inputs, 0.2)
outputs_val = keras.backend.eval(outputs)
self.assertEqual(np.min(outputs_val), 0)
self.assertAllClose(np.count_nonzero(outputs_val), 32000, atol=1000)
# Test noise shape
outputs = keras.backend.dropout(inputs, 0.2, noise_shape=(200, 1))
outputs_val = keras.backend.eval(outputs)
self.assertAllClose(outputs_val[2, :], outputs_val[3, :], atol=1e-5)
class BackendCrossEntropyLossesTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_categorical_crossentropy_loss(self):
t = keras.backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
p = keras.backend.constant([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
result = keras.backend.categorical_crossentropy(t, p)
self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)
p = keras.backend.constant([[.9, .05, .05], [.05, .89, .01],
[.05, .06, .94]])
result = keras.backend.categorical_crossentropy(t, p, axis=0)
self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)
p = keras.backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
result = keras.backend.categorical_crossentropy(t, p, from_logits=True),
self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)
p = keras.backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])
result = keras.backend.categorical_crossentropy(
t, p, from_logits=True, axis=0),
self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)
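# Where the expected values above come from (sketch, not in the original
# file): with one-hot targets the loss is -log(p_true), e.g. -log(.9) ~= .105,
# -log(.89) ~= .116, -log(.94) ~= .062; in the from_logits case each row is
# first passed through softmax, e.g. softmax([2., 3., 5.])[2] ~= .844 and
# -log(.844) ~= .17.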
@test_util.run_in_graph_and_eager_modes
def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
t = keras.backend.placeholder()
p = keras.backend.placeholder()
o = keras.backend.categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# With axis set
o = keras.backend.categorical_crossentropy(t, p, axis=0)
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .065, .111], 1e-3)
# from logits
p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
o = keras.backend.categorical_crossentropy(t, p, from_logits=True)
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, 0, .17], 1e-3)
# from logits and axis set
o = keras.backend.categorical_crossentropy(t, p, from_logits=True, axis=0)
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, .003, .036], 1e-3)
@test_util.run_in_graph_and_eager_modes
def test_sparse_categorical_crossentropy_loss(self):
t = keras.backend.constant([0, 1, 2])
p = keras.backend.constant([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
result = keras.backend.sparse_categorical_crossentropy(t, p)
self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)
p = keras.backend.constant([[.9, .05, .05], [.05, .89, .01],
[.05, .06, .94]])
result = keras.backend.sparse_categorical_crossentropy(t, p, axis=0)
self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)
p = keras.backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
result = keras.backend.sparse_categorical_crossentropy(
t, p, from_logits=True),
self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)
p = keras.backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])
result = keras.backend.sparse_categorical_crossentropy(
t, p, from_logits=True, axis=0),
self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)
@test_util.run_in_graph_and_eager_modes
def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
t = keras.backend.placeholder()
p = keras.backend.placeholder()
o = keras.backend.sparse_categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor([0, 1, 2])
p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# With axis set
with self.assertRaisesRegex(
ValueError,
'Cannot compute sparse categorical crossentropy with `axis=0`'):
o = keras.backend.sparse_categorical_crossentropy(t, p, axis=0)
f = keras.backend.function([t, p], o)
_ = f([t_val, p_val])
# from logits
p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
o = keras.backend.sparse_categorical_crossentropy(t, p, from_logits=True)
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, 0, .17], 1e-3)
# from logits and axis set
with self.assertRaisesRegex(
ValueError,
'Cannot compute sparse categorical crossentropy with `axis=0`'):
o = keras.backend.sparse_categorical_crossentropy(
t, p, from_logits=True, axis=0)
f = keras.backend.function([t, p], o)
_ = f([t_val, p_val])
@test_util.run_all_in_graph_and_eager_modes
class TestCTC(test.TestCase):
def test_ctc_decode(self):
depth = 6
seq_len_0 = 5
input_prob_matrix_0 = np.asarray(
[[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
[0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
[0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
[0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
[0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
# Random entry added in at time=5
[0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
dtype=np.float32)
# list of length max_time_steps, each entry a batch_size x depth matrix
inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]
for t in range(seq_len_0)] + # Pad to max_time_steps = 7
2 * [np.zeros((1, depth), dtype=np.float32)])
inputs = keras.backend.variable(np.asarray(inputs).transpose((1, 0, 2)))
# batch_size length vector of sequence_lengths
input_length = keras.backend.variable(
np.array([seq_len_0], dtype=np.int32))
# batch_size length vector of negative log probabilities
log_prob_truth = np.array([
-3.5821197, # output beam 0
-3.777835 # output beam 1
], np.float32)[np.newaxis, :]
decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]
beam_width = 2
top_paths = 2
decode_pred_tf, log_prob_pred_tf = keras.backend.ctc_decode(
inputs,
input_length,
greedy=False,
beam_width=beam_width,
top_paths=top_paths)
self.assertEqual(len(decode_pred_tf), top_paths)
log_prob_pred = keras.backend.eval(log_prob_pred_tf)
for i in range(top_paths):
self.assertTrue(
np.alltrue(
decode_truth[i] == keras.backend.eval(decode_pred_tf[i])))
self.assertAllClose(log_prob_truth, log_prob_pred)
@test_util.run_v1_only('b/120545219')
def test_ctc_batch_cost(self):
with self.cached_session():
label_lens = np.expand_dims(np.asarray([5, 4]), 1)
input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps
loss_log_probs = [3.34211, 5.42262]
# dimensions are batch x time x categories
labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
inputs = np.asarray(
[[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],
dtype=np.float32)
labels = keras.backend.variable(labels, dtype='int32')
inputs = keras.backend.variable(inputs, dtype='float32')
input_lens = keras.backend.variable(input_lens, dtype='int32')
label_lens = keras.backend.variable(label_lens, dtype='int32')
res = keras.backend.eval(
keras.backend.ctc_batch_cost(labels, inputs, input_lens, label_lens))
self.assertAllClose(res[:, 0], loss_log_probs, atol=1e-05)
# test when batch_size = 1, that is, one sample only
ref = [3.34211]
input_lens = np.expand_dims(np.asarray([5]), 1)
label_lens = np.expand_dims(np.asarray([5]), 1)
labels = np.asarray([[0, 1, 2, 1, 0]])
inputs = np.asarray(
[[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [
0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436
], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]
],
dtype=np.float32)
k_labels = keras.backend.variable(labels, dtype='int32')
k_inputs = keras.backend.variable(inputs, dtype='float32')
k_input_lens = keras.backend.variable(input_lens, dtype='int32')
k_label_lens = keras.backend.variable(label_lens, dtype='int32')
res = keras.backend.eval(
keras.backend.ctc_batch_cost(k_labels, k_inputs, k_input_lens,
k_label_lens))
self.assertAllClose(res[:, 0], ref, atol=1e-05)
@test_util.run_all_in_graph_and_eager_modes
class TestRandomOps(test.TestCase):
def test_random_normal(self):
np.random.seed(123)
x = keras.backend.random_normal((500, 500))
val = keras.backend.eval(x)
self.assertAllClose(np.mean(val), 0., atol=0.01)
self.assertAllClose(np.std(val), 1., atol=0.01)
def test_random_uniform(self):
np.random.seed(123)
x = keras.backend.random_uniform((500, 500))
val = keras.backend.eval(x)
self.assertAllClose(np.mean(val), 0.5, atol=0.01)
self.assertAllClose(np.max(val), 1., atol=0.01)
self.assertAllClose(np.min(val), 0., atol=0.01)
def test_random_binomial(self):
np.random.seed(123)
x = keras.backend.random_binomial((500, 500), p=0.5)
self.assertAllClose(np.mean(keras.backend.eval(x)), 0.5, atol=0.01)
def test_truncated_normal(self):
np.random.seed(123)
x = keras.backend.truncated_normal((500, 500), mean=0.0, stddev=1.0)
x = keras.backend.truncated_normal((1000, 1000), mean=0.0, stddev=1.0)
y = keras.backend.eval(x)
self.assertAllClose(np.mean(y), 0., atol=0.01)
self.assertAllClose(np.std(y), 0.88, atol=0.01)
self.assertAllClose(np.max(y), 2., atol=0.01)
self.assertAllClose(np.min(y), -2., atol=0.01)
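# For reference (sketch, not in the original file): truncated_normal re-draws
# samples that fall more than two standard deviations from the mean, so values
# stay within roughly [-2, 2] and the standard deviation of the truncated
# distribution is about 0.88 rather than 1.0, which is what
# test_truncated_normal checks above.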
@test_util.run_all_in_graph_and_eager_modes
class FunctionTest(test.TestCase):
def test_function_basics(self):
x1 = keras.backend.placeholder(shape=(), dtype='float32')
x2 = keras.backend.placeholder(shape=(), dtype='int32')
v = keras.backend.variable(10.)
y1 = x1 + keras.backend.cast(x2, 'float32') + v
y2 = x1 * keras.backend.cast(x2, 'float32')
with ops.control_dependencies([y1]):
u = keras.backend.update(v, x1)
f = keras.backend.function([x1, x2], [y1, y2], updates=[u])
output_values = f([2, 3])
self.assertEqual(output_values, [15., 6.])
self.assertEqual(keras.backend.eval(v), 2.)
def test_function_dict_outputs(self):
x_ph = keras.backend.placeholder(shape=(), name='x')
y_ph = keras.backend.placeholder(shape=(), name='y')
outputs = {'x*y': y_ph * x_ph, 'x*x': x_ph * x_ph}
f = keras.backend.function(inputs=[x_ph, y_ph], outputs=outputs)
x, y = 2., 5.
results = f([x, y])
self.assertEqual(results['x*y'], 10.)
self.assertEqual(results['x*x'], 4)
def test_function_dict_inputs(self):
placeholders = {
'x': keras.backend.placeholder(shape=()),
'y': keras.backend.placeholder(shape=())
}
outputs = [placeholders['x'] * placeholders['y']]
f = keras.backend.function(inputs=placeholders, outputs=outputs)
results = f({'x': 2., 'y': 3.})
self.assertEqual(results[0], 6.)
def test_function_single_input_output(self):
x_ph = keras.backend.placeholder(shape=(), name='x')
output = x_ph * x_ph
f = keras.backend.function(x_ph, output)
result = f(2.)
self.assertEqual(result, 4.)
def test_tuple_updates(self):
x_ph = keras.backend.placeholder(ndim=2)
v = keras.backend.variable(np.ones((4, 2)))
output = x_ph ** 2 + v
new_v = v + x_ph
f = keras.backend.function(x_ph, output, updates=[(v, new_v)])
input_val = np.random.random((4, 2))
result = f(input_val)
self.assertAllClose(result, input_val ** 2 + 1)
self.assertAllClose(keras.backend.get_value(v), np.ones((4, 2)) + input_val)
class BackendGraphTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_function_placeholder_with_default(self):
with keras.backend.get_graph().as_default():
x1 = array_ops.placeholder_with_default(
np.array(2., dtype='float32'), shape=())
x2 = array_ops.placeholder_with_default(
np.array(3, dtype='int32'), shape=())
y1 = x1 + keras.backend.cast(x2, 'float32')
y2 = x1 * keras.backend.cast(x2, 'float32')
f = keras.backend.function([x1, x2], [y1, y2])
output_values = f([4, 5])
self.assertEqual(output_values, [9., 20.])
output_values = f([None, None])
self.assertEqual(output_values, [5., 6.])
@test_util.run_deprecated_v1
def test_function_tf_feed_symbols(self):
# Test Keras backend functions with TF tensor inputs.
with self.cached_session():
# Test feeding a resource variable to `function`.
x1 = keras.backend.placeholder(shape=())
x2 = keras.backend.placeholder(shape=())
lr = keras.backend.learning_phase() # Include a placeholder_with_default.
y1 = keras.backend.variable(10.)
y2 = 3
f = keras.backend.function(
inputs=[x1, x2, lr],
outputs=[x1 + 1, keras.backend.in_train_phase(x2 + 2, x2 - 1)])
outs = f([y1, y2, None]) # Use default learning_phase value.
self.assertEqual(outs, [11., 2.])
outs = f([y1, y2, 1]) # Set learning phase value.
self.assertEqual(outs, [11., 5.])
# Test triggering a callable refresh by changing the input.
y3 = keras.backend.constant(20.) # Test with tensor
outs = f([y3, y2, None])
self.assertEqual(outs, [21., 2.])
y4 = 4 # Test with non-symbol
outs = f([y4, y2, None])
self.assertEqual(outs, [5., 2.])
# Test with a different dtype
y5 = keras.backend.constant(10., dtype='float64')
outs = f([y5, y2, None])
self.assertEqual(outs, [11., 2.])
@test_util.run_deprecated_v1
def test_function_tf_fetches(self):
# Additional operations can be passed to tf.compat.v1.Session().run() via
# its `fetches` argument. In contrast to the `updates` argument of
# keras.backend.function(), these have no control dependency on `outputs`,
# so they can run in parallel. They also do not contribute to the output of
# keras.backend.function().
with self.cached_session():
x = keras.backend.variable(0.)
y = keras.backend.variable(0.)
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
f = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder],
updates=[(x, x_placeholder + 1.)],
fetches=[keras.backend.update(y, 5.)])
output = f([10., 20.])
self.assertEqual(output, [30.])
self.assertEqual(keras.backend.get_session().run(fetches=[x, y]),
[11., 5.])
@test_util.run_deprecated_v1
def test_function_tf_feed_dict(self):
# Additional substitutions can be passed to `tf.compat.v1.Session().run()`
# via its `feed_dict` argument. Note that the `feed_dict` is passed once, at
# construction time, but its values can be modified afterwards. Through this
# `feed_dict` we can provide additional substitutions besides Keras inputs.
with self.cached_session():
x = keras.backend.variable(0.)
y = keras.backend.variable(0.)
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
feed_dict = {y_placeholder: 3.}
fetches = [keras.backend.update(y, y_placeholder * 10.)]
f = keras.backend.function(
inputs=[x_placeholder],
outputs=[x_placeholder + 1.],
updates=[(x, x_placeholder + 10.)],
feed_dict=feed_dict,
fetches=fetches)
output = f([10.])
self.assertEqual(output, [11.])
self.assertEqual(keras.backend.get_session().run(fetches=[x, y]),
[20., 30.])
# An updated value in `feed_dict` takes effect on the next call to the function.
feed_dict[y_placeholder] = 4.
output = f([20.])
self.assertEqual(output, [21.])
self.assertEqual(keras.backend.get_session().run(fetches=[x, y]),
[30., 40.])
@test_util.run_deprecated_v1
def test_function_tf_run_options_with_run_metadata(self):
with self.cached_session():
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
# enable run_options.
f = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder],
options=run_options,
run_metadata=run_metadata)
output = f([10., 20.])
self.assertEqual(output, [30.])
self.assertGreater(len(run_metadata.partition_graphs), 0)
# disable run_options.
f1 = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder],
run_metadata=run_metadata)
output1 = f1([10., 20.])
self.assertEqual(output1, [30.])
self.assertEqual(len(run_metadata.partition_graphs), 0)
@test_util.run_deprecated_v1
def test_function_fetch_callbacks(self):
class CallbackStub(object):
def __init__(self):
self.times_called = 0
self.callback_result = 0
def _fetch_callback(self, result):
self.times_called += 1
self.callback_result = result
with self.cached_session():
callback = CallbackStub()
x_placeholder = keras.backend.placeholder(shape=())
y_placeholder = keras.backend.placeholder(shape=())
callback_op = x_placeholder * y_placeholder
f = keras.backend.function(
inputs=[x_placeholder, y_placeholder],
outputs=[x_placeholder + y_placeholder])
f.fetches.append(callback_op)
f.fetch_callbacks[callback_op] = callback._fetch_callback
_ = f([10., 20.])
self.assertEqual(callback.times_called, 1)
self.assertEqual(callback.callback_result, 200)
def test_get_session_different_graphs(self):
with ops.Graph().as_default():
x = keras.backend.constant(1)
session = keras.backend.get_session()
self.assertIs(session, keras.backend.get_session((x,)))
self.assertIs(session, keras.backend.get_session())
with ops.Graph().as_default():
self.assertIs(session, keras.backend.get_session((x,)))
self.assertIsNot(session, keras.backend.get_session())
@test_util.run_all_in_graph_and_eager_modes
class ControlOpsTests(test.TestCase):
def test_function_switch_basics(self):
x = array_ops.constant(2.0)
y = array_ops.constant(3.0)
def xpowy():
return keras.backend.pow(x, y)
def ypowx():
return keras.backend.pow(y, x)
tensor = keras.backend.switch(keras.backend.less(x, y), xpowy, ypowx)
self.assertEqual(keras.backend.eval(tensor), [8.0])
tensor = keras.backend.switch(keras.backend.greater(x, y), xpowy, ypowx)
self.assertEqual(keras.backend.eval(tensor), [9.0])
def test_unequal_rank(self):
x = ops.convert_to_tensor(np.array([[1, 2, 3], [4, 5, 6]]), dtype='float32')
y = ops.convert_to_tensor(np.array([1, 2, 3]), dtype='float32')
def true_func():
return x
def false_func():
return y
with self.assertRaisesRegexp(ValueError,
'Rank of `condition` should be less than'):
keras.backend.switch(keras.backend.equal(x, x), false_func, true_func)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/backend_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend config API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import keras_export
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
@keras_export('keras.backend.epsilon')
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
```python
>>> keras.backend.epsilon()
1e-07
```
"""
return _EPSILON
@keras_export('keras.backend.set_epsilon')
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.
Arguments:
value: float. New value of epsilon.
Example:
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-07
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
"""
global _EPSILON
_EPSILON = value
@keras_export('keras.backend.floatx')
def floatx():
"""Returns the default float type, as a string.
E.g. 'float16', 'float32', 'float64'.
Returns:
String, the current default float type.
Example:
```python
>>> keras.backend.floatx()
'float32'
```
"""
return _FLOATX
@keras_export('keras.backend.set_floatx')
def set_floatx(value):
"""Sets the default float type.
Arguments:
value: String; 'float16', 'float32', or 'float64'.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
@keras_export('keras.backend.image_data_format')
def image_data_format():
"""Returns the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`
Example:
```python
>>> keras.backend.image_data_format()
'channels_first'
```
"""
return _IMAGE_DATA_FORMAT
@keras_export('keras.backend.set_image_data_format')
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.
Arguments:
data_format: string. `'channels_first'` or `'channels_last'`.
Example:
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
Raises:
ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format: ' + str(data_format))
_IMAGE_DATA_FORMAT = str(data_format)
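# Illustrative sketch only (not part of the public API): how the getters and
# setters above are typically used together. `_config_round_trip_example` is a
# hypothetical helper added here purely for documentation purposes.
def _config_round_trip_example():
  """Sets a config value, reads it back, then restores the previous value."""
  previous = floatx()           # e.g. 'float32' by default
  set_floatx('float64')
  assert floatx() == 'float64'
  set_floatx(previous)          # Restore the original default.
  return previous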
| tensorflow-master | tensorflow/python/keras/backend_config.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
"""Loss base class.
To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
Example subclass implementation:
```
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
```
When used with `tf.distribute.Strategy`, outside of built-in training loops
such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
types, and reduce losses explicitly in your training loop. Using 'AUTO' or
'SUM_OVER_BATCH_SIZE' will raise an error.
Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops for more
details on this.
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
```
with strategy.scope():
loss_obj = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE)
....
loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
(1. / global_batch_size))
```
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: Optional name for the op.
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
losses_utils.ReductionV2.validate(reduction)
self.reduction = reduction
self.name = name
def __call__(self, y_true, y_pred, sample_weight=None):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
as `y_true`, or is broadcastable to `y_true`. `sample_weight` acts as a
coefficient for the loss. If a scalar is provided, then the loss is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `sample_weight` vector. If
the shape of `sample_weight` matches the shape of `y_pred`, then the
loss of each measurable element of `y_pred` is scaled by the
corresponding value of `sample_weight`.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `y_true`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `sample_weight` is invalid.
"""
# If we are wrapping a lambda function, strip '<>' from the name, as it is
# not accepted in scope names.
scope_name = 'lambda' if self.name == '<lambda>' else self.name
graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
y_true, y_pred, sample_weight)
with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
losses = self.call(y_true, y_pred)
return losses_utils.compute_weighted_loss(
losses, sample_weight, reduction=self._get_reduction())
@classmethod
def from_config(cls, config):
"""Instantiates a `Loss` from its config (output of `get_config()`).
Args:
config: Output of `get_config()`.
Returns:
A `Loss` instance.
"""
return cls(**config)
def get_config(self):
return {'reduction': self.reduction, 'name': self.name}
@abc.abstractmethod
@doc_controls.for_subclass_implementers
def call(self, y_true, y_pred):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values, with the same shape as 'y_pred'.
y_pred: The predicted values.
"""
raise NotImplementedError('Must be implemented in subclasses.')
def _get_reduction(self):
"""Handles `AUTO` reduction cases and returns the reduction value."""
if distribution_strategy_context.has_strategy() and (
self.reduction == losses_utils.ReductionV2.AUTO or
self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):
raise ValueError(
'Please use `tf.keras.losses.Reduction.SUM` or '
'`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
'used with `tf.distribute.Strategy` outside of the built-in training '
'loops. You can implement '
'`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
'size like:\n```\nwith strategy.scope():\n'
' loss_obj = tf.keras.losses.CategoricalCrossentropy('
'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
'(1. / global_batch_size)\n```\nPlease see '
'https://www.tensorflow.org/alpha/tutorials/distribute/training_loops'
' for more details.')
if self.reduction == losses_utils.ReductionV2.AUTO:
return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
return self.reduction
class LossFunctionWrapper(Loss):
"""Wraps a loss function in the `Loss` class.
Args:
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
def __init__(self,
fn,
reduction=losses_utils.ReductionV2.AUTO,
name=None,
**kwargs):
super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
"""Invokes the `LossFunctionWrapper` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
base_config = super(LossFunctionWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
For example, if `y_true` is [0., 0., 1., 1.] and `y_pred` is [1., 1., 1., 0.]
then the mean squared error value is 3/4 (0.75).
Usage:
```python
mse = tf.keras.losses.MeanSquaredError()
loss = mse([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Loss: ', loss.numpy()) # Loss: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_error'):
super(MeanSquaredError, self).__init__(
mean_squared_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
For example, if `y_true` is [0., 0., 1., 1.] and `y_pred` is [1., 1., 1., 0.]
then the mean absolute error value is 3/4 (0.75).
Usage:
```python
mae = tf.keras.losses.MeanAbsoluteError()
loss = mae([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Loss: ', loss.numpy()) # Loss: 0.75
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_error'):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
For example, if `y_true` is [0., 0., 1., 1.] and `y_pred` is [1., 1., 1., 0.]
then the mean absolute percentage error value is 5e+08.
Usage:
```python
mape = tf.keras.losses.MeanAbsolutePercentageError()
loss = mape([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Loss: ', loss.numpy()) # Loss: 5e+08
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_percentage_error'):
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
For example, if `y_true` is [0., 0., 1., 1.] and `y_pred` is [1., 1., 1., 0.]
then the mean squared logarithmic error value is 0.36034.
Usage:
```python
msle = tf.keras.losses.MeanSquaredLogarithmicError()
loss = msle([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Loss: ', loss.numpy()) # Loss: 0.36034
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_logarithmic_error'):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name=name, reduction=reduction)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss when there are only two label classes (assumed to
be 0 and 1). For each example, there should be a single floating-point value
per prediction.
In the snippet below, each of the four examples has only a single
floating-point value, and both `y_pred` and `y_true` have the shape
`[batch_size]`.
Usage:
```python
bce = tf.keras.losses.BinaryCrossentropy()
loss = bce([0., 0., 1., 1.], [1., 1., 1., 0.])
print('Loss: ', loss.numpy()) # Loss: 11.522857
```
Usage with the `tf.keras` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
```
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume
that `y_pred` contains probabilities (i.e., values in [0, 1]).
label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we
compute the loss between the predicted labels and a smoothed version of
the true labels, where the smoothing squeezes the labels towards 0.5.
Larger values of `label_smoothing` correspond to heavier smoothing.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: (Optional) Name for the op.
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='binary_crossentropy'):
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
self.from_logits = from_logits
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided in a `one_hot` representation. If you want to
provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature.
In the snippet below, there are `# classes` floating point values per
example. The shape of both `y_pred` and `y_true` are
`[batch_size, num_classes]`.
Usage:
```python
cce = tf.keras.losses.CategoricalCrossentropy()
loss = cce(
[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])
print('Loss: ', loss.numpy()) # Loss: 0.3239
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
```
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
  meaning the confidence on label values is relaxed. For example,
  `label_smoothing=0.2` means that we will use a value of `0.1` for label
  `0` and `0.9` for label `1`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_crossentropy'):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Usage:
```python
cce = tf.keras.losses.SparseCategoricalCrossentropy()
loss = cce(
[0, 1, 2],
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])
print('Loss: ', loss.numpy()) # Loss: 0.3239
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
```
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
from_logits=False,
reduction=losses_utils.ReductionV2.AUTO,
name=None):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
```python
h = tf.keras.losses.Hinge()
loss = h([-1., 1., 1.], [0.6, -0.7, -0.5])
# loss = max(0, 1 - y_true * y_pred) = [1.6 + 1.7 + 1.5] / 3
print('Loss: ', loss.numpy()) # Loss: 1.6
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Hinge())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
super(Hinge, self).__init__(hinge, name=name, reduction=reduction)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
```python
sh = tf.keras.losses.SquaredHinge()
loss = sh([-1., 1., 1.], [0.6, -0.7, -0.5])
# loss = (max(0, 1 - y_true * y_pred))^2 = [1.6^2 + 1.7^2 + 1.5^2] / 3
print('Loss: ', loss.numpy()) # Loss: 2.566666
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='squared_hinge'):
super(SquaredHinge, self).__init__(
squared_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
Usage:
```python
ch = tf.keras.losses.CategoricalHinge()
loss = ch([0., 1., 1.], [1., 0., 1.])
print('Loss: ', loss.numpy()) # Loss: 1.0
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_hinge'):
super(CategoricalHinge, self).__init__(
categorical_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` and `y_pred`.
`loss = y_pred - y_true * log(y_pred)`
Usage:
```python
p = tf.keras.losses.Poisson()
loss = p([1., 9., 2.], [4., 8., 12.])
print('Loss: ', loss.numpy()) # Loss: -0.35702705
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Poisson())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
super(Poisson, self).__init__(poisson, name=name, reduction=reduction)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true)
Usage:
```python
l = tf.keras.losses.LogCosh()
loss = l([0., 1., 1.], [1., 0., 1.])
print('Loss: ', loss.numpy()) # Loss: 0.289
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.LogCosh())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Usage:
```python
k = tf.keras.losses.KLDivergence()
loss = k([.4, .9, .2], [.5, .8, .12])
print('Loss: ', loss.numpy()) # Loss: 0.11891246
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.KLDivergence())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='kullback_leibler_divergence'):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name=name, reduction=reduction)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` and `y_pred`.
For each value x in `error=y_true-y_pred`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Usage:
```python
l = tf.keras.losses.Huber()
loss = l([0., 1., 1.], [1., 0., 1.])
print('Loss: ', loss.numpy()) # Loss: 0.333
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Huber())
```
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
delta=1.0,
reduction=losses_utils.ReductionV2.AUTO,
name='huber_loss'):
super(Huber, self).__init__(
huber_loss, name=name, reduction=reduction, delta=delta)
@keras_export('keras.metrics.mean_squared_error',
'keras.metrics.mse',
'keras.metrics.MSE',
'keras.losses.mean_squared_error',
'keras.losses.mse',
'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
'keras.metrics.mae',
'keras.metrics.MAE',
'keras.losses.mean_absolute_error',
'keras.losses.mae',
'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
'keras.metrics.mape',
'keras.metrics.MAPE',
'keras.losses.mean_absolute_percentage_error',
'keras.losses.mape',
'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred): # pylint: disable=missing-docstring
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
diff = math_ops.abs(
(y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
return 100. * K.mean(diff, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
'keras.metrics.msle',
'keras.metrics.MSLE',
'keras.losses.mean_squared_logarithmic_error',
'keras.losses.msle',
'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred): # pylint: disable=missing-docstring
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
def _maybe_convert_labels(y_true):
"""Converts binary labels into -1/1."""
are_zeros = math_ops.equal(y_true, 0)
are_ones = math_ops.equal(y_true, 1)
is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2. * y_true - 1.
updated_y_true = smart_cond.smart_cond(is_binary,
_convert_binary_labels, lambda: y_true)
return updated_y_true
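# A minimal numpy sketch of the conversion above, for illustration only:
# binary {0, 1} labels are mapped to {-1, 1} via `2 * y - 1`, and anything
# else is left untouched. `_maybe_convert_labels_reference` is a hypothetical
# helper, not part of this module's API.
def _maybe_convert_labels_reference(y_true):
  import numpy as np  # Local import; numpy is not otherwise needed here.
  y_true = np.asarray(y_true, dtype=float)
  if np.all((y_true == 0.) | (y_true == 1.)):
    return 2. * y_true - 1.  # Binary labels become -1/1.
  return y_true  # Assumed to already be -1/1; leave unchanged.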
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided we will convert them to -1 or 1.
y_pred: The predicted values.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` and `y_pred`.
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided we will convert them to -1 or 1.
y_pred: The predicted values.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
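# A minimal numpy sketch of the hinge loss above (after binary labels have
# been mapped to -1/1), for illustration only; `_hinge_reference` is a
# hypothetical helper, not part of this module's API.
def _hinge_reference(y_true, y_pred):
  import numpy as np
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  return np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1)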
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
return math_ops.maximum(0., neg - pos + 1.)
def huber_loss(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
For each value x in `error=y_true-y_pred`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = math_ops.cast(y_pred, dtype=K.floatx())
y_true = math_ops.cast(y_true, dtype=K.floatx())
error = math_ops.subtract(y_pred, y_true)
abs_error = math_ops.abs(error)
quadratic = math_ops.minimum(abs_error, delta)
linear = math_ops.subtract(abs_error, quadratic)
return math_ops.add(
math_ops.multiply(
ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
math_ops.multiply(quadratic, quadratic)),
math_ops.multiply(delta, linear))
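# A minimal numpy sketch of the piecewise Huber definition above
# (0.5 * x^2 for |x| <= delta, linear beyond delta), for illustration only;
# `_huber_loss_reference` is a hypothetical helper, not part of this module.
def _huber_loss_reference(y_true, y_pred, delta=1.0):
  import numpy as np
  error = np.asarray(y_pred, dtype=float) - np.asarray(y_true, dtype=float)
  abs_error = np.abs(error)
  quadratic = np.minimum(abs_error, delta)   # |x| capped at delta
  linear = abs_error - quadratic             # the part beyond delta
  return 0.5 * quadratic ** 2 + delta * linear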
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Arguments:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
def _logcosh(x):
return x + nn.softplus(-2. * x) - math_ops.log(2.)
return K.mean(_logcosh(y_pred - y_true), axis=-1)
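# A small numpy sketch, for illustration only, of the numerically stable
# identity used by `_logcosh` above: log(cosh(x)) == x + softplus(-2x) - log(2).
# `_logcosh_identity_check` is a hypothetical helper, not part of this module.
def _logcosh_identity_check(x=0.5):
  import numpy as np
  direct = np.log(np.cosh(x))
  stable = x + np.log1p(np.exp(-2. * x)) - np.log(2.)
  return np.isclose(direct, stable)  # True for moderate values of x.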
@keras_export('keras.metrics.categorical_crossentropy',
'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0):
"""Computes the categorical crossentropy loss.
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
Categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())
def _smooth_labels():
num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
y_true = smart_cond.smart_cond(label_smoothing,
_smooth_labels, lambda: y_true)
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
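# Worked sketch of the label smoothing above, for illustration only: with
# `label_smoothing=0.2` and 3 classes, a one-hot row [0., 1., 0.] becomes
# [0.2/3, 0.8 + 0.2/3, 0.2/3] ~= [0.067, 0.867, 0.067], i.e. every label is
# pulled towards the uniform distribution. `_smooth_labels_reference` is a
# hypothetical numpy helper, not part of this module's API.
def _smooth_labels_reference(y_true, label_smoothing):
  import numpy as np
  y_true = np.asarray(y_true, dtype=float)
  num_classes = y_true.shape[-1]
  return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes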
@keras_export('keras.metrics.sparse_categorical_crossentropy',
'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
return K.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
@keras_export('keras.metrics.binary_crossentropy',
'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0): # pylint: disable=missing-docstring
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = smart_cond.smart_cond(label_smoothing,
_smooth_labels, lambda: y_true)
return K.mean(
K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
'keras.metrics.kld',
'keras.metrics.KLD',
'keras.losses.kullback_leibler_divergence',
'keras.losses.kld',
'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Usage:
```python
loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12])
print('Loss: ', loss.numpy()) # Loss: 0.11891246
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
A `Tensor` with loss.
Raises:
TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
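# A minimal numpy sketch of the clipped KL divergence computed above, for
# illustration only; `_kld_reference` is a hypothetical helper, not part of
# this module's API.
def _kld_reference(y_true, y_pred, eps=1e-7):
  import numpy as np
  y_true = np.clip(np.asarray(y_true, dtype=float), eps, 1.)
  y_pred = np.clip(np.asarray(y_pred, dtype=float), eps, 1.)
  return np.sum(y_true * np.log(y_true / y_pred), axis=-1)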
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
The Poisson loss is the mean of the elements of the `Tensor`
`y_pred - y_true * log(y_pred)`.
Usage:
```python
loss = tf.keras.losses.poisson([1.4, 9.3, 2.2], [4.3, 8.2, 12.2])
print('Loss: ', loss.numpy()) # Loss: -0.8045559
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
A `Tensor` with the mean Poisson loss.
Raises:
InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
# Retaining the legacy namespaces: 'cosine_proximity' and 'cosine'.
# TODO(psv): Change name of this function to `cosine_similarity` after fixing
# estimator test.
@keras_export(
'keras.losses.cosine_similarity',
v1=[
'keras.metrics.cosine_proximity',
'keras.metrics.cosine',
'keras.losses.cosine_proximity',
'keras.losses.cosine',
'keras.losses.cosine_similarity',
])
def cosine_proximity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions."""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return math_ops.reduce_sum(y_true * y_pred, axis=axis)
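# A minimal numpy sketch of the similarity above, for illustration only: both
# arguments are L2-normalized along `axis`, multiplied element-wise and then
# summed. `_cosine_similarity_reference` is a hypothetical helper and ignores
# the zero-norm edge case that `nn.l2_normalize` handles with an epsilon.
def _cosine_similarity_reference(y_true, y_pred, axis=-1):
  import numpy as np
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  y_true = y_true / np.linalg.norm(y_true, axis=axis, keepdims=True)
  y_pred = y_pred / np.linalg.norm(y_pred, axis=axis, keepdims=True)
  return np.sum(y_true * y_pred, axis=axis)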
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
"""Computes the cosine similarity between `y_true` and `y_pred`.
Usage:
```python
cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
# l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
# l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
# l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
# loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
#        = ((0. + 0.) + (0.5 + 0.5)) / 2
print('Loss: ', loss.numpy()) # Loss: 0.5
```
Usage with tf.keras API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
```
Args:
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
axis=-1,
reduction=losses_utils.ReductionV2.AUTO,
name='cosine_similarity'):
super(CosineSimilarity, self).__init__(
cosine_similarity, reduction=reduction, name=name, axis=axis)
# Aliases.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
cosine_similarity = cosine_proximity
def is_categorical_crossentropy(loss):
result = ((isinstance(loss, CategoricalCrossentropy) or
(isinstance(loss, LossFunctionWrapper) and
loss.fn == categorical_crossentropy) or
(hasattr(loss, '__name__') and
loss.__name__ == 'categorical_crossentropy') or
(loss == 'categorical_crossentropy')))
return result
@keras_export('keras.losses.serialize')
def serialize(loss):
return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'loss function identifier:', identifier)
LABEL_DTYPES_FOR_LOSSES = {
losses_impl.sparse_softmax_cross_entropy: 'int32',
sparse_categorical_crossentropy: 'int32'
}
| tensorflow-master | tensorflow/python/keras/losses.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Constraints: functions that impose constraints on weight values.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.constraints.Constraint')
class Constraint(object):
def __call__(self, w):
return w
def get_config(self):
return {}
@keras_export('keras.constraints.MaxNorm', 'keras.constraints.max_norm')
class MaxNorm(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Arguments:
max_value: the maximum norm for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, max_value=2, axis=0):
self.max_value = max_value
self.axis = axis
def __call__(self, w):
norms = K.sqrt(
math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
desired = K.clip(norms, 0, self.max_value)
return w * (desired / (K.epsilon() + norms))
def get_config(self):
return {'max_value': self.max_value, 'axis': self.axis}
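# A minimal numpy sketch of the rescaling performed by `MaxNorm.__call__`
# above, for illustration only: weight norms along `axis` larger than
# `max_value` are scaled back down to it, smaller norms are left (almost)
# unchanged. `_max_norm_reference` is a hypothetical helper, not part of the
# public API.
def _max_norm_reference(w, max_value=2, axis=0, eps=1e-7):
  import numpy as np
  w = np.asarray(w, dtype=float)
  norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
  desired = np.clip(norms, 0, max_value)
  return w * (desired / (eps + norms))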
@keras_export('keras.constraints.NonNeg', 'keras.constraints.non_neg')
class NonNeg(Constraint):
"""Constrains the weights to be non-negative.
"""
def __call__(self, w):
return w * math_ops.cast(math_ops.greater_equal(w, 0.), K.floatx())
@keras_export('keras.constraints.UnitNorm', 'keras.constraints.unit_norm')
class UnitNorm(Constraint):
"""Constrains the weights incident to each hidden unit to have unit norm.
Arguments:
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, axis=0):
self.axis = axis
def __call__(self, w):
return w / (
K.epsilon() + K.sqrt(
math_ops.reduce_sum(
math_ops.square(w), axis=self.axis, keepdims=True)))
def get_config(self):
return {'axis': self.axis}
@keras_export('keras.constraints.MinMaxNorm', 'keras.constraints.min_max_norm')
class MinMaxNorm(Constraint):
"""MinMaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have the norm between a lower bound and an upper bound.
Arguments:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
rescaled to yield
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
Effectively, this means that rate=1.0 stands for strict
enforcement of the constraint, while rate<1.0 means that
weights will be rescaled at each step to slowly move
towards a value inside the desired interval.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
self.min_value = min_value
self.max_value = max_value
self.rate = rate
self.axis = axis
def __call__(self, w):
norms = K.sqrt(
math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
desired = (
self.rate * K.clip(norms, self.min_value, self.max_value) +
(1 - self.rate) * norms)
return w * (desired / (K.epsilon() + norms))
def get_config(self):
return {
'min_value': self.min_value,
'max_value': self.max_value,
'rate': self.rate,
'axis': self.axis
}
# Aliases.
max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
# Legacy aliases.
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm
@keras_export('keras.constraints.serialize')
def serialize(constraint):
return serialize_keras_object(constraint)
@keras_export('keras.constraints.deserialize')
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='constraint')
@keras_export('keras.constraints.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret constraint identifier: ' +
str(identifier))
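# Illustrative sketch of the identifier forms accepted by `get` above; this
# hypothetical helper is for documentation only and is never called at import
# time. It assumes the deserializer resolves the lowercase aliases registered
# in this module's globals (e.g. 'max_norm').
def _get_usage_example():
  assert get(None) is None                      # None passes through.
  assert callable(get(lambda w: w))             # Callables are returned as-is.
  assert isinstance(get('max_norm'), MaxNorm)   # Strings are deserialized.
  assert isinstance(get({'class_name': 'UnitNorm', 'config': {}}), UnitNorm)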
| tensorflow-master | tensorflow/python/keras/constraints.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
ALL_LOSSES = [keras.losses.mean_squared_error,
keras.losses.mean_absolute_error,
keras.losses.mean_absolute_percentage_error,
keras.losses.mean_squared_logarithmic_error,
keras.losses.squared_hinge,
keras.losses.hinge,
keras.losses.categorical_crossentropy,
keras.losses.binary_crossentropy,
keras.losses.kullback_leibler_divergence,
keras.losses.poisson,
keras.losses.cosine_similarity,
keras.losses.logcosh,
keras.losses.categorical_hinge]
class _MSEMAELoss(object):
"""Loss function with internal state, for testing serialization code."""
def __init__(self, mse_fraction):
self.mse_fraction = mse_fraction
def __call__(self, y_true, y_pred, sample_weight=None):
return (self.mse_fraction * keras.losses.mse(y_true, y_pred) +
(1 - self.mse_fraction) * keras.losses.mae(y_true, y_pred))
def get_config(self):
return {'mse_fraction': self.mse_fraction}
class KerasLossesTest(test.TestCase):
def test_objective_shapes_3d(self):
with self.cached_session():
y_a = keras.backend.variable(np.random.random((5, 6, 7)))
y_b = keras.backend.variable(np.random.random((5, 6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.shape.as_list(), [5, 6])
def test_objective_shapes_2d(self):
with self.cached_session():
y_a = keras.backend.variable(np.random.random((6, 7)))
y_b = keras.backend.variable(np.random.random((6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.shape.as_list(), [
6,
])
def test_cce_one_hot(self):
with self.cached_session():
y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6)))
y_b = keras.backend.variable(np.random.random((5, 6, 7)))
objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
assert keras.backend.eval(objective_output).shape == (5, 6)
y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
y_b = keras.backend.variable(np.random.random((6, 7)))
objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
assert keras.backend.eval(objective_output).shape == (6,)
@test_util.run_in_graph_and_eager_modes
def test_categorical_crossentropy_loss(self):
target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
logits = keras.backend.variable(np.random.random((5, 1)))
softmax_output = keras.backend.softmax(logits)
output_from_logit = keras.losses.categorical_crossentropy(
target, logits, from_logits=True)
output_from_softmax = keras.losses.categorical_crossentropy(
target, softmax_output)
np.testing.assert_allclose(
keras.backend.eval(output_from_logit),
keras.backend.eval(output_from_softmax), atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
t = keras.backend.placeholder()
p = keras.backend.placeholder()
o = keras.losses.categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# from logits
p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
o = keras.losses.categorical_crossentropy(t, p, from_logits=True)
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, 0, .17], 1e-3)
@test_util.run_in_graph_and_eager_modes
def test_sparse_categorical_crossentropy_loss(self):
target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
logits = keras.backend.variable(np.random.random((5, 1)))
softmax_output = keras.backend.softmax(logits)
output_from_logit = keras.losses.sparse_categorical_crossentropy(
target, logits, from_logits=True)
output_from_softmax = keras.losses.sparse_categorical_crossentropy(
target, softmax_output)
np.testing.assert_allclose(
keras.backend.eval(output_from_logit),
keras.backend.eval(output_from_softmax), atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
t = keras.backend.placeholder()
p = keras.backend.placeholder()
o = keras.losses.sparse_categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor([0, 1, 2])
p_val = ops.convert_to_tensor([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# from logits
p_val = ops.convert_to_tensor([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
o = keras.losses.sparse_categorical_crossentropy(t, p, from_logits=True)
f = keras.backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, 0, .17], 1e-3)
@test_util.run_in_graph_and_eager_modes
def test_binary_crossentropy_loss(self):
target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
logits = keras.backend.variable(np.random.random((5, 1)))
sigmoid_output = keras.backend.sigmoid(logits)
output_from_logit = keras.losses.binary_crossentropy(
target, logits, from_logits=True)
output_from_sigmoid = keras.losses.binary_crossentropy(
target, sigmoid_output)
np.testing.assert_allclose(
keras.backend.eval(output_from_logit),
keras.backend.eval(output_from_sigmoid), atol=1e-5)
def test_serialization(self):
fn = keras.losses.get('mse')
config = keras.losses.serialize(fn)
new_fn = keras.losses.deserialize(config)
self.assertEqual(fn, new_fn)
def test_categorical_hinge(self):
y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
[0.1, 0.2, 0.7]]))
y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
self.assertAllClose(expected_loss, np.mean(loss))
def test_serializing_loss_class(self):
orig_loss_class = _MSEMAELoss(0.3)
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
serialized = keras.losses.serialize(orig_loss_class)
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
deserialized = keras.losses.deserialize(serialized)
assert isinstance(deserialized, _MSEMAELoss)
assert deserialized.mse_fraction == 0.3
def test_serializing_model_with_loss_class(self):
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
model_filename = os.path.join(tmpdir, 'custom_loss.h5')
with self.cached_session():
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
loss = _MSEMAELoss(0.3)
inputs = keras.layers.Input((2,))
outputs = keras.layers.Dense(1, name='model_output')(inputs)
model = keras.models.Model(inputs, outputs)
model.compile(optimizer='sgd', loss={'model_output': loss})
model.fit(np.random.rand(256, 2), np.random.rand(256, 1))
if h5py is None:
return
model.save(model_filename)
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
loaded_model = keras.models.load_model(model_filename)
loaded_model.predict(np.random.rand(128, 2))
def test_loss_wrapper(self):
loss_fn = keras.losses.get('mse')
mse_obj = keras.losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__)
self.assertEqual(mse_obj.name, 'mean_squared_error')
self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.AUTO)
y_true = constant_op.constant([[1., 9.], [2., 5.]])
y_pred = constant_op.constant([[4., 8.], [12., 3.]])
sample_weight = constant_op.constant([1.2, 0.5])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [5, 52]
# weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26]
    # reduced_weighted_mse = (6 + 26) / 2 = 16
    # (re-derived in the standalone NumPy sketch after this class)
self.assertAllClose(self.evaluate(loss), 16, 1e-2)
def test_invalid_reduction(self):
with self.assertRaisesRegexp(ValueError, 'Invalid Reduction Key Foo.'):
keras.losses.MeanSquaredError(reduction='Foo')
mse_obj = keras.losses.MeanSquaredError()
y = constant_op.constant([1])
mse_obj.reduction = 'Bar'
with self.assertRaisesRegexp(ValueError, 'Invalid Reduction Key Bar.'):
mse_obj(y, y)
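# Standalone NumPy check (added for exposition; not part of the original file)
# of the hand-worked numbers in test_loss_wrapper above: per-example MSE is
# [5, 52], sample weights [1.2, 0.5] give [6, 26], and the mean over the batch
# of 2 examples is 16.
def _check_loss_wrapper_arithmetic():
  y_true = np.array([[1., 9.], [2., 5.]])
  y_pred = np.array([[4., 8.], [12., 3.]])
  weights = np.array([1.2, 0.5])
  per_example_mse = np.mean(np.square(y_pred - y_true), axis=-1)
  assert np.allclose(per_example_mse, [5., 52.])
  assert np.isclose(np.mean(per_example_mse * weights), 16.)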
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
def test_config(self):
mse_obj = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.SUM, name='mse_1')
self.assertEqual(mse_obj.name, 'mse_1')
self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)
def test_scalar_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)
def test_sample_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)
def test_timestep_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)
def test_zero_weighted(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mse_obj = keras.losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegexp(ValueError,
'weights can not be broadcast to values'):
mse_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mse_obj = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)
def test_sum_reduction(self):
mse_obj = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.SUM)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)
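# NumPy cross-check (added for exposition; not part of the original file) of
# the constant asserted in test_sample_weighted above: per-row squared-error
# sums are 110 and 187, weighting by [1.2, 3.4] gives 132 + 635.8 = 767.8, and
# dividing by the 6 elements matches the 767.8 / 6 used in the assertion.
def _check_mse_sample_weighted_constant():
  y_true = np.array([[1., 9., 2.], [-5., -2., 6.]])
  y_pred = np.array([[4., 8., 12.], [8., 1., 3.]])
  weights = np.array([[1.2], [3.4]])
  weighted = np.square(y_pred - y_true) * weights
  assert np.isclose(np.sum(weighted) / y_true.size, 767.8 / 6.)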
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
def test_config(self):
mae_obj = keras.losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.SUM, name='mae_1')
self.assertEqual(mae_obj.name, 'mae_1')
self.assertEqual(mae_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mae_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 5.5, 3)
def test_scalar_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 12.65, 3)
def test_sample_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3)
def test_timestep_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3)
def test_zero_weighted(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mae_obj = keras.losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegexp(ValueError,
'weights can not be broadcast to values'):
mae_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mae_obj = keras.losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3)
def test_sum_reduction(self):
mae_obj = keras.losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.SUM)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
def test_config(self):
mape_obj = keras.losses.MeanAbsolutePercentageError(
reduction=losses_utils.ReductionV2.SUM, name='mape_1')
self.assertEqual(mape_obj.name, 'mape_1')
self.assertEqual(mape_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 487.259, 3)
def test_sample_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3)
def test_timestep_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3)
def test_zero_weighted(self):
mape_obj = keras.losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_no_reduction(self):
mape_obj = keras.losses.MeanAbsolutePercentageError(
reduction=losses_utils.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [621.8518, 352.6666], 1e-3)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
def test_config(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError(
        reduction=losses_utils.ReductionV2.SUM, name='msle_1')
    self.assertEqual(msle_obj.name, 'msle_1')
self.assertEqual(msle_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3)
def test_scalar_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)
def test_sample_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)
def test_timestep_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)
def test_zero_weighted(self):
msle_obj = keras.losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_util.run_all_in_graph_and_eager_modes
class CosineSimilarityTest(test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = constant_op.constant(self.np_y_true)
self.y_pred = constant_op.constant(self.np_y_pred)
def test_config(self):
cosine_obj = keras.losses.CosineSimilarity(
axis=2, reduction=losses_utils.ReductionV2.SUM, name='cosine_loss')
self.assertEqual(cosine_obj.name, 'cosine_loss')
self.assertEqual(cosine_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
cosine_obj = keras.losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
cosine_obj = keras.losses.CosineSimilarity()
sample_weight = 2.3
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_sample_weighted(self):
self.setup()
cosine_obj = keras.losses.CosineSimilarity()
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true,
self.y_pred,
sample_weight=constant_op.constant(sample_weight))
expected_loss = np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
cosine_obj = keras.losses.CosineSimilarity()
np_y_true = self.np_y_true.reshape((2, 3, 1))
np_y_pred = self.np_y_pred.reshape((2, 3, 1))
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))
y_true = self.l2_norm(np_y_true, 2)
y_pred = self.l2_norm(np_y_pred, 2)
expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))
y_true = constant_op.constant(np_y_true)
y_pred = constant_op.constant(np_y_pred)
loss = cosine_obj(
y_true, y_pred, sample_weight=constant_op.constant(sample_weight))
expected_loss = np.mean(expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
cosine_obj = keras.losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = keras.losses.CosineSimilarity(axis=1)
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
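# NumPy reference sketch (added for exposition; not part of the original file):
# the expected values built in setup above are the dot products of the
# L2-normalized inputs along `axis`; the loss object then averages them over
# the batch.
def _np_cosine_similarity(y_true, y_pred, axis=1):
  y_true = np.asarray(y_true, dtype=np.float64)
  y_pred = np.asarray(y_pred, dtype=np.float64)
  def _l2_normalize(x):
    square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
    return x / np.sqrt(np.maximum(square_sum, 1e-12))
  return np.sum(_l2_normalize(y_true) * _l2_normalize(y_pred), axis=axis)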
@test_util.run_all_in_graph_and_eager_modes
class BinaryCrossentropyTest(test.TestCase):
def test_config(self):
bce_obj = keras.losses.BinaryCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name='bce_1')
self.assertEqual(bce_obj.name, 'bce_1')
self.assertEqual(bce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=dtypes.float32)
bce_obj = keras.losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
bce_obj = keras.losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Reduced loss = 15.33 / 4
self.assertAlmostEqual(self.evaluate(loss), 3.833, 3)
# Test with logits.
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced loss = (0 + 66.666) / 2
self.assertAlmostEqual(self.evaluate(loss), 33.333, 3)
def test_scalar_weighted(self):
bce_obj = keras.losses.BinaryCrossentropy()
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
loss = bce_obj(y_true, y_pred, sample_weight=2.3)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Weighted loss = [0, 15.33 * 2.3, 0, 0]
# Reduced loss = 15.33 * 2.3 / 4
self.assertAlmostEqual(self.evaluate(loss), 8.817, 3)
# Test with logits.
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=2.3)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted loss = [0 * 2.3, 66.666 * 2.3]
# Reduced loss = (0 + 66.666 * 2.3) / 2
self.assertAlmostEqual(self.evaluate(loss), 76.667, 3)
def test_sample_weighted(self):
bce_obj = keras.losses.BinaryCrossentropy()
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Reduced loss = 15.33 * 1.2 / 4
self.assertAlmostEqual(self.evaluate(loss), 4.6, 3)
# Test with logits.
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
weights = constant_op.constant([4, 3])
bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, 200 / 3]
# Weighted loss = [0 * 4, 66.666 * 3]
# Reduced loss = (0 + 66.666 * 3) / 2
self.assertAlmostEqual(self.evaluate(loss), 100, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
bce_obj = keras.losses.BinaryCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE)
loss = bce_obj(y_true, logits)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, (200)/3]
self.assertAllClose((0., 66.6666), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = constant_op.constant([[100.0, -100.0, -100.0]])
y_true = constant_op.constant([[1, 0, 1]])
label_smoothing = 0.1
# Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
# 1 = 1 - 0.5L
# 0 = 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = keras.losses.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
loss = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
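# NumPy check (added for exposition; not part of the original file) that the
# stable from_logits expression quoted in the comments above,
# max(x, 0) - x * z + log(1 + exp(-|x|)), equals the textbook form
# -(z * log(sigmoid(x)) + (1 - z) * log(1 - sigmoid(x))) for moderate logits.
def _check_bce_logits_identity():
  x = np.array([-3., -0.5, 0., 0.5, 3.])
  z = np.array([0., 1., 1., 0., 1.])
  stable = np.maximum(x, 0.) - x * z + np.log1p(np.exp(-np.abs(x)))
  sigmoid = 1. / (1. + np.exp(-x))
  naive = -(z * np.log(sigmoid) + (1. - z) * np.log(1. - sigmoid))
  assert np.allclose(stable, naive)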
@test_util.run_all_in_graph_and_eager_modes
class CategoricalCrossentropyTest(test.TestCase):
def test_config(self):
cce_obj = keras.losses.CategoricalCrossentropy(
        reduction=losses_utils.ReductionV2.SUM, name='cce_1')
    self.assertEqual(cce_obj.name, 'cce_1')
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=dtypes.int64)
y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
dtype=dtypes.float32)
cce_obj = keras.losses.CategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
def test_scalar_weighted(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
def test_sample_weighted(self):
cce_obj = keras.losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = constant_op.constant([[100.0, -100.0, -100.0]])
y_true = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
# Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100]
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# Label smoothing: z' = z * (1 - L) + L/n
# 1 = 1 - L + L/n
# 0 = L/n
# Applying the above two fns to the given input:
# -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
cce_obj = keras.losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
loss = cce_obj(y_true, logits)
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
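# NumPy check (added for exposition; not part of the original file) of the
# label smoothing arithmetic in test_label_smoothing above: with log-softmax
# ~[0, -200, -200] and smoothed target z' = z * (1 - L) + L / n, the loss
# reduces to 400 * L / n.
def _check_cce_label_smoothing(label_smoothing=0.1):
  log_softmax = np.array([0., -200., -200.])
  z = np.array([1., 0., 0.])
  n = z.size
  z_smooth = z * (1. - label_smoothing) + label_smoothing / n
  loss = -np.sum(z_smooth * log_softmax)
  assert np.isclose(loss, 400. * label_smoothing / n)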
@test_util.run_all_in_graph_and_eager_modes
class SparseCategoricalCrossentropyTest(test.TestCase):
def test_config(self):
cce_obj = keras.losses.SparseCategoricalCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name='scc')
self.assertEqual(cce_obj.name, 'scc')
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
dtype=dtypes.float32)
cce_obj = keras.losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = keras.losses.SparseCategoricalCrossentropy()
y_true = constant_op.constant([0, 1, 2])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
def test_scalar_weighted(self):
cce_obj = keras.losses.SparseCategoricalCrossentropy()
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
def test_sample_weighted(self):
cce_obj = keras.losses.SparseCategoricalCrossentropy()
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[0], [1], [2]])
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
def test_config(self):
hinge_obj = keras.losses.Hinge(
reduction=losses_utils.ReductionV2.SUM, name='hinge_loss')
self.assertEqual(hinge_obj.name, 'hinge_loss')
self.assertEqual(hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# reduced loss = (0.6 + 0.4125) / 2
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(0.506, self.evaluate(loss), atol=1e-3)
def test_scalar_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted_loss = [0.6 * 2.3, 0.4125 * 2.3]
# reduced loss = (0.6 + 0.4125) * 2.3 / 2
loss = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 1.164, 3)
# Verify we get the same output when the same input is given
loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAllClose(self.evaluate(loss), self.evaluate(loss_2), 1e-3)
def test_sample_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted loss = [0.6 * 1.2, 0.4125 * 3.4]
# reduced loss = (0.6 * 1.2 + 0.4125 * 3.4) / 2
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 1.061, 1e-3)
def test_timestep_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
y_pred = constant_op.constant(
[[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
# y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
# [[0.25], [1], [0.5], [0.6]]]
# 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
# [[0.75], [0], [0.5], [0.4]]]
# loss = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# weighted loss = [[2.1, 4.8, 4.5, 0], [3, 0, 0.5, 1.2]]
# reduced loss = (2.1 + 4.8 + 4.5 + 0 + 3 + 0 + 0.5 + 1.2) / 8
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 2.012, 1e-3)
def test_zero_weighted(self):
hinge_obj = keras.losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
loss = hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAllClose(self.evaluate(loss), 0., 1e-3)
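# NumPy cross-check (added for exposition; not part of the original file) of
# the unweighted hinge numbers worked out in the comments above: per-example
# losses [0.6, 0.4125] and a batch mean of 0.50625.
def _check_hinge_example():
  y_true = np.array([[0., 1., 0., 1.], [0., 0., 1., 1.]])
  y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]])
  y_signed = 2. * y_true - 1.  # map {0, 1} labels to {-1, 1}
  per_example = np.mean(np.maximum(0., 1. - y_signed * y_pred), axis=-1)
  assert np.allclose(per_example, [0.6, 0.4125])
  assert np.isclose(np.mean(per_example), 0.50625)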
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
def test_config(self):
sq_hinge_obj = keras.losses.SquaredHinge(
reduction=losses_utils.ReductionV2.SUM, name='sq_hinge_loss')
self.assertEqual(sq_hinge_obj.name, 'sq_hinge_loss')
self.assertEqual(sq_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# reduced loss = (0.485 + 0.2431) / 2
loss = sq_hinge_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(loss), 0.364, 1e-3)
def test_scalar_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted loss = [0.485 * 2.3, 0.2431 * 2.3]
# reduced loss = (0.485 + 0.2431) * 2.3 / 2
loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAllClose(self.evaluate(loss), 0.837, 1e-3)
# Verify we get the same output when the same input is given
loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted loss = [0.485 * 1.2, 0.2431 * 3.4]
# reduced loss = (0.485 * 1.2 + 0.2431 * 3.4) / 2
sample_weight = constant_op.constant([1.2, 3.4])
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 0.704, 1e-3)
def test_timestep_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
y_pred = constant_op.constant(
[[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
# y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
# [[0.25], [1], [0.5], [0.6]]]
# 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
# [[0.75], [0], [0.5], [0.4]]]
# loss = [[0.49, 0.64, 0.81, 0], [0.5625, 0, 0.25, 0.16]]
# weighted loss = [[1.47, 3.84, 4.05, 0], [2.25, 0, 0.25, 0.48]]
# reduced loss = (1.47 + 3.84 + 4.05 + 0 + 2.25 + 0 + 0.25 + 0.48) / 8
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 1.542, 1e-3)
def test_zero_weighted(self):
sq_hinge_obj = keras.losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
loss = sq_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAllClose(self.evaluate(loss), 0., 1e-3)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
def test_config(self):
cat_hinge_obj = keras.losses.CategoricalHinge(
reduction=losses_utils.ReductionV2.SUM, name='cat_hinge_loss')
self.assertEqual(cat_hinge_obj.name, 'cat_hinge_loss')
self.assertEqual(cat_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
y_pred = constant_op.constant([4, 8, 12, 8],
shape=(2, 2),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred)
# pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16]
# neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0, 48]
# cat_hinge = max(0., neg - pos + 1.) = [0, 65]
# reduced_loss = (0 + 65)/2 = 32.5
self.assertAlmostEqual(self.evaluate(loss), 32.5, 3)
def test_scalar_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 83.95, 3)
# Verify we get the same output when the same input is given
loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 124.1, 3)
def test_timestep_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 4.0, 3)
def test_zero_weighted(self):
cat_hinge_obj = keras.losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
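# NumPy cross-check (added for exposition; not part of the original file) of
# the categorical hinge arithmetic in test_unweighted above:
# pos = sum(y_true * y_pred), neg = max((1 - y_true) * y_pred), and the
# per-example loss is max(0, neg - pos + 1), giving [0, 65] and a mean of 32.5.
def _check_categorical_hinge_example():
  y_true = np.array([[1., 9.], [2., -5.]])
  y_pred = np.array([[4., 8.], [12., 8.]])
  pos = np.sum(y_true * y_pred, axis=-1)           # [76., -16.]
  neg = np.max((1. - y_true) * y_pred, axis=-1)    # [0., 48.]
  per_example = np.maximum(0., neg - pos + 1.)     # [0., 65.]
  assert np.isclose(np.mean(per_example), 32.5)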
@test_util.run_all_in_graph_and_eager_modes
class LogCoshTest(test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
def test_config(self):
logcosh_obj = keras.losses.LogCosh(
reduction=losses_utils.ReductionV2.SUM, name='logcosh_loss')
self.assertEqual(logcosh_obj.name, 'logcosh_loss')
self.assertEqual(logcosh_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
logcosh_obj = keras.losses.LogCosh()
loss = logcosh_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
logcosh_obj = keras.losses.LogCosh()
sample_weight = 2.3
loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = sample_weight * np.sum(
self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
logcosh_obj = keras.losses.LogCosh()
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
logcosh_obj = keras.losses.LogCosh()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
error = y_pred - y_true
expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(y_true)
loss = logcosh_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
logcosh_obj = keras.losses.LogCosh()
sample_weight = 0
loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
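# NumPy identity check (added for exposition; not part of the original file):
# the expression used in setup above, log((exp(e) + exp(-e)) / 2), is exactly
# log(cosh(e)), i.e. the per-element logcosh loss before reduction.
def _check_logcosh_identity():
  error = np.linspace(-3., 3., 7)
  assert np.allclose(
      np.log((np.exp(error) + np.exp(-error)) / 2.), np.log(np.cosh(error)))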
@test_util.run_all_in_graph_and_eager_modes
class PoissonTest(test.TestCase):
def setup(self):
self.np_y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
self.np_y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_losses = self.np_y_pred - np.multiply(self.np_y_true,
np.log(self.np_y_pred))
self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(self.np_y_true)
def test_config(self):
poisson_obj = keras.losses.Poisson(
reduction=losses_utils.ReductionV2.SUM, name='poisson')
self.assertEqual(poisson_obj.name, 'poisson')
self.assertEqual(poisson_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
poisson_obj = keras.losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
poisson_obj = keras.losses.Poisson()
sample_weight = 2.3
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = sample_weight * np.sum(
self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
poisson_obj = keras.losses.Poisson()
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
poisson_obj = keras.losses.Poisson()
y_true = self.np_y_true.reshape(2, 3, 1)
y_pred = self.np_y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))
y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(y_true)
loss = poisson_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
poisson_obj = keras.losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@test_util.run_all_in_graph_and_eager_modes
class KLDivergenceTest(test.TestCase):
def setup(self):
self.np_y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
self.np_y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))
self.batch_size = 2
self.expected_losses = np.multiply(self.np_y_true,
np.log(self.np_y_true / self.np_y_pred))
self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(self.np_y_true)
def test_config(self):
k_obj = keras.losses.KLDivergence(
reduction=losses_utils.ReductionV2.SUM, name='kld')
self.assertEqual(k_obj.name, 'kld')
self.assertEqual(k_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
k_obj = keras.losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
k_obj = keras.losses.KLDivergence()
sample_weight = 2.3
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = sample_weight * np.sum(
self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
k_obj = keras.losses.KLDivergence()
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(2, 3))
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
k_obj = keras.losses.KLDivergence()
y_true = self.np_y_true.reshape(2, 3, 1)
y_pred = self.np_y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3)
expected_losses = np.sum(
np.multiply(y_true, np.log(y_true / y_pred)), axis=-1)
y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(y_true)
loss = k_obj(
y_true, y_pred, sample_weight=constant_op.constant(sample_weight))
num_timesteps = 3
expected_loss = np.sum(expected_losses * sample_weight) / (
self.batch_size * num_timesteps)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
k_obj = keras.losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
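# NumPy reference sketch (added for exposition; not part of the original file):
# the expected values above follow the elementwise term
# y_true * log(y_true / y_pred), summed over the last axis to give one KL value
# per example, which the loss object then averages over the batch.
def _np_kl_divergence(y_true, y_pred):
  y_true = np.asarray(y_true, dtype=np.float64)
  y_pred = np.asarray(y_pred, dtype=np.float64)
  return np.sum(y_true * np.log(y_true / y_pred), axis=-1)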
@test_util.run_all_in_graph_and_eager_modes
class HuberLossTest(test.TestCase):
def huber_loss(self, y_true, y_pred, delta=1.0):
error = y_pred - y_true
abs_error = np.abs(error)
quadratic = np.minimum(abs_error, delta)
linear = np.subtract(abs_error, quadratic)
return np.add(
np.multiply(0.5, np.multiply(quadratic, quadratic)),
np.multiply(delta, linear))
def setup(self, delta=1.0):
self.np_y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
self.np_y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))
self.batch_size = 6
self.expected_losses = self.huber_loss(self.np_y_true, self.np_y_pred,
delta)
self.y_pred = constant_op.constant(self.np_y_pred)
self.y_true = constant_op.constant(self.np_y_true)
def test_config(self):
h_obj = keras.losses.Huber(
reduction=losses_utils.ReductionV2.SUM, name='huber')
self.assertEqual(h_obj.name, 'huber')
self.assertEqual(h_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct(self):
self.setup()
h_obj = keras.losses.Huber()
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
self.setup()
h_obj = keras.losses.Huber()
loss = h_obj(self.y_true, self.y_pred)
actual_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_scalar_weighted(self):
self.setup()
h_obj = keras.losses.Huber()
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
h_obj = keras.losses.Huber()
sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1))
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_timestep_weighted(self):
self.setup()
h_obj = keras.losses.Huber()
y_pred = self.np_y_pred.reshape((2, 3, 1))
y_true = self.np_y_true.reshape((2, 3, 1))
expected_losses = self.huber_loss(y_true, y_pred)
y_pred = constant_op.constant(y_pred)
y_true = constant_op.constant(y_true)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = h_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
actual_loss = np.multiply(expected_losses, sample_weight)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_zero_weighted(self):
self.setup()
h_obj = keras.losses.Huber()
sample_weight = 0
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
def test_non_default_delta(self):
self.setup(delta=0.8)
h_obj = keras.losses.Huber(delta=0.8)
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/losses_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom training loops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class LayerWithLosses(keras.layers.Layer):
def build(self, input_shape):
self.v = self.add_weight(
name='hey',
shape=(),
initializer='ones',
regularizer=keras.regularizers.l1(100))
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
return self.v * inputs
class LayerWithMetrics(keras.layers.Layer):
def build(self, input_shape):
self.mean = keras.metrics.Mean(name='mean_object')
def call(self, inputs):
self.add_metric(
math_ops.reduce_mean(inputs), name='mean_tensor', aggregation='mean')
self.add_metric(self.mean(inputs))
return inputs
class LayerWithTrainingArg(keras.layers.Layer):
def call(self, inputs, training=None):
self.training = training
if training:
return inputs
else:
return 0. * inputs
def add_loss_step(defun):
optimizer = keras.optimizer_v2.adam.Adam()
model = testing_utils.get_model_from_layers([LayerWithLosses()],
input_shape=(10,))
def train_step(x):
with backprop.GradientTape() as tape:
model(x)
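      # Two losses are expected here: the value added via `add_loss` in
      # `LayerWithLosses.call` plus the l1 regularization loss on `self.v`.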
assert len(model.losses) == 2
loss = math_ops.reduce_sum(model.losses)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
return loss
if defun:
train_step = def_function.function(train_step)
x = array_ops.ones((10, 10))
return train_step(x)
def batch_norm_step(defun):
optimizer = keras.optimizer_v2.adadelta.Adadelta()
model = testing_utils.get_model_from_layers([
keras.layers.BatchNormalization(momentum=0.9),
keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax')
],
input_shape=(10,))
def train_step(x, y):
with backprop.GradientTape() as tape:
y_pred = model(x, training=True)
loss = keras.losses.binary_crossentropy(y, y_pred)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
return loss, model(x, training=False)
if defun:
train_step = def_function.function(train_step)
x, y = array_ops.ones((10, 10)), array_ops.ones((10, 1))
return train_step(x, y)
def add_metric_step(defun):
optimizer = keras.optimizer_v2.rmsprop.RMSprop()
model = testing_utils.get_model_from_layers([
LayerWithMetrics(),
keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax')
],
input_shape=(10,))
def train_step(x, y):
with backprop.GradientTape() as tape:
y_pred_1 = model(x)
y_pred_2 = model(2 * x)
y_pred = y_pred_1 + y_pred_2
loss = keras.losses.mean_squared_error(y, y_pred)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
assert len(model.metrics) == 2
return [m.result() for m in model.metrics]
if defun:
train_step = def_function.function(train_step)
x, y = array_ops.ones((10, 10)), array_ops.zeros((10, 1))
metrics = train_step(x, y)
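  # The layer sees `x` (all ones) and `2 * x` on each step, so both the
  # `mean_tensor` metric and the `mean_object` metric average to 1.5.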
assert np.allclose(metrics[0], 1.5)
assert np.allclose(metrics[1], 1.5)
return metrics
@keras_parameterized.run_with_all_model_types
class CustomTrainingLoopTest(keras_parameterized.TestCase):
@parameterized.named_parameters(('add_loss_step', add_loss_step),
('add_metric_step', add_metric_step),
('batch_norm_step', batch_norm_step))
def test_eager_and_tf_function(self, train_step):
eager_result = train_step(defun=False)
fn_result = train_step(defun=True)
self.assertAllClose(eager_result, fn_result)
@parameterized.named_parameters(('eager', False), ('defun', True))
def test_training_arg_propagation(self, defun):
model = testing_utils.get_model_from_layers([LayerWithTrainingArg()],
input_shape=(1,))
def train_step(x):
return model(x), model(x, training=False), model(x, training=True)
if defun:
train_step = def_function.function(train_step)
x = array_ops.ones((1, 1))
results = train_step(x)
self.assertAllClose(results[0], array_ops.zeros((1, 1)))
self.assertAllClose(results[1], array_ops.zeros((1, 1)))
self.assertAllClose(results[2], array_ops.ones((1, 1)))
@parameterized.named_parameters(('eager', False), ('defun', True))
def test_learning_phase_propagation(self, defun):
class MyModel(keras.layers.Layer):
def __init__(self):
super(MyModel, self).__init__()
self.layer = LayerWithTrainingArg()
def call(self, inputs):
return self.layer(inputs)
model = MyModel()
def train_step(x):
no_learning_phase_out = model(x)
self.assertIsNone(model.layer.training)
with keras.backend.learning_phase_scope(0):
inf_learning_phase_out = model(x)
self.assertEqual(model.layer.training, 0)
with keras.backend.learning_phase_scope(1):
train_learning_phase_out = model(x)
self.assertEqual(model.layer.training, 1)
return [
no_learning_phase_out, inf_learning_phase_out,
train_learning_phase_out
]
if defun:
train_step = def_function.function(train_step)
x = array_ops.ones((1, 1))
results = train_step(x)
self.assertAllClose(results[0], array_ops.zeros((1, 1)))
self.assertAllClose(results[1], array_ops.zeros((1, 1)))
self.assertAllClose(results[2], array_ops.ones((1, 1)))
@parameterized.named_parameters(('eager', False), ('defun', True))
def test_training_arg_priorities(self, defun):
class MyModel(keras.layers.Layer):
def __init__(self):
super(MyModel, self).__init__()
self.layer = LayerWithTrainingArg()
def call(self, inputs, training=False):
return self.layer(inputs)
model = MyModel()
def train_step(x):
explicit_out = model(x, training=True)
default_out = model(x)
with keras.backend.learning_phase_scope(1):
parent_out = model(x, training=False)
lr_out = model(x)
return [explicit_out, default_out, parent_out, lr_out]
if defun:
train_step = def_function.function(train_step)
x = array_ops.ones((1, 1))
results = train_step(x)
self.assertAllClose(results[0], array_ops.ones((1, 1)))
self.assertAllClose(results[1], array_ops.zeros((1, 1)))
self.assertAllClose(results[2], array_ops.zeros((1, 1)))
self.assertAllClose(results[3], array_ops.ones((1, 1)))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| tensorflow-master | tensorflow/python/keras/custom_training_loop_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.platform import test
class KerasIntegrationTest(keras_parameterized.TestCase):
def _save_and_reload_model(self, model):
self.temp_dir = self.get_temp_dir()
fpath = os.path.join(self.temp_dir,
'test_model_%s' % (random.randint(0, 1e7),))
if context.executing_eagerly():
save_format = 'tf'
else:
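      # The HDF5 format can only serialize Sequential and functional (graph
      # network) models, so subclassed models are returned unchanged here.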
if (not isinstance(model, keras.Sequential) and
not model._is_graph_network):
return model # Not supported
save_format = 'h5'
model.save(fpath, save_format=save_format)
model = keras.models.load_model(fpath)
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class VectorClassificationIntegrationTest(keras_parameterized.TestCase):
def test_vector_classification(self):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(16, activation='relu'),
keras.layers.Dropout(0.1),
keras.layers.Dense(y_train.shape[-1], activation='softmax')],
input_shape=x_train.shape[1:])
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
def test_vector_classification_shared_model(self):
# Test that Sequential models that feature internal updates
# and internal losses can be shared.
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
base_model = testing_utils.get_model_from_layers(
[keras.layers.Dense(16,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-5),
bias_regularizer=keras.regularizers.l2(1e-5)),
keras.layers.BatchNormalization()],
input_shape=x_train.shape[1:])
x = keras.layers.Input(x_train.shape[1:])
y = base_model(x)
y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
model = keras.models.Model(x, y)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
if not testing_utils.should_run_eagerly():
self.assertEqual(len(model.get_losses_for(None)), 2)
self.assertEqual(len(model.get_updates_for(x)), 2)
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
@keras_parameterized.run_all_keras_modes
class SequentialIntegrationTest(KerasIntegrationTest):
def test_sequential_save_and_pop(self):
# Test the following sequence of actions:
# - construct a Sequential model and train it
# - save it
# - load it
# - pop its last layer and add a new layer instead
# - continue training
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.Sequential([
keras.layers.Dense(16, activation='relu'),
keras.layers.Dropout(0.1),
keras.layers.Dense(y_train.shape[-1], activation='softmax')
])
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x_train, y_train, epochs=1, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
model = self._save_and_reload_model(model)
# TODO(b/134537740): model.pop doesn't update model outputs properly when
# model.outputs is already defined, so just set to `None` for now.
model.inputs = None
model.outputs = None
model.pop()
model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
    # TODO(b/134523282): There is a bug with Sequential models, so the model
# must be marked as compiled=False to ensure the next compile goes through.
model._is_compiled = False
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
model = self._save_and_reload_model(model)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
# See b/122473407
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
def test_timeseries_classification(self):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
layers = [
keras.layers.LSTM(5, return_sequences=True),
keras.layers.GRU(y_train.shape[-1], activation='softmax')
]
model = testing_utils.get_model_from_layers(
layers, input_shape=x_train.shape[1:])
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(x_train, y_train, epochs=15, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
def test_timeseries_classification_sequential_tf_rnn(self):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
input_shape=x_train.shape[1:]))
model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1],
activation='softmax',
dtype=dtypes.float32)))
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(x_train, y_train, epochs=15, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ImageClassificationIntegrationTest(keras_parameterized.TestCase):
def test_image_classification(self):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10, 10, 3),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
layers = [
keras.layers.Conv2D(4, 3, padding='same', activation='relu'),
keras.layers.Conv2D(8, 3, padding='same'),
keras.layers.BatchNormalization(),
keras.layers.Conv2D(8, 3, padding='same'),
keras.layers.Flatten(),
keras.layers.Dense(y_train.shape[-1], activation='softmax')
]
model = testing_utils.get_model_from_layers(
layers, input_shape=x_train.shape[1:])
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
@keras_parameterized.run_all_keras_modes
class ActivationV2IntegrationTest(keras_parameterized.TestCase):
"""Tests activation function V2 in model exporting and loading.
  This test verifies that in TF 2.x, when 'tf.nn.softmax' is used as an
  activation function, model exporting and loading work as expected.
Check b/123041942 for details.
"""
def test_serialization_v2_model(self):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.Sequential([
keras.layers.Flatten(input_shape=x_train.shape[1:]),
keras.layers.Dense(10, activation=nn.relu),
# To mimic 'tf.nn.softmax' used in TF 2.x.
keras.layers.Dense(y_train.shape[-1], activation=nn.softmax_v2),
])
# Check if 'softmax' is in model.get_config().
last_layer_activation = model.get_layer(index=2).get_config()['activation']
self.assertEqual(last_layer_activation, 'softmax')
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x_train, y_train, epochs=2, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
keras.saving.saved_model.export_saved_model(model, output_path)
loaded_model = keras.saving.saved_model.load_from_saved_model(output_path)
self.assertEqual(model.summary(), loaded_model.summary())
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/integration_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import unittest
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class TestCase(test.TestCase, parameterized.TestCase):
def tearDown(self):
keras.backend.clear_session()
super(TestCase, self).tearDown()
# TODO(kaftan): Possibly enable 'subclass_custom_build' when tests begin to pass
# it. Or perhaps make 'subclass' always use a custom build method.
def run_with_all_model_types(
test_or_class=None,
exclude_models=None):
"""Execute the decorated test with all Keras model types.
This decorator is intended to be applied either to individual test methods in
a `keras_parameterized.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test
method (or all test methods in the class) to be executed multiple times - once
for each Keras model type.
The Keras model types are: ['functional', 'subclass', 'sequential']
Note: if stacking this decorator with absl.testing's parameterized decorators,
those should be at the bottom of the stack.
Various methods in `testing_utils` to get models will auto-generate a model
of the currently active Keras model type. This allows unittests to confirm
the equivalence between different Keras models.
For example, consider the following unittest:
```python
  class MyTests(keras_parameterized.TestCase):
    @keras_parameterized.run_with_all_model_types(
        exclude_models=['sequential'])
def test_foo(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
This test tries building a small mlp as both a functional model and as a
subclass model.
We can also annotate the whole class if we want this to apply to all tests in
the class:
```python
  @keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
  class MyTests(keras_parameterized.TestCase):
def test_foo(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
exclude_models: A collection of Keras model types to not run.
(May also be a single model type not wrapped in a collection).
Defaults to None.
Returns:
Returns a decorator that will run the decorated test method multiple times:
once for each desired Keras model type.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
model_types = ['functional', 'subclass', 'sequential']
params = [('_%s' % model, model) for model in model_types
if model not in nest.flatten(exclude_models)]
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, model_type, *args, **kwargs):
"""A run of a single test case w/ the specified model type."""
if model_type == 'functional':
_test_functional_model_type(f, self, *args, **kwargs)
elif model_type == 'subclass':
_test_subclass_model_type(f, self, *args, **kwargs)
elif model_type == 'sequential':
_test_sequential_model_type(f, self, *args, **kwargs)
else:
raise ValueError('Unknown model type: %s' % (model_type,))
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _test_functional_model_type(f, test_or_class, *args, **kwargs):
with testing_utils.model_type_scope('functional'):
f(test_or_class, *args, **kwargs)
def _test_subclass_model_type(f, test_or_class, *args, **kwargs):
with testing_utils.model_type_scope('subclass'):
f(test_or_class, *args, **kwargs)
def _test_sequential_model_type(f, test_or_class, *args, **kwargs):
with testing_utils.model_type_scope('sequential'):
f(test_or_class, *args, **kwargs)
def run_all_keras_modes(test_or_class=None,
config=None,
always_skip_v1=False,
always_skip_eager=False):
"""Execute the decorated test with all keras execution modes.
This decorator is intended to be applied either to individual test methods in
a `keras_parameterized.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test
method (or all test methods in the class) to be executed multiple times -
once executing in legacy graph mode, once running eagerly and with
`should_run_eagerly` returning True, and once running eagerly with
`should_run_eagerly` returning False.
If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and
the test will only run twice.
Note: if stacking this decorator with absl.testing's parameterized decorators,
those should be at the bottom of the stack.
For example, consider the following unittest:
```python
  class MyTests(keras_parameterized.TestCase):
    @keras_parameterized.run_all_keras_modes
def test_foo(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
This test will try compiling & fitting the small functional mlp using all
three Keras execution modes.
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
always_skip_v1: If True, does not try running the legacy graph mode even
when Tensorflow v2 behavior is not enabled.
always_skip_eager: If True, does not execute the decorated test
with eager execution modes.
Returns:
Returns a decorator that will run the decorated test method multiple times.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
params = [('_v2_function', 'v2_function')]
if not always_skip_eager:
params.append(('_v2_eager', 'v2_eager'))
if not (always_skip_v1 or tf2.enabled()):
params.append(('_v1_graph', 'v1_graph'))
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
"""A run of a single test case w/ specified run mode."""
if run_mode == 'v1_graph':
_v1_graph_test(f, self, config, *args, **kwargs)
elif run_mode == 'v2_function':
_v2_graph_functions_test(f, self, *args, **kwargs)
elif run_mode == 'v2_eager':
_v2_eager_test(f, self, *args, **kwargs)
else:
        raise ValueError('Unknown run mode %s' % run_mode)
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _v1_graph_test(f, test_or_class, config, *args, **kwargs):
with context.graph_mode(), testing_utils.run_eagerly_scope(False):
with test_or_class.test_session(use_gpu=True, config=config):
f(test_or_class, *args, **kwargs)
def _v2_graph_functions_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(False):
f(test_or_class, *args, **kwargs)
def _v2_eager_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(True):
f(test_or_class, *args, **kwargs)
def _test_or_class_decorator(test_or_class, single_method_decorator):
"""Decorate a test or class with a decorator intended for one method.
If the test_or_class is a class:
This will apply the decorator to all test methods in the class.
If the test_or_class is an iterable of already-parameterized test cases:
This will apply the decorator to all the cases, and then flatten the
resulting cross-product of test cases. This allows stacking the Keras
parameterized decorators w/ each other, and to apply them to test methods
that have already been marked with an absl parameterized decorator.
Otherwise, treat the obj as a single method and apply the decorator directly.
Args:
test_or_class: A test method (that may have already been decorated with a
      parameterized decorator), or a test class that extends
      keras_parameterized.TestCase.
single_method_decorator:
A parameterized decorator intended for a single test method.
Returns:
The decorated result.
"""
def _decorate_test_or_class(obj):
if isinstance(obj, collections.Iterable):
return itertools.chain.from_iterable(
single_method_decorator(method) for method in obj)
if isinstance(obj, type):
cls = obj
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix):
setattr(cls, name, single_method_decorator(value))
cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__,
cls.__dict__.copy())
return cls
return single_method_decorator(obj)
if test_or_class is not None:
return _decorate_test_or_class(test_or_class)
return _decorate_test_or_class
| tensorflow-master | tensorflow/python/keras/keras_parameterized.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for Keras models, providing compatibility with other frameworks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.wrappers import scikit_learn
del absolute_import
del division
del print_function
| tensorflow-master | tensorflow/python/keras/wrappers/__init__.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Scikit-learn API wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
INPUT_DIM = 5
HIDDEN_DIM = 5
TRAIN_SAMPLES = 10
TEST_SAMPLES = 5
NUM_CLASSES = 2
BATCH_SIZE = 5
EPOCHS = 1
def build_fn_clf(hidden_dim):
model = keras.models.Sequential()
model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(hidden_dim))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(NUM_CLASSES))
model.add(keras.layers.Activation('softmax'))
model.compile(
optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
return model
def assert_classification_works(clf):
np.random.seed(42)
(x_train, y_train), (x_test, _) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
clf.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)
score = clf.score(x_train, y_train, batch_size=BATCH_SIZE)
assert np.isscalar(score) and np.isfinite(score)
preds = clf.predict(x_test, batch_size=BATCH_SIZE)
assert preds.shape == (TEST_SAMPLES,)
for prediction in np.unique(preds):
assert prediction in range(NUM_CLASSES)
proba = clf.predict_proba(x_test, batch_size=BATCH_SIZE)
assert proba.shape == (TEST_SAMPLES, NUM_CLASSES)
assert np.allclose(np.sum(proba, axis=1), np.ones(TEST_SAMPLES))
def build_fn_reg(hidden_dim):
model = keras.models.Sequential()
model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(hidden_dim))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation('linear'))
model.compile(
optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy'])
return model
def assert_regression_works(reg):
np.random.seed(42)
(x_train, y_train), (x_test, _) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
reg.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)
score = reg.score(x_train, y_train, batch_size=BATCH_SIZE)
assert np.isscalar(score) and np.isfinite(score)
preds = reg.predict(x_test, batch_size=BATCH_SIZE)
assert preds.shape == (TEST_SAMPLES,)
class ScikitLearnAPIWrapperTest(test.TestCase):
def test_classify_build_fn(self):
with self.cached_session():
clf = keras.wrappers.scikit_learn.KerasClassifier(
build_fn=build_fn_clf,
hidden_dim=HIDDEN_DIM,
batch_size=BATCH_SIZE,
epochs=EPOCHS)
assert_classification_works(clf)
def test_classify_class_build_fn(self):
class ClassBuildFnClf(object):
def __call__(self, hidden_dim):
return build_fn_clf(hidden_dim)
with self.cached_session():
clf = keras.wrappers.scikit_learn.KerasClassifier(
build_fn=ClassBuildFnClf(),
hidden_dim=HIDDEN_DIM,
batch_size=BATCH_SIZE,
epochs=EPOCHS)
assert_classification_works(clf)
def test_classify_inherit_class_build_fn(self):
class InheritClassBuildFnClf(keras.wrappers.scikit_learn.KerasClassifier):
def __call__(self, hidden_dim):
return build_fn_clf(hidden_dim)
with self.cached_session():
clf = InheritClassBuildFnClf(
build_fn=None,
hidden_dim=HIDDEN_DIM,
batch_size=BATCH_SIZE,
epochs=EPOCHS)
assert_classification_works(clf)
def test_regression_build_fn(self):
with self.cached_session():
reg = keras.wrappers.scikit_learn.KerasRegressor(
build_fn=build_fn_reg,
hidden_dim=HIDDEN_DIM,
batch_size=BATCH_SIZE,
epochs=EPOCHS)
assert_regression_works(reg)
def test_regression_class_build_fn(self):
class ClassBuildFnReg(object):
def __call__(self, hidden_dim):
return build_fn_reg(hidden_dim)
with self.cached_session():
reg = keras.wrappers.scikit_learn.KerasRegressor(
build_fn=ClassBuildFnReg(),
hidden_dim=HIDDEN_DIM,
batch_size=BATCH_SIZE,
epochs=EPOCHS)
assert_regression_works(reg)
def test_regression_inherit_class_build_fn(self):
class InheritClassBuildFnReg(keras.wrappers.scikit_learn.KerasRegressor):
def __call__(self, hidden_dim):
return build_fn_reg(hidden_dim)
with self.cached_session():
reg = InheritClassBuildFnReg(
build_fn=None,
hidden_dim=HIDDEN_DIM,
batch_size=BATCH_SIZE,
epochs=EPOCHS)
assert_regression_works(reg)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/wrappers/scikit_learn_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for using the Scikit-Learn API with Keras models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import types
import numpy as np
from tensorflow.python.keras import losses
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.utils.generic_utils import has_arg
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.util.tf_export import keras_export
class BaseWrapper(object):
"""Base class for the Keras scikit-learn wrapper.
Warning: This class should not be used directly.
Use descendant classes instead.
Arguments:
build_fn: callable function or class instance
**sk_params: model parameters & fitting parameters
The `build_fn` should construct, compile and return a Keras model, which
will then be used to fit/predict. One of the following
three values could be passed to `build_fn`:
1. A function
2. An instance of a class that implements the `__call__` method
3. None. This means you implement a class that inherits from either
`KerasClassifier` or `KerasRegressor`. The `__call__` method of the
present class will then be treated as the default `build_fn`.
`sk_params` takes both model parameters and fitting parameters. Legal model
parameters are the arguments of `build_fn`. Note that like all other
estimators in scikit-learn, `build_fn` should provide default values for
its arguments, so that you could create the estimator without passing any
values to `sk_params`.
`sk_params` could also accept parameters for calling `fit`, `predict`,
`predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`).
  Fitting (predicting) parameters are selected in the following order:
1. Values passed to the dictionary arguments of
`fit`, `predict`, `predict_proba`, and `score` methods
2. Values passed to `sk_params`
3. The default values of the `keras.models.Sequential`
`fit`, `predict`, `predict_proba` and `score` methods
When using scikit-learn's `grid_search` API, legal tunable parameters are
those you could pass to `sk_params`, including fitting parameters.
In other words, you could use `grid_search` to search for the best
`batch_size` or `epochs` as well as the model parameters.
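  Example (an illustrative sketch only; `make_model`, `x_train`, `y_train` and
  `x_test` are placeholders supplied by the user, not part of this module):
  ```python
  def make_model(hidden_dim=16):
    model = Sequential()
    model.add(Dense(hidden_dim, activation='relu', input_shape=(8,)))
    model.add(Dense(1))
    model.compile(optimizer='sgd', loss='mse')
    return model
  reg = KerasRegressor(build_fn=make_model, hidden_dim=32,
                       epochs=2, batch_size=4)
  reg.fit(x_train, y_train)    # builds the model via `make_model`, then fits
  preds = reg.predict(x_test)  # `hidden_dim`, `epochs` and `batch_size` were
                               # all passed through `sk_params`
  ```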
"""
def __init__(self, build_fn=None, **sk_params):
self.build_fn = build_fn
self.sk_params = sk_params
self.check_params(sk_params)
def check_params(self, params):
"""Checks for user typos in `params`.
Arguments:
params: dictionary; the parameters to be checked
Raises:
ValueError: if any member of `params` is not a valid argument.
"""
legal_params_fns = [
Sequential.fit, Sequential.predict, Sequential.predict_classes,
Sequential.evaluate
]
if self.build_fn is None:
legal_params_fns.append(self.__call__)
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
legal_params_fns.append(self.build_fn.__call__)
else:
legal_params_fns.append(self.build_fn)
for params_name in params:
for fn in legal_params_fns:
if has_arg(fn, params_name):
break
else:
if params_name != 'nb_epoch':
raise ValueError('{} is not a legal parameter'.format(params_name))
def get_params(self, **params): # pylint: disable=unused-argument
"""Gets parameters for this estimator.
Arguments:
**params: ignored (exists for API compatibility).
Returns:
Dictionary of parameter names mapped to their values.
"""
res = copy.deepcopy(self.sk_params)
res.update({'build_fn': self.build_fn})
return res
def set_params(self, **params):
"""Sets the parameters of this estimator.
Arguments:
**params: Dictionary of parameter names mapped to their values.
Returns:
self
"""
self.check_params(params)
self.sk_params.update(params)
return self
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Arguments:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
"""
if self.build_fn is None:
self.model = self.__call__(**self.filter_sk_params(self.__call__))
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
self.model = self.build_fn(
**self.filter_sk_params(self.build_fn.__call__))
else:
self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
if (losses.is_categorical_crossentropy(self.model.loss) and
len(y.shape) != 2):
y = to_categorical(y)
fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
fit_args.update(kwargs)
history = self.model.fit(x, y, **fit_args)
return history
def filter_sk_params(self, fn, override=None):
"""Filters `sk_params` and returns those in `fn`'s arguments.
Arguments:
fn : arbitrary function
override: dictionary, values to override `sk_params`
Returns:
res : dictionary containing variables
in both `sk_params` and `fn`'s arguments.
"""
override = override or {}
res = {}
for name, value in self.sk_params.items():
if has_arg(fn, name):
res.update({name: value})
res.update(override)
return res
@keras_export('keras.wrappers.scikit_learn.KerasClassifier')
class KerasClassifier(BaseWrapper):
"""Implementation of the scikit-learn classifier API for Keras.
"""
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Arguments:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
Raises:
ValueError: In case of invalid shape for `y` argument.
"""
y = np.array(y)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
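      # `np.unique` returns the classes sorted, so `searchsorted` maps each
      # original label onto its integer index into `self.classes_`.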
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
return super(KerasClassifier, self).fit(x, y, **kwargs)
def predict(self, x, **kwargs):
"""Returns the class predictions for the given test data.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_classes`.
Returns:
preds: array-like, shape `(n_samples,)`
Class predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)
classes = self.model.predict_classes(x, **kwargs)
return self.classes_[classes]
def predict_proba(self, x, **kwargs):
"""Returns class probability estimates for the given test data.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
            of `Sequential.predict_proba`.
Returns:
proba: array-like, shape `(n_samples, n_outputs)`
Class probability estimates.
In the case of binary classification,
to match the scikit-learn API,
will return an array of shape `(n_samples, 2)`
            (instead of `(n_samples, 1)` as in Keras).
"""
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
probs = self.model.predict_proba(x, **kwargs)
# check if binary classification
if probs.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probs = np.hstack([1 - probs, probs])
return probs
def score(self, x, y, **kwargs):
"""Returns the mean accuracy on the given test data and labels.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean accuracy of predictions on `x` wrt. `y`.
Raises:
ValueError: If the underlying model isn't configured to
compute accuracy. You should pass `metrics=["accuracy"]` to
the `.compile()` method of the model.
"""
y = np.searchsorted(self.classes_, y)
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss_name = self.model.loss
if hasattr(loss_name, '__name__'):
loss_name = loss_name.__name__
if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
outputs = self.model.evaluate(x, y, **kwargs)
if not isinstance(outputs, list):
outputs = [outputs]
for name, output in zip(self.model.metrics_names, outputs):
if name in ['accuracy', 'acc']:
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
@keras_export('keras.wrappers.scikit_learn.KerasRegressor')
class KerasRegressor(BaseWrapper):
"""Implementation of the scikit-learn regressor API for Keras.
"""
def predict(self, x, **kwargs):
"""Returns predictions for the given test data.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.predict`.
Returns:
preds: array-like, shape `(n_samples,)`
Predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict, kwargs)
return np.squeeze(self.model.predict(x, **kwargs))
def score(self, x, y, **kwargs):
"""Returns the mean loss on the given test data and labels.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
            Mean loss of predictions on `x` wrt. `y`, negated so that higher
            values indicate a better fit (scikit-learn scores are maximized).
"""
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss = self.model.evaluate(x, y, **kwargs)
if isinstance(loss, list):
return -loss[0]
return -loss
| tensorflow-master | tensorflow/python/keras/wrappers/scikit_learn.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention layers that can be used in sequence DNN/CNN models.
This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
Attention is formed by three tensors: Query, Key and Value.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class BaseDenseAttention(Layer):
"""Base Attention class for Dense networks.
This class is suitable for Dense or CNN networks, and not for RNN networks.
Implementations of attention mechanisms should inherit from this class, and
reuse the `apply_attention_scores()` method.
Args:
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
"""
def __init__(self, causal=False, **kwargs):
super(BaseDenseAttention, self).__init__(**kwargs)
self.causal = causal
self.supports_masking = True
def _calculate_scores(self, query, key):
"""Calculates attention scores.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
    raise NotImplementedError
def _apply_scores(self, scores, value, scores_mask=None):
"""Applies attention scores to the given value tensor.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
`[batch_size, Tv]` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates `attention_distribution = softmax(scores)`, then
      returns `matmul(attention_distribution, value)`.
* Apply `query_mask` and return the result.
Args:
scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
value: Value tensor of shape `[batch_size, Tv, dim]`.
scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
`[batch_size, Tq, Tv]`. If given, scores at positions where
`scores_mask==False` do not contribute to the result. It must contain
at least one `True` value in each line along the last dimension.
Returns:
Tensor of shape `[batch_size, Tq, dim]`.
"""
if scores_mask is not None:
padding_mask = math_ops.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention distribution.
scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
attention_distribution = nn.softmax(scores)
return math_ops.matmul(attention_distribution, value)
# TODO(b/125916026): Consider exposing a __call__ method with named args.
def call(self, inputs, mask=None):
self._validate_call_args(inputs=inputs, mask=mask)
q = inputs[0]
v = inputs[1]
k = inputs[2] if len(inputs) > 2 else v
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
scores = self._calculate_scores(query=q, key=k)
if v_mask is not None:
# Mask of shape [batch_size, 1, Tv].
v_mask = array_ops.expand_dims(v_mask, axis=-2)
if self.causal:
# Creates a lower triangular mask, so position i cannot attend to
# positions j>i. This prevents the flow of information from the future
# into the past.
scores_shape = array_ops.shape(scores)
# causal_mask_shape = [1, Tq, Tv].
causal_mask_shape = array_ops.concat(
[array_ops.ones_like(scores_shape[:-2]), scores_shape[-2:]],
axis=0)
causal_mask = _lower_triangular_mask(causal_mask_shape)
else:
causal_mask = None
scores_mask = _merge_masks(v_mask, causal_mask)
result = self._apply_scores(scores=scores, value=v, scores_mask=scores_mask)
if q_mask is not None:
# Mask of shape [batch_size, Tq, 1].
q_mask = array_ops.expand_dims(q_mask, axis=-1)
result *= math_ops.cast(q_mask, dtype=result.dtype)
return result
def compute_mask(self, inputs, mask=None):
self._validate_call_args(inputs=inputs, mask=mask)
if mask:
q_mask = mask[0]
if q_mask is None:
return None
return ops.convert_to_tensor(q_mask)
return None
def _validate_call_args(self, inputs, mask):
"""Validates arguments of the call method."""
class_name = self.__class__.__name__
if not isinstance(inputs, list):
raise ValueError(
'{} layer must be called on a list of inputs, namely [query, value] '
'or [query, value, key].'.format(class_name))
if len(inputs) < 2 or len(inputs) > 3:
raise ValueError(
'{} layer accepts inputs list of length 2 or 3, '
'namely [query, value] or [query, value, key]. '
'Given length: {}'.format(class_name, len(inputs)))
if mask:
if not isinstance(mask, list):
raise ValueError(
'{} layer mask must be a list, '
'namely [query_mask, value_mask].'.format(class_name))
if len(mask) != 2:
raise ValueError(
'{} layer mask must be a list of length 2, namely [query_mask, '
'value_mask]. Given length: {}'.format(class_name, len(mask)))
@keras_export('keras.layers.Attention')
class Attention(BaseDenseAttention):
"""Dot-product attention layer, a.k.a. Luong-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot
product: `scores = tf.matmul(query, key, transpose_b=True)`.
2. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
3. Use `distribution` to create a linear combination of `value` with
    shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the attention
scores.
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
  The meaning of `query`, `value` and `key` depends on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `Attention` in a CNN+Attention network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.Attention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=False, **kwargs):
super(Attention, self).__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
"""Creates scale variable if use_scale==True."""
if self.use_scale:
self.scale = self.add_weight(
name='scale',
shape=(),
initializer=init_ops.ones_initializer(),
dtype=self.dtype,
trainable=True)
else:
self.scale = None
super(Attention, self).build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a query-key dot product.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
scores = math_ops.matmul(query, key, transpose_b=True)
if self.scale is not None:
scores *= self.scale
return scores
@keras_export('keras.layers.AdditiveAttention')
class AdditiveAttention(BaseDenseAttention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Reshape `query` and `value` into shapes `[batch_size, Tq, 1, dim]`
and `[batch_size, 1, Tv, dim]` respectively.
2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear
sum: `scores = tf.reduce_sum(tf.tanh(query + value), axis=-1)`
3. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
4. Use `distribution` to create a linear combination of `value` with
shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
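A minimal standalone sketch of these steps (the shapes below are illustrative
assumptions, e.g. `batch_size=2`, `Tq=3`, `Tv=4`, `dim=5`; `value` doubles as
the key, which is the default when no `key` is given):
```python
query = tf.ones((2, 3, 5))
value = tf.ones((2, 4, 5))
q = tf.expand_dims(query, axis=-2)               # [2, 3, 1, 5]
v = tf.expand_dims(value, axis=-3)               # [2, 1, 4, 5]
scores = tf.reduce_sum(tf.tanh(q + v), axis=-1)  # [2, 3, 4]
distribution = tf.nn.softmax(scores)             # [2, 3, 4]
result = tf.matmul(distribution, value)          # [2, 3, 5]
```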
Args:
use_scale: If `True`, will create a variable to scale the attention scores.
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
The meaning of `query`, `value` and `key` depends on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `AdditiveAttention` in a CNN+Attention
network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.AdditiveAttention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=True, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
v_shape = tensor_shape.TensorShape(input_shape[1])
dim = v_shape[-1]
if isinstance(dim, tensor_shape.Dimension):
dim = dim.value
if self.use_scale:
self.scale = self.add_weight(
name='scale',
shape=[dim],
initializer=init_ops.glorot_uniform_initializer(),
dtype=self.dtype,
trainable=True)
else:
self.scale = None
super(AdditiveAttention, self).build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = array_ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = array_ops.expand_dims(key, axis=-3)
if self.use_scale:
scale = self.scale
else:
scale = 1.
return math_ops.reduce_sum(
scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)
def _lower_triangular_mask(shape):
"""Creates a lower-triangular boolean mask over the last 2 dimensions."""
row_index = math_ops.cumsum(
array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)
col_index = math_ops.cumsum(
array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)
return math_ops.greater_equal(row_index, col_index)
def _merge_masks(x, y):
if x is None:
return y
if y is None:
return x
return math_ops.logical_and(x, y)
| tensorflow-master | tensorflow/python/keras/layers/dense_attention.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for advanced activation layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class AdvancedActivationsTest(keras_parameterized.TestCase):
def test_leaky_relu(self):
for alpha in [0., .5, -1.]:
testing_utils.layer_test(keras.layers.LeakyReLU,
kwargs={'alpha': alpha},
input_shape=(2, 3, 4))
def test_prelu(self):
testing_utils.layer_test(keras.layers.PReLU, kwargs={},
input_shape=(2, 3, 4))
def test_prelu_share(self):
testing_utils.layer_test(keras.layers.PReLU,
kwargs={'shared_axes': 1},
input_shape=(2, 3, 4))
def test_elu(self):
for alpha in [0., .5, -1.]:
testing_utils.layer_test(keras.layers.ELU,
kwargs={'alpha': alpha},
input_shape=(2, 3, 4))
def test_thresholded_relu(self):
testing_utils.layer_test(keras.layers.ThresholdedReLU,
kwargs={'theta': 0.5},
input_shape=(2, 3, 4))
def test_softmax(self):
testing_utils.layer_test(keras.layers.Softmax,
kwargs={'axis': 1},
input_shape=(2, 3, 4))
def test_relu(self):
testing_utils.layer_test(keras.layers.ReLU,
kwargs={'max_value': 10},
input_shape=(2, 3, 4))
x = keras.backend.ones((3, 4))
if not context.executing_eagerly():
# Test that we use `leaky_relu` when appropriate in graph mode.
self.assertTrue(
'LeakyRelu' in keras.layers.ReLU(negative_slope=0.2)(x).name)
# Test that we use `relu` when appropriate in graph mode.
self.assertTrue('Relu' in keras.layers.ReLU()(x).name)
# Test that we use `relu6` when appropriate in graph mode.
self.assertTrue('Relu6' in keras.layers.ReLU(max_value=6)(x).name)
def test_relu_with_invalid_arg(self):
with self.assertRaisesRegexp(
ValueError, 'max_value of Relu layer cannot be negative value: -10'):
testing_utils.layer_test(keras.layers.ReLU,
kwargs={'max_value': -10},
input_shape=(2, 3, 4))
with self.assertRaisesRegexp(
ValueError,
'negative_slope of Relu layer cannot be negative value: -2'):
with self.cached_session():
testing_utils.layer_test(
keras.layers.ReLU,
kwargs={'negative_slope': -2},
input_shape=(2, 3, 4))
@keras_parameterized.run_with_all_model_types
def test_layer_as_activation(self):
layer = keras.layers.Dense(1, activation=keras.layers.ReLU())
model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/advanced_activations_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import nest
# Used for nested input/output/state RNN test.
NestedInput = collections.namedtuple('NestedInput', ['t1', 't2'])
NestedState = collections.namedtuple('NestedState', ['s1', 's2'])
@keras_parameterized.run_all_keras_modes
class RNNTest(keras_parameterized.TestCase):
def test_minimal_rnn_cell_non_layer(self):
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_non_layer_multiple_states(self):
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16)]
layer = keras.layers.RNN(cells)
self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
self.assertEqual(layer.cell.output_size, 32)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_layer(self):
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(MinimalRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8),
MinimalRNNCell(12),
MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_minimal_rnn_cell_abstract_rnn_cell(self):
class MinimalRNNCell(keras.layers.AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super(MinimalRNNCell, self).__init__(**kwargs)
@property
def state_size(self):
return self.units
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
return output, output
@property
def output_size(self):
return self.units
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8),
MinimalRNNCell(16),
MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_with_time_major(self):
batch = 10
time_step = 5
embedding_dim = 4
units = 3
# Test basic case.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
layer = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True)
self.assertEqual(
layer.compute_output_shape((time_step, None,
embedding_dim)).as_list(),
[time_step, None, units])
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, units))
y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)))
# Test stacking.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
cell_units = [10, 8, 6]
cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))
y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, cell_units[-1])))
# Test masking.
x = keras.Input((time_step, embedding_dim))
time_major = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
mask = keras.layers.Masking()(time_major)
rnn = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True)(mask)
y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(rnn)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)))
# Test layer output
x = keras.Input((time_step, embedding_dim))
rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True)
y = rnn_1(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)))
x_np = np.random.random((batch, time_step, embedding_dim))
y_np_1 = model.predict(x_np)
time_major = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
rnn_2 = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True)
y_2 = rnn_2(time_major)
y_2 = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(y_2)
model_2 = keras.models.Model(x, y_2)
rnn_2.set_weights(rnn_1.get_weights())
y_np_2 = model_2.predict(x_np)
self.assertAllClose(y_np_1, y_np_2, atol=1e-4)
def test_rnn_cell_with_constants_layer(self):
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# test flat list inputs.
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer([x, c])
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
# Test stacking.
cells = [keras.layers.recurrent.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3)]
layer = keras.layers.recurrent.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test GRUCell reset_after property.
x = keras.Input((None, 5))
c = keras.Input((3,))
cells = [keras.layers.recurrent.GRUCell(32, reset_after=True)]
layer = keras.layers.recurrent.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test stacked RNN serialization
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.recurrent.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_rnn_cell_with_non_keras_constants(self):
# Test basic case.
x = keras.Input((None, 5))
c = array_ops.zeros([6, 3], dtype=dtypes.float32)
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [keras.layers.recurrent.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3)]
layer = keras.layers.recurrent.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_cell_with_constants_layer_passing_initial_state(self):
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
s = keras.Input((32,))
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
with self.assertRaises(AssertionError):
self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer([x, s, c])
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_rnn_cell_with_non_keras_constants_and_initial_state(self):
# Test basic case.
x = keras.Input((None, 5))
c = array_ops.zeros([6, 3], dtype=dtypes.float32)
s = array_ops.zeros([6, 32], dtype=dtypes.float32)
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [keras.layers.recurrent.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3)]
layer = keras.layers.recurrent.RNN(cells)
s = [array_ops.zeros([6, 8], dtype=dtypes.float32),
array_ops.zeros([6, 12], dtype=dtypes.float32),
array_ops.zeros([6, 32], dtype=dtypes.float32)]
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_stacked_rnn_attributes(self):
if context.executing_eagerly():
self.skipTest('reduce_sum is not available in eager mode.')
cells = [keras.layers.LSTMCell(1),
keras.layers.LSTMCell(1)]
layer = keras.layers.RNN(cells)
layer.build((None, None, 1))
# Test weights
self.assertEqual(len(layer.trainable_weights), 6)
cells[0].trainable = False
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
# Test `get_losses_for` and `losses`
x = keras.Input((None, 1))
loss_1 = math_ops.reduce_sum(x)
loss_2 = math_ops.reduce_sum(cells[0].kernel)
cells[0].add_loss(loss_1, inputs=x)
cells[0].add_loss(loss_2)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(layer.get_losses_for(None), [loss_2])
self.assertEqual(layer.get_losses_for(x), [loss_1])
# Test `get_updates_for` and `updates`
cells = [keras.layers.LSTMCell(1),
keras.layers.LSTMCell(1)]
layer = keras.layers.RNN(cells)
x = keras.Input((None, 1))
_ = layer(x)
update_1 = state_ops.assign_add(cells[0].kernel,
x[0, 0, 0] * cells[0].kernel)
update_2 = state_ops.assign_add(cells[0].kernel,
array_ops.ones_like(cells[0].kernel))
# TODO(b/128682878): Remove when RNNCells are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
cells[0].add_update(update_1, inputs=x)
cells[0].add_update(update_2)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(None)), 1)
self.assertEqual(len(layer.get_updates_for(x)), 1)
def test_rnn_dynamic_trainability(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
layer = layer_class(units)
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
def test_state_reuse_with_dropout(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
timesteps = 2
num_samples = 2
input1 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units,
return_state=True,
return_sequences=True,
dropout=0.2)
state = layer(input1)[1:]
input2 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
output = layer_class(units)(input2, initial_state=state)
model = keras.Model([input1, input2], output)
inputs = [np.random.random((num_samples, timesteps, embedding_dim)),
np.random.random((num_samples, timesteps, embedding_dim))]
model.predict(inputs)
def test_builtin_rnn_cell_serialization(self):
for cell_class in [keras.layers.SimpleRNNCell,
keras.layers.GRUCell,
keras.layers.LSTMCell]:
# Test basic case.
x = keras.Input((None, 5))
cell = cell_class(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [cell_class(8),
cell_class(12),
cell_class(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
layer=[rnn_v1.SimpleRNN, rnn_v1.GRU, rnn_v1.LSTM,
rnn_v2.GRU, rnn_v2.LSTM],
unroll=[True, False]))
def test_rnn_dropout(self, layer, unroll):
rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll)
if not unroll:
x = keras.Input((None, 5))
else:
x = keras.Input((5, 5))
y = rnn_layer(x)
model = keras.models.Model(x, y)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
cell=[keras.layers.SimpleRNNCell, keras.layers.GRUCell,
keras.layers.LSTMCell],
unroll=[True, False]))
def test_stacked_rnn_dropout(self, cell, unroll):
cells = [cell(3, dropout=0.1, recurrent_dropout=0.1),
cell(3, dropout=0.1, recurrent_dropout=0.1)]
layer = keras.layers.RNN(cells, unroll=unroll)
if not unroll:
x = keras.Input((None, 5))
else:
x = keras.Input((5, 5))
y = layer(x)
model = keras.models.Model(x, y)
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
def test_dropout_mask_reuse(self):
# The layer is created with recurrent_initializer = zero, so that the
# recurrent state won't affect the output. By doing this, we can verify
# the output and see if the same mask is applied for each timestep.
rnn = keras.layers.SimpleRNN(3,
dropout=0.5,
kernel_initializer='ones',
recurrent_initializer='zeros',
return_sequences=True,
unroll=True)
inputs = constant_op.constant(1.0, shape=(6, 2, 5))
out = rnn(inputs, training=True)
if not context.executing_eagerly():
self.evaluate(variables_lib.global_variables_initializer())
batch_1 = self.evaluate(out)
batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
self.assertAllClose(batch_1_t0, batch_1_t1)
# This simulates the layer being called with multiple batches in eager mode.
if context.executing_eagerly():
out2 = rnn(inputs, training=True)
else:
out2 = out
batch_2 = self.evaluate(out2)
batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
self.assertAllClose(batch_2_t0, batch_2_t1)
# Also validate that a different dropout mask is used between batches.
self.assertNotAllClose(batch_1_t0, batch_2_t0)
self.assertNotAllClose(batch_1_t1, batch_2_t1)
def test_stacked_rnn_compute_output_shape(self):
cells = [keras.layers.LSTMCell(3),
keras.layers.LSTMCell(6)]
embedding_dim = 4
timesteps = 2
layer = keras.layers.RNN(cells, return_state=True, return_sequences=True)
output_shape = layer.compute_output_shape((None, timesteps, embedding_dim))
expected_output_shape = [(None, timesteps, 6),
(None, 3),
(None, 3),
(None, 6),
(None, 6)]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape],
expected_output_shape)
# Test reverse_state_order = True for stacked cell.
stacked_cell = keras.layers.StackedRNNCells(
cells, reverse_state_order=True)
layer = keras.layers.RNN(
stacked_cell, return_state=True, return_sequences=True)
output_shape = layer.compute_output_shape((None, timesteps, embedding_dim))
expected_output_shape = [(None, timesteps, 6),
(None, 6),
(None, 6),
(None, 3),
(None, 3)]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape],
expected_output_shape)
def test_trackable_dependencies(self):
rnn = keras.layers.SimpleRNN
x = np.random.random((2, 2, 2))
y = np.random.random((2, 2))
model = keras.models.Sequential()
model.add(rnn(2))
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
def test_high_dimension_RNN(self):
# Basic test case.
unit_a = 10
unit_b = 20
input_a = 5
input_b = 10
batch = 32
time_step = 4
cell = Minimal2DRNNCell(unit_a, unit_b)
x = keras.Input((None, input_a, input_b))
layer = keras.layers.RNN(cell)
y = layer(x)
self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b])
if not context.executing_eagerly():
init_state = layer.get_initial_state(x)
self.assertEqual(len(init_state), 1)
self.assertEqual(init_state[0].shape.as_list(), [None, unit_a, unit_b])
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b)))
self.assertEqual(model.output_shape, (None, unit_a, unit_b))
# Test stacking.
cells = [
Minimal2DRNNCell(unit_a, unit_b),
Minimal2DRNNCell(unit_a * 2, unit_b * 2),
Minimal2DRNNCell(unit_a * 4, unit_b * 4)
]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a * 4, unit_b * 4)))
self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))
def test_high_dimension_RNN_with_init_state(self):
unit_a = 10
unit_b = 20
input_a = 5
input_b = 10
batch = 32
time_step = 4
# Basic test case.
cell = Minimal2DRNNCell(unit_a, unit_b)
x = keras.Input((None, input_a, input_b))
s = keras.Input((unit_a, unit_b))
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s)
model = keras.models.Model([x, s], y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b))
], np.zeros((batch, unit_a, unit_b)))
self.assertEqual(model.output_shape, (None, unit_a, unit_b))
# Bad init state shape.
bad_shape_a = unit_a * 2
bad_shape_b = unit_b * 2
cell = Minimal2DRNNCell(unit_a, unit_b)
x = keras.Input((None, input_a, input_b))
s = keras.Input((bad_shape_a, bad_shape_b))
layer = keras.layers.RNN(cell)
with self.assertRaisesWithPredicateMatch(ValueError,
'however `cell.state_size` is'):
layer(x, initial_state=s)
def test_inconsistent_output_state_size(self):
batch = 32
time_step = 4
state_size = 5
input_size = 6
cell = PlusOneRNNCell(state_size)
x = keras.Input((None, input_size))
layer = keras.layers.RNN(cell)
y = layer(x)
self.assertEqual(cell.state_size, state_size)
if not context.executing_eagerly():
init_state = layer.get_initial_state(x)
self.assertEqual(len(init_state), 1)
self.assertEqual(init_state[0].shape.as_list(), [None, state_size])
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, time_step, input_size)),
np.zeros((batch, input_size)))
self.assertEqual(model.output_shape, (None, input_size))
def test_get_initial_state(self):
cell = keras.layers.SimpleRNNCell(5)
with self.assertRaisesRegexp(ValueError,
'batch_size and dtype cannot be None'):
cell.get_initial_state(None, None, None)
if not context.executing_eagerly():
inputs = keras.Input((None, 10))
initial_state = cell.get_initial_state(inputs, None, None)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
batch = array_ops.shape(inputs)[0]
dtype = inputs.dtype
initial_state = cell.get_initial_state(None, batch, dtype)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
else:
batch = 8
inputs = np.random.random((batch, 10))
initial_state = cell.get_initial_state(inputs, None, None)
self.assertEqual(initial_state.shape.as_list(), [8, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
dtype = inputs.dtype
initial_state = cell.get_initial_state(None, batch, dtype)
self.assertEqual(initial_state.shape.as_list(), [batch, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
def test_nested_input_output(self):
batch = 10
t = 5
i1, i2, i3 = 3, 4, 5
o1, o2, o3 = 2, 3, 4
cell = NestedCell(o1, o2, o3)
rnn = keras.layers.RNN(cell)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
outputs = rnn((input_1, input_2))
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [None, o1])
self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])
model = keras.models.Model((input_1, input_2), outputs)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
[np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])
cell = NestedCell(o1, o2, o3, use_tuple=True)
rnn = keras.layers.RNN(cell)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
outputs = rnn(NestedInput(t1=input_1, t2=input_2))
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [None, o1])
self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])
model = keras.models.Model([input_1, input_2], outputs)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3))],
[np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])
def test_nested_input_output_with_state(self):
batch = 10
t = 5
i1, i2, i3 = 3, 4, 5
o1, o2, o3 = 2, 3, 4
cell = NestedCell(o1, o2, o3)
rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
output1, output2, s1, s2 = rnn((input_1, input_2))
self.assertEqual(output1.shape.as_list(), [None, t, o1])
self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
self.assertEqual(s1.shape.as_list(), [None, o1])
self.assertEqual(s2.shape.as_list(), [None, o2, o3])
model = keras.models.Model([input_1, input_2], [output1, output2])
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3))],
[np.zeros((batch, t, o1)),
np.zeros((batch, t, o2, o3))])
self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
cell = NestedCell(o1, o2, o3, use_tuple=True)
rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2))
self.assertEqual(output1.shape.as_list(), [None, t, o1])
self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
self.assertEqual(s1.shape.as_list(), [None, o1])
self.assertEqual(s2.shape.as_list(), [None, o2, o3])
model = keras.models.Model([input_1, input_2], [output1, output2])
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3))],
[np.zeros((batch, t, o1)),
np.zeros((batch, t, o2, o3))])
self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
def test_nest_input_output_with_init_state(self):
batch = 10
t = 5
i1, i2, i3 = 3, 4, 5
o1, o2, o3 = 2, 3, 4
cell = NestedCell(o1, o2, o3)
rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
init_s1 = keras.Input((o1,))
init_s2 = keras.Input((o2, o3))
output1, output2, s1, s2 = rnn((input_1, input_2),
initial_state=(init_s1, init_s2))
self.assertEqual(output1.shape.as_list(), [None, t, o1])
self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
self.assertEqual(s1.shape.as_list(), [None, o1])
self.assertEqual(s2.shape.as_list(), [None, o2, o3])
model = keras.models.Model([input_1, input_2, init_s1, init_s2],
[output1, output2])
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3)),
np.zeros((batch, o1)),
np.zeros((batch, o2, o3))],
[np.zeros((batch, t, o1)),
np.zeros((batch, t, o2, o3))])
self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
cell = NestedCell(o1, o2, o3, use_tuple=True)
rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
init_s1 = keras.Input((o1,))
init_s2 = keras.Input((o2, o3))
init_state = NestedState(s1=init_s1, s2=init_s2)
output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2),
initial_state=init_state)
self.assertEqual(output1.shape.as_list(), [None, t, o1])
self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
self.assertEqual(s1.shape.as_list(), [None, o1])
self.assertEqual(s2.shape.as_list(), [None, o2, o3])
model = keras.models.Model([input_1, input_2, init_s1, init_s2],
[output1, output2])
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3)),
np.zeros((batch, o1)),
np.zeros((batch, o2, o3))],
[np.zeros((batch, t, o1)),
np.zeros((batch, t, o2, o3))])
self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
def test_peephole_lstm_cell(self):
def _run_cell(cell_fn, **kwargs):
inputs = array_ops.one_hot([1, 2, 3, 4], 4)
cell = cell_fn(5, **kwargs)
cell.build(inputs.shape)
initial_state = cell.get_initial_state(
inputs=inputs, batch_size=4, dtype=dtypes.float32)
inputs, _ = cell(inputs, initial_state)
output = inputs
if not context.executing_eagerly():
self.evaluate(variables_lib.global_variables_initializer())
output = self.evaluate(output)
return output
random_seed.set_random_seed(12345)
# `recurrent_activation` kwarg is set to sigmoid as that is hardcoded into
# rnn_cell.LSTMCell.
no_peephole_output = _run_cell(
keras.layers.LSTMCell,
kernel_initializer='ones',
recurrent_activation='sigmoid',
implementation=1)
first_implementation_output = _run_cell(
keras.layers.PeepholeLSTMCell,
kernel_initializer='ones',
recurrent_activation='sigmoid',
implementation=1)
second_implementation_output = _run_cell(
keras.layers.PeepholeLSTMCell,
kernel_initializer='ones',
recurrent_activation='sigmoid',
implementation=2)
tf_lstm_cell_output = _run_cell(
rnn_cell.LSTMCell,
use_peepholes=True,
initializer=init_ops.ones_initializer)
self.assertNotAllClose(first_implementation_output, no_peephole_output)
self.assertAllClose(first_implementation_output,
second_implementation_output)
self.assertAllClose(first_implementation_output, tf_lstm_cell_output)
def test_masking_rnn_with_output_and_states(self):
class Cell(keras.layers.Layer):
def __init__(self):
self.state_size = None
self.output_size = None
super(Cell, self).__init__()
def build(self, input_shape):
self.state_size = input_shape[-1]
self.output_size = input_shape[-1]
def call(self, inputs, states):
return inputs, [s + 1 for s in states]
x = keras.Input((3, 1), name='x')
x_masked = keras.layers.Masking()(x)
s_0 = keras.Input((1,), name='s_0')
y, s = keras.layers.RNN(
Cell(), return_state=True)(x_masked, initial_state=s_0)
model = keras.models.Model([x, s_0], [y, s])
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
# last time step masked
x_np = np.array([[[1.], [2.], [0.]]])
s_0_np = np.array([[10.]])
y_np, s_np = model.predict([x_np, s_0_np])
# 1 is added to the initial state twice (once for each unmasked time step)
self.assertAllClose(s_np, s_0_np + 2)
# Expect last output to be the same as last output before masking
self.assertAllClose(y_np, x_np[:, 1, :])
def test_zero_output_for_masking(self):
for unroll in [True, False]:
cell = keras.layers.SimpleRNNCell(5)
x = keras.Input((5, 5))
mask = keras.layers.Masking()
layer = keras.layers.RNN(
cell, return_sequences=True, zero_output_for_mask=True, unroll=unroll)
masked_input = mask(x)
y = layer(masked_input)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
np_x = np.ones((6, 5, 5))
result_1 = model.predict(np_x)
# Mask (zero out) the last two time steps of the last record.
np_x[5, 3:] = 0
result_2 = model.predict(np_x)
# Expect result_2 to have the same output as result_1, except at the masked
# time steps of the last record.
result_1[5, 3:] = 0
self.assertAllClose(result_1, result_2)
def test_unroll_single_step(self):
"""Even if the time dimension is only one, we should be able to unroll."""
cell = keras.layers.SimpleRNNCell(5)
x = keras.Input((1, 5))
layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
np_x = np.ones((6, 1, 5))
result = model.predict(np_x)
self.assertEqual((6, 1, 5), result.shape)
def test_unroll_zero_step(self):
"""If the time dimension is None, we should fail to unroll."""
cell = keras.layers.SimpleRNNCell(5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
with self.assertRaisesRegexp(ValueError, 'Cannot unroll a RNN.*'):
layer(x)
def test_full_input_spec(self):
# See https://github.com/tensorflow/tensorflow/issues/25985
inputs = keras.layers.Input(batch_shape=(1, 1, 1))
state_h = keras.layers.Input(batch_shape=(1, 1))
state_c = keras.layers.Input(batch_shape=(1, 1))
states = [state_h, state_c]
decoder_out = keras.layers.LSTM(1, stateful=True)(
inputs,
initial_state=states
)
model = keras.Model([inputs, state_h, state_c], decoder_out)
model.reset_states()
def test_reset_states(self):
# See https://github.com/tensorflow/tensorflow/issues/25852
with self.assertRaisesRegexp(ValueError, 'it needs to know its batch size'):
simple_rnn = keras.layers.SimpleRNN(1, stateful=True)
simple_rnn.reset_states()
with self.assertRaisesRegexp(ValueError, 'it needs to know its batch size'):
cell = Minimal2DRNNCell(1, 2)
custom_rnn = keras.layers.RNN(cell, stateful=True)
custom_rnn.reset_states()
@parameterized.parameters(
[keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell])
def test_stateful_rnn_with_stacking(self, cell):
# See https://github.com/tensorflow/tensorflow/issues/28614.
batch = 12
timesteps = 10
input_dim = 8
output_dim = 64
cells = [cell(32), cell(64)]
x = keras.Input(batch_shape=(batch, None, input_dim))
layer = keras.layers.RNN(cells, stateful=True)
y = layer(x)
model = keras.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, timesteps, input_dim)),
np.zeros((batch, output_dim)))
model.predict(np.ones((batch, timesteps, input_dim)))
model.reset_states()
model.predict(np.ones((batch, timesteps, input_dim)))
new_states = nest.map_structure(lambda s: np.ones((batch, s)),
layer.cell.state_size)
layer.reset_states(new_states)
model.predict(np.ones((batch, timesteps, input_dim)))
def test_input_dim_length(self):
simple_rnn = keras.layers.SimpleRNN(5, input_length=10, input_dim=8)
self.assertEqual(simple_rnn._batch_input_shape, (None, 10, 8))
simple_rnn = keras.layers.SimpleRNN(5, input_dim=8)
self.assertEqual(simple_rnn._batch_input_shape, (None, None, 8))
simple_rnn = keras.layers.SimpleRNN(5, input_length=10)
self.assertEqual(simple_rnn._batch_input_shape, (None, 10, None))
@parameterized.parameters(
[keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell])
def test_state_spec_with_stack_cell(self, cell):
# See https://github.com/tensorflow/tensorflow/issues/27817 for more detail.
batch = 12
timesteps = 10
input_dim = 8
output_dim = 8
def create_cell():
return [cell(output_dim),
cell(output_dim),
cell(output_dim)]
inputs = keras.Input((timesteps, input_dim))
encoder_output = keras.layers.RNN(create_cell(), return_state=True)(inputs)
states = encoder_output[1:]
decoder_output = keras.layers.RNN(
create_cell())(inputs, initial_state=states)
model = keras.models.Model(inputs, decoder_output)
model.compile(optimizer='rmsprop', loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch(
np.zeros((batch, timesteps, input_dim)),
np.zeros((batch, output_dim)))
model.predict(np.ones((batch, timesteps, input_dim)))
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, constant_size, **kwargs):
self.units = units
self.state_size = units
self.constant_size = constant_size
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(self.constant_size, self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units, 'constant_size': self.constant_size}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Minimal2DRNNCell(keras.layers.Layer):
"""The minimal 2D RNN cell is a simple combination of 2 1-D RNN cell.
Both internal state and output have 2 dimensions and are orthogonal
between each other.
"""
def __init__(self, unit_a, unit_b, **kwargs):
self.unit_a = unit_a
self.unit_b = unit_b
self.state_size = tensor_shape.as_shape([unit_a, unit_b])
self.output_size = tensor_shape.as_shape([unit_a, unit_b])
super(Minimal2DRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
input_a = input_shape[-2]
input_b = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_a, input_b, self.unit_a, self.unit_b),
initializer='uniform',
name='kernel')
self.recurring_kernel = self.add_weight(
shape=(self.unit_a, self.unit_b, self.unit_a, self.unit_b),
initializer='uniform',
name='recurring_kernel')
self.bias = self.add_weight(
shape=(self.unit_a, self.unit_b), initializer='uniform', name='bias')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = special_math_ops.einsum('bij,ijkl->bkl', inputs, self.kernel)
h += array_ops.expand_dims(self.bias, axis=0)
output = h + special_math_ops.einsum('bij,ijkl->bkl', prev_output,
self.recurring_kernel)
return output, [output]
class PlusOneRNNCell(keras.layers.Layer):
"""Add one to the input and state.
This cell is used for testing state_size and output_size."""
def __init__(self, num_unit, **kwargs):
self.state_size = num_unit
super(PlusOneRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.output_size = input_shape[-1]
def call(self, inputs, states):
return inputs + 1, [states[0] + 1]
class NestedCell(keras.layers.Layer):
def __init__(self, unit_1, unit_2, unit_3, use_tuple=False, **kwargs):
self.unit_1 = unit_1
self.unit_2 = unit_2
self.unit_3 = unit_3
self.use_tuple = use_tuple
super(NestedCell, self).__init__(**kwargs)
# A nested state.
if use_tuple:
self.state_size = NestedState(
s1=unit_1, s2=tensor_shape.TensorShape([unit_2, unit_3]))
else:
self.state_size = (unit_1, tensor_shape.TensorShape([unit_2, unit_3]))
self.output_size = (unit_1, tensor_shape.TensorShape([unit_2, unit_3]))
def build(self, inputs_shape):
# expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
if self.use_tuple:
input_1 = inputs_shape.t1[1]
input_2, input_3 = inputs_shape.t2[1:]
else:
input_1 = inputs_shape[0][1]
input_2, input_3 = inputs_shape[1][1:]
self.kernel_1 = self.add_weight(
shape=(input_1, self.unit_1), initializer='uniform', name='kernel_1')
self.kernel_2_3 = self.add_weight(
shape=(input_2, input_3, self.unit_2, self.unit_3),
initializer='uniform',
name='kernel_2_3')
def call(self, inputs, states):
# inputs should be in [(batch, input_1), (batch, input_2, input_3)]
# state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
flatten_inputs = nest.flatten(inputs)
s1, s2 = states
output_1 = math_ops.matmul(flatten_inputs[0], self.kernel_1)
output_2_3 = special_math_ops.einsum('bij,ijkl->bkl', flatten_inputs[1],
self.kernel_2_3)
state_1 = s1 + output_1
state_2_3 = s2 + output_2_3
output = [output_1, output_2_3]
new_states = NestedState(s1=state_1, s2=state_2_3)
return output, new_states
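# Illustrative helper (a minimal sketch, not invoked by any test): the custom
# cells above are designed to be wrapped in `keras.layers.RNN`. The unit and
# input sizes below are arbitrary example values.
def _example_wrap_minimal_2d_cell():
  cell = Minimal2DRNNCell(unit_a=4, unit_b=3)
  # Per-timestep input is (batch, 5, 6); the cell emits (batch, 4, 3).
  x = keras.Input((None, 5, 6))
  y = keras.layers.RNN(cell)(x)
  return keras.models.Model(x, y)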
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/recurrent_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merge layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class MergeLayersTest(keras_parameterized.TestCase):
def test_merge_add(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
add_layer = keras.layers.Add()
o = add_layer([i1, i2, i3])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
x3 = np.random.random((2, 4, 5))
out = model.predict([x1, x2, x3])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)
self.assertEqual(
add_layer.compute_mask([i1, i2, i3], [None, None, None]), None)
self.assertTrue(
np.all(
K.eval(
add_layer.compute_mask(
[i1, i2], [K.variable(x1), K.variable(x2)]))))
with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
add_layer.compute_mask([i1, i2, i3], x1)
with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
add_layer.compute_mask(i1, [None, None, None])
with self.assertRaisesRegexp(ValueError, " should have the same length."):
add_layer.compute_mask([i1, i2, i3], [None, None])
def test_merge_subtract(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
subtract_layer = keras.layers.Subtract()
o = subtract_layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 - x2, atol=1e-4)
self.assertEqual(subtract_layer.compute_mask([i1, i2], [None, None]), None)
self.assertTrue(
np.all(
K.eval(
subtract_layer.compute_mask(
[i1, i2], [K.variable(x1), K.variable(x2)]))))
with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
subtract_layer.compute_mask([i1, i2], x1)
with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
subtract_layer.compute_mask(i1, [None, None])
with self.assertRaisesRegexp(ValueError,
"layer should be called on exactly 2 inputs"):
subtract_layer([i1, i2, i3])
with self.assertRaisesRegexp(ValueError,
"layer should be called on exactly 2 inputs"):
subtract_layer([i1])
def test_merge_multiply(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
o = keras.layers.multiply([i1, i2, i3])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
x3 = np.random.random((2, 4, 5))
out = model.predict([x1, x2, x3])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)
def test_merge_average(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.average([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)
def test_merge_maximum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.maximum([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)
def test_merge_minimum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.minimum([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)
def test_merge_concatenate(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
concat_layer = keras.layers.Concatenate(axis=1)
o = concat_layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 8, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 8, 5))
self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)
self.assertEqual(concat_layer.compute_mask([i1, i2], [None, None]), None)
self.assertTrue(
np.all(
K.eval(
concat_layer.compute_mask(
[i1, i2], [K.variable(x1), K.variable(x2)]))))
with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
concat_layer.compute_mask([i1, i2], x1)
with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
concat_layer.compute_mask(i1, [None, None])
with self.assertRaisesRegexp(ValueError, "should have the same length"):
concat_layer.compute_mask([i1, i2], [None])
with self.assertRaisesRegexp(ValueError,
"layer should be called on a list of inputs"):
concat_layer(i1)
def test_merge_dot(self):
i1 = keras.layers.Input(shape=(4,))
i2 = keras.layers.Input(shape=(4,))
o = keras.layers.dot([i1, i2], axes=1)
self.assertListEqual(o.shape.as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
_ = keras.layers.Dot(axes=1).get_config()
x1 = np.random.random((2, 4))
x2 = np.random.random((2, 4))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
expected = np.zeros((2, 1))
expected[0, 0] = np.dot(x1[0], x2[0])
expected[1, 0] = np.dot(x1[1], x2[1])
self.assertAllClose(out, expected, atol=1e-4)
# Test with negative tuple of axes.
o = keras.layers.dot([i1, i2], axes=(-1, -1))
self.assertListEqual(o.shape.as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
self.assertAllClose(out, expected, atol=1e-4)
# test compute_output_shape
layer = keras.layers.Dot(axes=-1)
self.assertEqual(layer.compute_output_shape([(4, 5), (4, 5)]), (4, 1))
@tf_test_util.run_all_in_graph_and_eager_modes
class MergeLayersTestNoExecution(test.TestCase):
def test_merge_elementwise_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 6))
with self.assertRaises(ValueError):
keras.layers.add([i1, i2])
with self.assertRaises(ValueError):
keras.layers.add([i1])
with self.assertRaises(ValueError):
keras.layers.add(i1)
with self.assertRaises(ValueError):
keras.layers.add([i1])
def test_concatenate_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(3, 5))
with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
keras.layers.concatenate([i1, i2], axis=-1)
with self.assertRaisesRegexp(ValueError, 'called on a list'):
keras.layers.concatenate(i1, axis=-1)
with self.assertRaisesRegexp(ValueError, 'called on a list'):
keras.layers.concatenate([i1], axis=-1)
def test_dot_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 6))
i3 = keras.layers.Input(shape=(4, 6))
with self.assertRaises(ValueError):
keras.layers.dot([i1, i2], axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot(i1, axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot([i1], axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot([i1, i2, i3], axes=-1)
with self.assertRaises(ValueError):
dot = keras.layers.Dot(1)
dot.compute_output_shape(1)
def test_merge_subtract(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
y = keras.layers.subtract([i1, i2])
self.assertEqual(y.shape.as_list(), [None, 4, 5])
# Test invalid use cases
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(3, 5))
with self.assertRaises(ValueError):
keras.layers.subtract([i1, i2])
with self.assertRaises(ValueError):
keras.layers.subtract([i1, i1, i1])
def test_merge_add_masking(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
m1 = keras.layers.Masking()(i1)
layer = keras.layers.Add()
o = layer([m1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
mask = layer.output_mask
self.assertListEqual(mask.shape.as_list(), [None, 4])
def test_merge_add_dynamic_shape(self):
i1 = keras.Input(batch_shape=(4, None), dtype='float32')
i2 = keras.Input(batch_shape=(4, 5), dtype='float32')
layer = keras.layers.Add()
o = layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [4, 5])
def test_merge_concatenate_masking(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
m1 = keras.layers.Masking()(i1)
layer = keras.layers.Concatenate()
o = layer([m1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 10])
mask = layer.output_mask
self.assertListEqual(mask.shape.as_list(), [None, 4])
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/merge_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for locally-connected layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
_DATA_FORMAT_PADDING_IMPLEMENTATION = [{
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 1
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 1
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 1
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 1
}, {
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 2
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 2
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 2
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 2
}]
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_1d(self, data_format, padding, implementation):
with self.cached_session():
num_samples = 2
num_steps = 8
input_dim = 5
filter_length = 3
filters = 4
for strides in [1]:
if padding == 'same' and strides != 1:
continue
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'padding': padding,
'strides': strides,
'data_format': data_format,
'implementation': implementation
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected1D,
**kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected1D,
kwargs=kwargs,
input_shape=(num_samples, num_steps, input_dim))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_1d_regularization(self, data_format, padding,
implementation):
num_samples = 2
num_steps = 8
input_dim = 5
filter_length = 3
filters = 4
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'data_format': data_format,
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs)
else:
with self.cached_session():
layer = keras.layers.LocallyConnected1D(**kwargs)
layer.build((num_samples, num_steps, input_dim))
self.assertEqual(len(layer.losses), 2)
layer(
keras.backend.variable(
np.ones((num_samples, num_steps, input_dim))))
self.assertEqual(len(layer.losses), 3)
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
}
with self.cached_session():
layer = keras.layers.LocallyConnected1D(**kwargs)
layer.build((num_samples, num_steps, input_dim))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d(self, data_format, padding, implementation):
with self.cached_session():
num_samples = 8
filters = 3
stack_size = 4
num_row = 6
num_col = 10
for strides in [(1, 1), (2, 2)]:
if padding == 'same' and strides != (1, 1):
continue
kwargs = {
'filters': filters,
'kernel_size': 3,
'padding': padding,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'strides': strides,
'data_format': data_format,
'implementation': implementation
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
**kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d_channels_first(self, data_format, padding,
implementation):
with self.cached_session():
num_samples = 8
filters = 3
stack_size = 4
num_row = 6
num_col = 10
kwargs = {
'filters': filters,
'kernel_size': 3,
'data_format': data_format,
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d_regularization(self, data_format, padding,
implementation):
num_samples = 2
filters = 3
stack_size = 4
num_row = 6
num_col = 7
kwargs = {
'filters': filters,
'kernel_size': 3,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
else:
with self.cached_session():
layer = keras.layers.LocallyConnected2D(**kwargs)
layer.build((num_samples, num_row, num_col, stack_size))
self.assertEqual(len(layer.losses), 2)
layer(
keras.backend.variable(
np.ones((num_samples, num_row, num_col, stack_size))))
self.assertEqual(len(layer.losses), 3)
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
kwargs = {
'filters': filters,
'kernel_size': 3,
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
}
with self.cached_session():
layer = keras.layers.LocallyConnected2D(**kwargs)
layer.build((num_samples, num_row, num_col, stack_size))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnectedImplementationModeTest(test.TestCase,
parameterized.TestCase):
@parameterized.parameters(['channels_first', 'channels_last'])
def test_locallyconnected_implementation(self, data_format):
with self.cached_session():
num_samples = 4
num_classes = 3
num_epochs = 2
np.random.seed(1)
targets = np.random.randint(0, num_classes, (num_samples,))
for width in [1, 6]:
for height in [7]:
for filters in [2]:
inputs = get_inputs(data_format, filters, height, num_samples,
width)
for kernel_x in [(3,)]:
for kernel_y in [()] if width == 1 else [(2,)]:
for stride_x in [(1,)]:
for stride_y in [()] if width == 1 else [(3,)]:
for layers in [2]:
kwargs = {
'layers': layers,
'filters': filters,
'kernel_size': kernel_x + kernel_y,
'strides': stride_x + stride_y,
'data_format': data_format,
'num_classes': num_classes
}
model_1 = get_model(implementation=1, **kwargs)
model_2 = get_model(implementation=2, **kwargs)
# Build models.
model_1.train_on_batch(inputs, targets)
model_2.train_on_batch(inputs, targets)
# Copy weights.
copy_model_weights(model_2, model_1)
# Compare outputs at initialization.
out_1 = model_1.call(inputs)
out_2 = model_2.call(inputs)
self.assertAllCloseAccordingToType(
out_1, out_2, rtol=1e-5, atol=1e-5)
# Train.
model_1.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples)
model_2.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples)
# Compare outputs after a few training steps.
out_1 = model_1.call(inputs)
out_2 = model_2.call(inputs)
self.assertAllCloseAccordingToType(
out_1, out_2, atol=2e-4)
def test_make_2d(self):
input_shapes = [
(0,),
(0, 0),
(1,),
(2,),
(3,),
(1, 0),
(0, 3),
(1, 1),
(1, 2),
(3, 1),
(2, 2),
(3, 3),
(1, 0, 1),
(5, 2, 3),
(3, 5, 6, 7, 0),
(3, 2, 2, 4, 4),
(1, 2, 3, 4, 7, 2),
]
np.random.seed(1)
for input_shape in input_shapes:
inputs = np.random.normal(0, 1, input_shape)
inputs_tf = keras.backend.variable(inputs)
split_dim = np.random.randint(0, inputs.ndim + 1)
shape_2d = (int(np.prod(inputs.shape[:split_dim])),
int(np.prod(inputs.shape[split_dim:])))
inputs_2d = np.reshape(inputs, shape_2d)
inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim)
inputs_2d_tf = keras.backend.get_value(inputs_2d_tf)
self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf)
def get_inputs(data_format, filters, height, num_samples, width):
if data_format == 'channels_first':
if width == 1:
input_shape = (filters, height)
else:
input_shape = (filters, height, width)
elif data_format == 'channels_last':
if width == 1:
input_shape = (height, filters)
else:
input_shape = (height, width, filters)
else:
raise NotImplementedError(data_format)
inputs = np.random.normal(0, 1,
(num_samples,) + input_shape).astype(np.float32)
return inputs
def xent(y_true, y_pred):
y_true = keras.backend.cast(
keras.backend.reshape(y_true, (-1,)),
keras.backend.dtypes_module.int32)
return keras.backend.nn.sparse_softmax_cross_entropy_with_logits(
labels=y_true,
logits=y_pred)
def get_model(implementation,
filters,
kernel_size,
strides,
layers,
num_classes,
data_format):
model = keras.Sequential()
if len(kernel_size) == 1:
lc_layer = keras.layers.LocallyConnected1D
elif len(kernel_size) == 2:
lc_layer = keras.layers.LocallyConnected2D
else:
raise NotImplementedError(kernel_size)
for _ in range(layers):
model.add(lc_layer(
padding='valid',
kernel_initializer=keras.initializers.random_normal(),
bias_initializer=keras.initializers.random_normal(),
filters=filters,
strides=strides,
kernel_size=kernel_size,
activation=keras.activations.relu,
data_format=data_format,
implementation=implementation))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(num_classes))
model.compile(
optimizer=RMSPropOptimizer(0.01),
metrics=[keras.metrics.categorical_accuracy],
loss=xent
)
return model
def copy_lc_weights(lc_layer_2_from, lc_layer_1_to):
  """Copies weights from an `implementation=2` locally-connected layer into an
  equivalent `implementation=1` layer by masking, permuting and reshaping the
  dense kernel."""
lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask
data_format = lc_layer_2_from.data_format
if data_format == 'channels_first':
if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
permutation = (3, 0, 1, 2)
elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
permutation = (4, 5, 0, 1, 2, 3)
else:
raise NotImplementedError(lc_layer_2_from)
elif data_format == 'channels_last':
if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
permutation = (2, 0, 1, 3)
elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
permutation = (3, 4, 0, 1, 2, 5)
else:
raise NotImplementedError(lc_layer_2_from)
else:
raise NotImplementedError(data_format)
lc_2_kernel_masked = keras.backend.permute_dimensions(
lc_2_kernel_masked, permutation)
lc_2_kernel_mask = keras.backend.math_ops.not_equal(
lc_2_kernel_masked, 0)
lc_2_kernel_flat = keras.backend.array_ops.boolean_mask(
lc_2_kernel_masked, lc_2_kernel_mask)
lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,
lc_layer_1_to.kernel.shape)
lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped)
lc_2_bias = keras.backend.get_value(lc_2_bias)
lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias])
def copy_model_weights(model_2_from, model_1_to):
for l in range(len(model_2_from.layers)):
layer_2_from = model_2_from.layers[l]
layer_1_to = model_1_to.layers[l]
if isinstance(layer_2_from, (keras.layers.LocallyConnected2D,
keras.layers.LocallyConnected1D)):
copy_lc_weights(layer_2_from, layer_1_to)
elif isinstance(layer_2_from, keras.layers.Dense):
weights_2, bias_2 = layer_2_from.weights
weights_2 = keras.backend.get_value(weights_2)
bias_2 = keras.backend.get_value(bias_2)
layer_1_to.set_weights([weights_2, bias_2])
else:
continue
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/local_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers that can merge several inputs into one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class _Merge(Layer):
"""Generic merge layer for elementwise merge functions.
  Used to implement `Add`, `Average`, etc.
Arguments:
**kwargs: standard layer keyword arguments.
"""
def __init__(self, **kwargs):
super(_Merge, self).__init__(**kwargs)
self.supports_masking = True
def _merge_function(self, inputs):
raise NotImplementedError
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
Arguments:
shape1: tuple or None. Shape of the first tensor
shape2: tuple or None. Shape of the second tensor
Returns:
expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: if shape1 and shape2 are not compatible for
element-wise operations.
"""
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[:-len(shape2)])
for i, j in zip(shape1[-len(shape2):], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError(
'Operands could not be broadcast '
'together with shapes ' + str(shape1) + ' ' + str(shape2))
output_shape.append(i)
return tuple(output_shape)
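  # Informal worked examples of the rule above (illustration only):
  #   shape1=(4, 5),    shape2=(5,)   -> (4, 5)    shorter shape broadcasts
  #   shape1=(4, 5),    shape2=(1, 5) -> (4, 5)    size-1 axes broadcast
  #   shape1=(None, 5), shape2=(4, 5) -> (None, 5) unknown dims stay None
  #   shape1=(4, 5),    shape2=(3, 5) -> ValueError (incompatible axes)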
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
raise ValueError('A merge layer should be called on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A merge layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) > 1:
raise ValueError(
'Can not merge tensors with different '
'batch sizes. Got tensors with shapes : ' + str(input_shape))
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
# If the inputs have different ranks, we have to reshape them
# to make them broadcastable.
if None not in input_shape and len(set(map(len, input_shape))) == 1:
self._reshape_required = False
else:
self._reshape_required = True
def call(self, inputs):
if not isinstance(inputs, list):
raise ValueError('A merge layer should be called on a list of inputs.')
if self._reshape_required:
reshaped_inputs = []
input_ndims = list(map(K.ndim, inputs))
if None not in input_ndims:
# If ranks of all inputs are available,
# we simply expand each of them at axis=1
# until all of them have the same rank.
max_ndim = max(input_ndims)
for x in inputs:
x_ndim = K.ndim(x)
for _ in range(max_ndim - x_ndim):
x = array_ops.expand_dims(x, axis=1)
reshaped_inputs.append(x)
return self._merge_function(reshaped_inputs)
else:
# Transpose all inputs so that batch size is the last dimension.
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
transposed = False
for x in inputs:
x_ndim = K.ndim(x)
if x_ndim is None:
x_shape = array_ops.shape(x)
batch_size = x_shape[0]
new_shape = K.concatenate(
[x_shape[1:],
array_ops.expand_dims(batch_size, axis=-1)])
x_transposed = array_ops.reshape(
x,
array_ops.stack(
[batch_size, math_ops.reduce_prod(x_shape[1:])], axis=0))
x_transposed = array_ops.transpose(x_transposed, perm=(1, 0))
x_transposed = array_ops.reshape(x_transposed, new_shape)
reshaped_inputs.append(x_transposed)
transposed = True
elif x_ndim > 1:
dims = list(range(1, x_ndim)) + [0]
reshaped_inputs.append(array_ops.transpose(x, perm=dims))
transposed = True
else:
# We don't transpose inputs if they are 1D vectors or scalars.
reshaped_inputs.append(x)
y = self._merge_function(reshaped_inputs)
y_ndim = K.ndim(y)
if transposed:
# If inputs have been transposed, we have to transpose the output too.
if y_ndim is None:
y_shape = array_ops.shape(y)
y_ndim = array_ops.shape(y_shape)[0]
batch_size = y_shape[y_ndim - 1]
new_shape = K.concatenate([
array_ops.expand_dims(batch_size, axis=-1), y_shape[:y_ndim - 1]
])
y = array_ops.reshape(y, (-1, batch_size))
y = array_ops.transpose(y, perm=(1, 0))
y = array_ops.reshape(y, new_shape)
elif y_ndim > 1:
dims = [y_ndim - 1] + list(range(y_ndim - 1))
y = array_ops.transpose(y, perm=dims)
return y
else:
return self._merge_function(inputs)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) == 1:
output_shape = (list(batch_sizes)[0],) + output_shape
else:
output_shape = (None,) + output_shape
return output_shape
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all(m is None for m in mask):
return None
masks = [array_ops.expand_dims(m, axis=0) for m in mask if m is not None]
return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
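  # Informal example of the mask merge above (illustration only): with input
  # masks m1 = [True, False, True] and m2 = [True, True, False], the merged
  # mask is their elementwise AND: [True, False, False].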
@keras_export('keras.layers.Add')
class Add(_Merge):
"""Layer that adds a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
Examples:
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
# equivalent to `added = keras.layers.add([x1, x2])`
added = keras.layers.Add()([x1, x2])
out = keras.layers.Dense(4)(added)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output
@keras_export('keras.layers.Subtract')
class Subtract(_Merge):
"""Layer that subtracts two inputs.
It takes as input a list of tensors of size 2,
both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),
also of the same shape.
Examples:
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
# Equivalent to subtracted = keras.layers.subtract([x1, x2])
subtracted = keras.layers.Subtract()([x1, x2])
out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
"""
@tf_utils.shape_type_conversion
def build(self, input_shape):
super(Subtract, self).build(input_shape)
if len(input_shape) != 2:
raise ValueError('A `Subtract` layer should be called '
'on exactly 2 inputs')
def _merge_function(self, inputs):
if len(inputs) != 2:
raise ValueError('A `Subtract` layer should be called '
'on exactly 2 inputs')
return inputs[0] - inputs[1]
@keras_export('keras.layers.Multiply')
class Multiply(_Merge):
"""Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output *= inputs[i]
return output
@keras_export('keras.layers.Average')
class Average(_Merge):
"""Layer that averages a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output / len(inputs)
@keras_export('keras.layers.Maximum')
class Maximum(_Merge):
"""Layer that computes the maximum (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = math_ops.maximum(output, inputs[i])
return output
@keras_export('keras.layers.Minimum')
class Minimum(_Merge):
"""Layer that computes the minimum (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = math_ops.minimum(output, inputs[i])
return output
@keras_export('keras.layers.Concatenate')
class Concatenate(_Merge):
"""Layer that concatenates a list of inputs.
It takes as input a list of tensors,
all of the same shape except for the concatenation axis,
and returns a single tensor, the concatenation of all inputs.
Arguments:
axis: Axis along which to concatenate.
**kwargs: standard layer keyword arguments.
"""
def __init__(self, axis=-1, **kwargs):
super(Concatenate, self).__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self._reshape_required = False
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) < 2:
raise ValueError('A `Concatenate` layer should be called '
'on a list of at least 2 inputs')
if all(shape is None for shape in input_shape):
return
reduced_inputs_shapes = [list(shape) for shape in input_shape]
shape_set = set()
for i in range(len(reduced_inputs_shapes)):
del reduced_inputs_shapes[i][self.axis]
shape_set.add(tuple(reduced_inputs_shapes[i]))
if len(shape_set) > 1:
raise ValueError('A `Concatenate` layer requires '
'inputs with matching shapes '
'except for the concat axis. '
'Got inputs shapes: %s' % (input_shape))
def _merge_function(self, inputs):
return K.concatenate(inputs, axis=self.axis)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
output_shape = list(input_shapes[0])
for shape in input_shapes[1:]:
if output_shape[self.axis] is None or shape[self.axis] is None:
output_shape[self.axis] = None
break
output_shape[self.axis] += shape[self.axis]
return tuple(output_shape)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all(m is None for m in mask):
return None
# Make a list of masks while making sure
# the dimensionality of each mask
# is the same as the corresponding input.
masks = []
for input_i, mask_i in zip(inputs, mask):
if mask_i is None:
# Input is unmasked. Append all 1s to masks,
masks.append(array_ops.ones_like(input_i, dtype='bool'))
elif K.ndim(mask_i) < K.ndim(input_i):
# Mask is smaller than the input, expand it
masks.append(array_ops.expand_dims(mask_i, axis=-1))
else:
masks.append(mask_i)
concatenated = K.concatenate(masks, axis=self.axis)
return K.all(concatenated, axis=-1, keepdims=False)
def get_config(self):
config = {
'axis': self.axis,
}
base_config = super(Concatenate, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Dot')
class Dot(_Merge):
"""Layer that computes a dot product between samples in two tensors.
E.g. if applied to a list of two tensors `a` and `b` of shape
`(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`
where each entry `i` will be the dot product between
`a[i]` and `b[i]`.
Arguments:
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
"""
def __init__(self, axes, normalize=False, **kwargs):
super(Dot, self).__init__(**kwargs)
if not isinstance(axes, int):
if not isinstance(axes, (list, tuple)):
raise TypeError('Invalid type for `axes` - '
'should be a list or an int.')
if len(axes) != 2:
raise ValueError('Invalid format for `axes` - '
'should contain two elements.')
if not isinstance(axes[0], int) or not isinstance(axes[1], int):
raise ValueError('Invalid format for `axes` - '
'list elements should be "int".')
self.axes = axes
self.normalize = normalize
self.supports_masking = True
self._reshape_required = False
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = input_shape[0]
shape2 = input_shape[1]
if shape1 is None or shape2 is None:
return
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
if shape1[axes[0]] != shape2[axes[1]]:
raise ValueError('Dimension incompatibility '
'%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
'Layer shapes: %s, %s' % (shape1, shape2))
def _merge_function(self, inputs):
if len(inputs) != 2:
raise ValueError('A `Dot` layer should be called on exactly 2 inputs')
x1 = inputs[0]
x2 = inputs[1]
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
else:
axes = [self.axes] * 2
else:
axes = []
for i in range(len(self.axes)):
if self.axes[i] < 0:
axes.append(self.axes[i] % K.ndim(inputs[i]))
else:
axes.append(self.axes[i])
if self.normalize:
x1 = nn.l2_normalize(x1, axis=axes[0])
x2 = nn.l2_normalize(x2, axis=axes[1])
output = K.batch_dot(x1, x2, axes)
return output
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
shape1.pop(axes[0])
shape2.pop(axes[1])
shape2.pop(0)
output_shape = shape1 + shape2
if len(output_shape) == 1:
output_shape += [1]
return tuple(output_shape)
def compute_mask(self, inputs, mask=None):
return None
def get_config(self):
config = {
'axes': self.axes,
'normalize': self.normalize,
}
base_config = super(Dot, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.add')
def add(inputs, **kwargs):
"""Functional interface to the `Add` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the sum of the inputs.
Examples:
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
added = keras.layers.add([x1, x2])
out = keras.layers.Dense(4)(added)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
"""
return Add(**kwargs)(inputs)
@keras_export('keras.layers.subtract')
def subtract(inputs, **kwargs):
"""Functional interface to the `Subtract` layer.
Arguments:
inputs: A list of input tensors (exactly 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the difference of the inputs.
Examples:
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
subtracted = keras.layers.subtract([x1, x2])
out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
"""
return Subtract(**kwargs)(inputs)
@keras_export('keras.layers.multiply')
def multiply(inputs, **kwargs):
"""Functional interface to the `Multiply` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise product of the inputs.
"""
return Multiply(**kwargs)(inputs)
@keras_export('keras.layers.average')
def average(inputs, **kwargs):
"""Functional interface to the `Average` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the average of the inputs.
"""
return Average(**kwargs)(inputs)
@keras_export('keras.layers.maximum')
def maximum(inputs, **kwargs):
"""Functional interface to the `Maximum` layer that computes
the maximum (element-wise) list of `inputs`.
For example:
```python
input1 = tf.keras.layers.Input(shape=(16,))
x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)
input2 = tf.keras.layers.Input(shape=(32,))
x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)
max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)
out = tf.keras.layers.Dense(4)(max_inp)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
```
Arguments:
inputs: A list of input tensors (at least 2) of same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor (of same shape as input tensor) with the element-wise
maximum of the inputs.
Raises:
ValueError: If input tensors are of different shape.
"""
return Maximum(**kwargs)(inputs)
@keras_export('keras.layers.minimum')
def minimum(inputs, **kwargs):
"""Functional interface to the `Minimum` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise minimum of the inputs.
"""
return Minimum(**kwargs)(inputs)
@keras_export('keras.layers.concatenate')
def concatenate(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs)
@keras_export('keras.layers.dot')
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
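# A minimal usage sketch (illustrative only; it assumes the standard `tf.keras`
# functional API and is never called by this module). It shows how the
# functional interfaces defined above compose into a small model.
def _example_merge_usage():
  from tensorflow.python import keras  # local import for the illustration
  a = keras.Input(shape=(16,))
  b = keras.Input(shape=(16,))
  summed = add([a, b])  # elementwise sum, shape (None, 16)
  # Cosine similarity between the two inputs, shape (None, 1).
  similarity = dot([a, b], axes=1, normalize=True)
  merged = concatenate([summed, similarity], axis=-1)  # shape (None, 17)
  return keras.models.Model([a, b], merged)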
| tensorflow-master | tensorflow/python/keras/layers/merge.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
class Conv(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if (self.padding == 'causal' and not isinstance(self,
(Conv1D, SeparableConv1D))):
      raise ValueError('Causal padding is only supported for `Conv1D` '
                       'and `SeparableConv1D`.')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=op_padding,
data_format=conv_utils.convert_data_format(self.data_format,
self.rank + 2))
self.built = True
def call(self, inputs):
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
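  # Informal example (illustration only): for channels_last input
  # (None, 6, 128) with kernel_size=(3,), strides=(1,), dilation 1 and
  # 'valid' padding, conv_output_length yields 6 - 3 + 1 = 4, so the output
  # shape is (None, 4, filters), consistent with the Conv1D docstring below.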
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
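  # Informal example (illustration only): for a causal Conv1D with
  # kernel_size=3 and dilation_rate=2, left_pad = 2 * (3 - 1) = 4, so a
  # channels_last input (batch, steps, channels) is zero-padded on the left
  # (the past) to (batch, steps + 4, channels) before the 'valid' convolution.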
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide an `input_shape` argument
(tuple of integers or `None`, e.g.
  `(10, 128)` for sequences of 10 vectors of 128 dimensions,
  or `(None, 128)` for variable-length sequences of 128-dimensional vectors).
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
`"causal"` results in causal (dilated) convolutions, e.g. output[t]
does not depend on input[t+1:]. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section
2.1](https://arxiv.org/abs/1609.03499).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
dilation_rate: an integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Examples:
```python
# Small convolutional model for 128-length vectors with 6 timesteps
# model.input_shape == (None, 6, 128)
model = Sequential()
model.add(Conv1D(32, 3,
activation='relu',
input_shape=(6, 128)))
# now: model.output_shape == (None, 4, 32)
```
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
return super(Conv1D, self).call(inputs)
@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
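Examples:
A minimal usage sketch; as in the `Conv1D` example above, `Sequential` and the
layer class are assumed to be imported from `tf.keras`, and the output shape
shown assumes the defaults `padding='valid'` and `data_format='channels_last'`:
```python
# 28x28 single-channel images; a 3x3 kernel with 'valid' padding
# shrinks each spatial dimension by 2.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
# now: model.output_shape == (None, 26, 26, 32)
```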
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along each spatial
dimension.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
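Examples:
A minimal sketch for single-channel volumes, assuming the defaults
`padding='valid'` and `data_format='channels_last'`:
```python
# 28x28x28 volumes with one channel; 'valid' padding trims 2 from each
# spatial dimension.
model = Sequential()
model.add(Conv3D(16, (3, 3, 3), input_shape=(28, 28, 28, 1)))
# now: model.output_shape == (None, 26, 26, 26, 16)
```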
Input shape:
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
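Examples:
A minimal upsampling sketch; the output shape assumes
`data_format='channels_last'` and the default `output_padding=None`:
```python
# Upsample 7x7 feature maps by a factor of 2.
model = Sequential()
model.add(Conv2DTranspose(64, (3, 3), strides=2, padding='same',
input_shape=(7, 7, 128)))
# With padding='same' and stride 2, each spatial dimension doubles:
# model.output_shape == (None, 14, 14, 64)
```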
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input shape: ' +
str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = inputs_shape[h_axis], inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
@keras_export('keras.layers.Conv3DTranspose',
'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 3 integers,
specifying the amount of padding along the depth, height, and
width.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
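Examples:
A minimal upsampling sketch, assuming `data_format='channels_last'`
and the default `output_padding=None`:
```python
# Double each spatial dimension of 4x4x4 feature maps.
model = Sequential()
model.add(Conv3DTranspose(32, (3, 3, 3), strides=2, padding='same',
input_shape=(4, 4, 4, 64)))
# now: model.output_shape == (None, 8, 8, 8, 32)
```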
Input shape:
5D tensor with shape:
`(batch, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch, depth, rows, cols, channels)` if data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
`depth`, `rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
output_padding=None,
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 5:
raise ValueError('Inputs should have rank 5, received input shape:',
str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined, found None: ' + str(input_shape))
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
'kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_depth = conv_utils.deconv_output_length(depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_depth, out_height,
out_width)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (batch_size, out_depth, out_height, out_width,
self.filters)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
padding=self.padding.upper())
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv3DTranspose, self).get_config()
config.pop('dilation_rate')
config['output_padding'] = self.output_padding
return config
class SeparableConv(Conv):
"""Abstract base layer for separable nD convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv, self).__init__(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
bias_initializer=initializers.get(bias_initializer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
depthwise_kernel_shape = self.kernel_size + (input_dim,
self.depth_multiplier)
pointwise_kernel_shape = (
1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
self.depthwise_kernel = self.add_weight(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype)
self.pointwise_kernel = self.add_weight(
name='pointwise_kernel',
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'depth_multiplier':
self.depth_multiplier,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SeparableConv1D',
'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
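Examples:
A minimal sketch; the output shape assumes the defaults `padding='valid'`
and `data_format='channels_last'`:
```python
# Sequences of 10 timesteps with 128 features each.
model = Sequential()
model.add(SeparableConv1D(32, 3, activation='relu', input_shape=(10, 128)))
# now: model.output_shape == (None, 8, 32)
```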
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
if self.data_format == 'channels_last':
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
# Explicitly broadcast inputs and kernels to 4D.
# TODO(fchollet): refactor when a native separable_conv1d op is available.
inputs = array_ops.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
dilation_rate = (1,) + self.dilation_rate
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
outputs = nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=op_padding.upper(),
rate=dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.SeparableConv2D',
'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
"""Depthwise separable 2D convolution.
Separable convolutions consist of first performing
a depthwise spatial convolution
(which acts on each input channel separately)
followed by a pointwise convolution which mixes together the resulting
output channels. The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Intuitively, separable convolutions can be understood as
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
pointwise_initializer: Initializer for the pointwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
pointwise_regularizer: Regularizer function applied to
the pointwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
pointwise_constraint: Constraint function applied to
the pointwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
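Examples:
A minimal sketch, assuming the defaults `padding='valid'` and
`data_format='channels_last'`:
```python
# 28x28 RGB images; separable conv with 32 output filters.
model = Sequential()
model.add(SeparableConv2D(32, (3, 3), input_shape=(28, 28, 3)))
# now: model.output_shape == (None, 26, 26, 32)
```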
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
# Apply the actual ops.
if self.data_format == 'channels_last':
strides = (1,) + self.strides + (1,)
else:
strides = (1, 1) + self.strides
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=strides,
padding=self.padding.upper(),
rate=self.dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
Depthwise separable convolutions consist of performing
just the first step of a depthwise spatial convolution
(which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Arguments:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. 'linear' activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation').
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
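Examples:
A minimal sketch showing how `depth_multiplier` sets the number of output
channels (assumes the defaults `padding='valid'` and
`data_format='channels_last'`):
```python
# 3 input channels * depth_multiplier 2 -> 6 output channels.
model = Sequential()
model.add(DepthwiseConv2D((3, 3), depth_multiplier=2,
input_shape=(28, 28, 3)))
# now: model.output_shape == (None, 26, 26, 6)
```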
Input shape:
4D tensor with shape:
`[batch, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch, filters, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
# Normalize to TensorShape so the `.dims` access below also works when a
# plain tuple or list is passed in.
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
@keras_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Arguments:
size: Integer. Upsampling factor.
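Examples:
A minimal sketch; each timestep is simply repeated `size` times:
```python
model = Sequential()
model.add(UpSampling1D(size=2, input_shape=(3, 8)))
# now: model.output_shape == (None, 6, 8)
```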
Input shape:
3D tensor with shape: `(batch, steps, features)`.
Output shape:
3D tensor with shape: `(batch, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = backend.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by `size[0]` and `size[1]` respectively.
Arguments:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
interpolation: A string, one of `nearest` or `bilinear`.
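Examples:
A minimal sketch with the default `interpolation='nearest'`, assuming
`data_format='channels_last'`:
```python
model = Sequential()
model.add(UpSampling2D(size=(2, 2), input_shape=(12, 12, 3)))
# Rows and columns are each repeated twice:
# model.output_shape == (None, 24, 24, 3)
```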
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by `size[0]`, `size[1]` and `size[2]` respectively.
Arguments:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
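Example:
A minimal sketch (the shape below is assumed for illustration; `np` and
`keras` refer to NumPy and `tf.keras`):
```python
x = np.zeros((1, 1, 2, 2, 3), dtype='float32')
y = keras.layers.UpSampling3D(size=(2, 2, 2))(x)
# Each spatial dimension is repeated twice, so `y` has shape
# (1, 2, 4, 4, 3) for `channels_last` data.
```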
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
dim1 = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
dim2 = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
dim3 = self.size[2] * input_shape[
4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
dim1 = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
dim2 = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
dim3 = self.size[2] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Arguments:
padding: Int, or tuple of int (length 2).
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and at the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch, padded_axis, features)`
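Example:
A minimal sketch (the input values below are assumed for illustration;
`np` and `keras` refer to NumPy and `tf.keras`):
```python
x = np.array([[[1.], [2.]]])  # shape (1, 2, 1)
y = keras.layers.ZeroPadding1D(padding=(1, 2))(x)
# One zero step is added at the beginning and two at the end of axis 1,
# so `y` has shape (1, 5, 1): [0., 1., 2., 0., 0.] along the time axis.
```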
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros
at the top, bottom, left and right side of an image tensor.
Arguments:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, padded_rows, padded_cols)`
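Example:
A minimal sketch (the shape and padding below are assumed for
illustration; `np` and `keras` refer to NumPy and `tf.keras`):
```python
x = np.ones((1, 2, 2, 3), dtype='float32')
y = keras.layers.ZeroPadding2D(padding=((1, 0), (2, 2)))(x)
# One row of zeros is added at the top, none at the bottom, and two
# columns of zeros on each side, so `y` has shape (1, 3, 6, 3).
```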
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return backend.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Arguments:
padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric padding
is applied to all three spatial dimensions.
- If tuple of 3 ints:
interpreted as three different
symmetric padding values for the three spatial dimensions:
`(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_padded_axis, second_padded_axis, third_padded_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_padded_axis, second_padded_axis,
third_padded_axis)`
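Example:
A minimal sketch (the shape below is assumed for illustration; `np` and
`keras` refer to NumPy and `tf.keras`):
```python
x = np.ones((1, 2, 2, 2, 3), dtype='float32')
y = keras.layers.ZeroPadding3D(padding=(1, 1, 1))(x)
# One plane of zeros is added on each side of every spatial dimension,
# so `y` has shape (1, 4, 4, 4, 3).
```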
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Arguments:
cropping: Int, or tuple of int (length 2).
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch, cropped_axis, features)`
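Example:
A minimal sketch (the input values below are assumed for illustration;
`np` and `keras` refer to NumPy and `tf.keras`):
```python
x = np.array([[[1.], [2.], [3.], [4.]]])  # shape (1, 4, 1)
y = keras.layers.Cropping1D(cropping=(1, 2))(x)
# One step is trimmed from the start and two from the end of axis 1,
# so `y` has shape (1, 1, 1) and contains only the value 2.
```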
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Arguments:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, cropped_rows, cropped_cols)`
Examples:
```python
# Crop the input 2D images or feature maps
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# now model.output_shape == (None, 24, 20, 3)
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
# now model.output_shape == (None, 20, 16, 64)
```
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Arguments:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
- If tuple of 3 ints: interpreted as three different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
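Example:
A minimal sketch (the shape below is assumed for illustration; `np` and
`keras` refer to NumPy and `tf.keras`):
```python
x = np.ones((1, 4, 4, 4, 3), dtype='float32')
y = keras.layers.Cropping3D(cropping=((1, 1), (1, 1), (1, 1)))(x)
# One slice is removed from each side of the three spatial dimensions,
# so `y` has shape (1, 2, 2, 2, 3).
```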
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| tensorflow-master | tensorflow/python/keras/layers/convolutional.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for embedding layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class EmbeddingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_embedding(self):
if tf_test_util.is_gpu_available():
self.skipTest('Only test embedding on CPU.')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'input_length': 2},
input_shape=(3, 2),
input_dtype='int32',
expected_output_dtype='float32')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'mask_zero': True},
input_shape=(3, 2),
input_dtype='int32',
expected_output_dtype='float32')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'mask_zero': True},
input_shape=(3, 4, 2),
input_dtype='int32',
expected_output_dtype='float32')
testing_utils.layer_test(
keras.layers.Embedding,
kwargs={'output_dim': 4,
'input_dim': 10,
'mask_zero': True,
'input_length': (None, 2)},
input_shape=(3, 4, 2),
input_dtype='int32',
expected_output_dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_embedding_correctness(self):
layer = keras.layers.Embedding(output_dim=2, input_dim=2)
model = keras.models.Sequential([layer])
layer.set_weights([np.array([[1, 1], [2, 2]])])
model.run_eagerly = testing_utils.should_run_eagerly()
outputs = model.predict(np.array([[0, 1, 0]], dtype='int32'))
self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]])
@tf_test_util.run_in_graph_and_eager_modes
def test_eager_gpu_cpu(self):
l = keras.layers.Embedding(output_dim=2, input_dim=2)
l.build((None, 2))
inputs = keras.backend.constant([[0, 1, 0]], dtype='int32')
with backprop.GradientTape() as tape:
output = l(inputs)
gs = tape.gradient(output, l.weights)
opt = adagrad.AdagradOptimizer(0.1)
opt.apply_gradients(zip(gs, l.weights))
self.assertAllEqual(len(gs), 1)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/embeddings_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class BatchNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm(self):
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
@tf_test_util.run_in_graph_and_eager_modes
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@tf_test_util.run_in_graph_and_eager_modes
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, standard deviation 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, standard deviation 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_correctness(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float32')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_batchnorm_mixed_precision(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float16')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float16')
@tf_test_util.run_in_graph_and_eager_modes
def test_batchnorm_policy(self):
norm = keras.layers.BatchNormalization(
axis=-1,
input_shape=(4, 4, 3),
momentum=0.8,
dtype=policy.Policy('infer_float32_vars'))
x = np.random.normal(size=(10, 4, 4, 3)).astype('float16')
y = norm(x)
self.assertEqual(y.dtype, 'float16')
self.assertEqual(norm.beta.dtype.base_dtype, 'float32')
self.assertEqual(norm.gamma.dtype.base_dtype, 'float32')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_non_trainable_with_fit(self):
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(inputs, outputs)
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.random.random((100, 3)), np.random.random((100, 3)))
test_data = np.random.random((10, 3))
test_targets = np.random.random((10, 3))
test_loss = model.evaluate(test_data, test_targets)
bn.trainable = False
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
train_loss = model.train_on_batch(test_data, test_targets)
self.assertAlmostEqual(test_loss, train_loss)
@tf_test_util.run_in_graph_and_eager_modes
def test_batchnorm_non_trainable_with_tf_function(self):
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(inputs, outputs)
loss_fn = keras.losses.MeanSquaredError()
optimizer = rmsprop_v2.RMSprop()
@def_function.function()
def train_step(x, y):
with backprop.GradientTape() as tape:
y_pred = model(x, training=True)
loss = loss_fn(y, y_pred)
grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
return loss
@def_function.function()
def test_step(x, y):
y_pred = model(x, training=False)
loss = loss_fn(y, y_pred)
return loss
train_step(np.random.random((100, 3)), np.random.random((100, 3)))
test_data = np.random.random((10, 3))
test_targets = np.random.random((10, 3))
test_loss = test_step(test_data, test_targets)
bn.trainable = False
train_loss = train_step(test_data, test_targets)
if context.executing_eagerly():
self.assertAlmostEqual(test_loss.numpy(), train_loss.numpy())
def test_eager_batchnorm_in_custom_model_call_with_tf_function(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.bn = keras.layers.BatchNormalization()
@def_function.function()
def call(self, x, training):
return self.bn(x, training=training)
with context.eager_mode():
model = MyModel()
for _ in range(10):
x = constant_op.constant(0.5, shape=[1, 1])
model(x, training=True)
# Make sure the moving mean and variance have been updated
self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3)
self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
class BatchNormalizationV1Test(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_v1_fused_attribute(self):
norm = normalization.BatchNormalization()
inp = keras.layers.Input((4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = normalization.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(2, 2, 2))
norm(inp)
self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm_v2(self):
testing_utils.layer_test(
normalization_v2.BatchNormalization,
kwargs={'fused': True},
input_shape=(3, 3, 3, 3))
testing_utils.layer_test(
normalization_v2.BatchNormalization,
kwargs={'fused': None},
input_shape=(3, 3, 3))
@tf_test_util.run_in_graph_and_eager_modes
def test_v2_fused_attribute(self):
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
inp = keras.layers.Input(shape=(4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(fused=True, axis=[3])
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
with self.assertRaisesRegexp(ValueError, 'fused.*renorm'):
normalization_v2.BatchNormalization(fused=True, renorm=True)
with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
normalization_v2.BatchNormalization(fused=True, axis=2)
with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
normalization_v2.BatchNormalization(fused=True, axis=[1, 3])
with self.assertRaisesRegexp(ValueError, 'fused.*virtual_batch_size'):
normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2)
with self.assertRaisesRegexp(ValueError, 'fused.*adjustment'):
normalization_v2.BatchNormalization(fused=True,
adjustment=lambda _: (1, 0))
norm = normalization_v2.BatchNormalization(fused=True)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4))
with self.assertRaisesRegexp(ValueError, '4D input tensors'):
norm(inp)
def test_updates_in_wrap_function(self):
with context.eager_mode():
layer = keras.layers.BatchNormalization()
def my_func():
x = array_ops.ones((10, 1))
return layer(x, training=True)
wrapped_fn = wrap_function.wrap_function(my_func, [])
wrapped_fn()
# Updates should be tracked in a `wrap_function`.
self.assertLen(layer.updates, 2)
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
model = keras.models.Sequential()
model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
norm = layer(momentum=0.8, fused=fused)
model.add(norm)
if dtype == 'float16':
# Keras models require float32 losses.
model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, standard deviation 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
.astype(dtype))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
@parameterized.parameters(
[normalization.BatchNormalization, normalization_v2.BatchNormalization])
class NormalizationLayersGraphModeOnlyTest(
test.TestCase, parameterized.TestCase):
def test_shared_batchnorm(self, layer):
"""Test that a BN layer can be shared across different data streams."""
with self.cached_session():
# Test single layer reuse
bn = layer()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
model.train_on_batch(x, x)
self.assertLen(bn.updates, 4)
self.assertLen(bn.get_updates_for(x1), 2)
self.assertLen(model.get_updates_for(x2), 2)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name='new_model')
self.assertLen(new_model.updates, 6)
self.assertLen(model.updates, 6)
self.assertLen(new_model.get_updates_for(x3), 2)
new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self, layer):
with self.cached_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = layer(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
assert not model.updates
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
@tf_test_util.run_deprecated_v1
def test_batchnorm_trainable(self, layer):
"""Tests that batchnorm layer is trainable when learning phase is enabled.
Computes mean and std for current inputs then
applies batch normalization using them.
Args:
layer: Either V1 or V2 of BatchNormalization layer.
"""
# TODO(fchollet): enable in all execution modes when issue with
# learning phase setting is resolved.
with self.cached_session():
bn_mean = 0.5
bn_std = 10.
val_a = np.expand_dims(np.arange(10.), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = layer()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights([
np.array([1.]),
np.array([0.]),
np.array([bn_mean]),
np.array([bn_std**2])
])
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
with keras.backend.learning_phase_scope(1):
model = get_model(bn_mean, bn_std)
model.compile(loss='mse', optimizer='rmsprop')
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
def _run_layernorm_correctness_test(layer, dtype='float32'):
model = keras.models.Sequential()
norm = layer(input_shape=(2, 2, 2))
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, standard deviation 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
.astype(dtype))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class LayerNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_layernorm(self):
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
@tf_test_util.run_in_graph_and_eager_modes
def test_layernorm_weights(self):
layer = keras.layers.LayerNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 0)
layer = keras.layers.LayerNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 2)
@tf_test_util.run_in_graph_and_eager_modes
def test_layernorm_regularization(self):
layer = keras.layers.LayerNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.LayerNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_layernorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, standard deviation 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_layernorm_correctness(self):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_layernorm_mixed_precision(self):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float16')
@tf_test_util.run_in_graph_and_eager_modes
def testIncorrectAxisType(self):
with self.assertRaisesRegexp(
ValueError, r'Expected an int or a list/tuple of ints'):
_ = normalization.LayerNormalization(axis={'axis': -1})
@tf_test_util.run_in_graph_and_eager_modes
def testInvalidAxis(self):
with self.assertRaisesRegexp(ValueError, r'Invalid axis: 3'):
layer_norm = normalization.LayerNormalization(axis=3)
layer_norm.build(input_shape=(2, 2, 2))
@tf_test_util.run_in_graph_and_eager_modes
def testDuplicateAxis(self):
with self.assertRaisesRegexp(ValueError, r'Duplicate axis:'):
layer_norm = normalization.LayerNormalization(axis=[-1, -1])
layer_norm.build(input_shape=(2, 2, 2))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/normalization_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.StackedRNNCells')
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Arguments:
cells: List of RNN cell instances.
Examples:
```python
cells = [
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
]
inputs = keras.Input((timesteps, input_dim))
x = keras.layers.RNN(cells)(inputs)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
'Received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
'Received cells:', cells)
self.cells = cells
# reverse_state_order determines whether the state sizes are listed in the
# reverse order of the cells' states. Users might want to set this to True
# to keep the existing behavior. This is only useful when using
# RNN(return_state=True), since the states will be returned in the same
# order as state_size.
self.reverse_state_order = kwargs.pop('reverse_state_order', False)
if self.reverse_state_order:
logging.warning('reverse_state_order=True in StackedRNNCells will soon '
'be deprecated. Please update the code to work with the '
'natural order of states if you rely on the RNN states, '
'eg RNN(return_state=True).')
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
return tuple(c.state_size for c in
(self.cells[::-1] if self.reverse_state_order else self.cells))
@property
def output_size(self):
if getattr(self.cells[-1], 'output_size', None) is not None:
return self.cells[-1].output_size
elif _is_multiple_state(self.cells[-1].state_size):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
initial_states = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
get_initial_state_fn = getattr(cell, 'get_initial_state', None)
if get_initial_state_fn:
initial_states.append(get_initial_state_fn(
inputs=inputs, batch_size=batch_size, dtype=dtype))
else:
initial_states.append(_generate_zero_filled_state_for_cell(
cell, inputs, batch_size, dtype))
return tuple(initial_states)
def call(self, inputs, states, constants=None, **kwargs):
# Recover per-cell states.
state_size = (self.state_size[::-1]
if self.reverse_state_order else self.state_size)
nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
states = states if nest.is_sequence(states) else [states]
# TF cell does not wrap the state into list when there is only one state.
is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
if generic_utils.has_arg(cell.call, 'constants'):
inputs, states = cell.call(inputs, states, constants=constants,
**kwargs)
else:
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
return inputs, nest.pack_sequence_as(state_size,
nest.flatten(new_nested_states))
@tf_utils.shape_type_conversion
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
for cell in self.cells:
if isinstance(cell, Layer):
if not cell.built:
cell.build(input_shape)
if getattr(cell, 'output_size', None) is not None:
output_dim = cell.output_size
elif _is_multiple_state(cell.state_size):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = tuple([input_shape[0]] +
tensor_shape.as_shape(output_dim).as_list())
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({
'class_name': cell.__class__.__name__,
'config': cell.get_config()
})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cells = []
for cell_config in config.pop('cells'):
cells.append(
deserialize_layer(cell_config, custom_objects=custom_objects))
return cls(cells, **config)
@keras_export('keras.layers.RNN')
class RNN(Layer):
"""Base class for recurrent layers.
Arguments:
cell: An RNN cell instance or a list of RNN cell instances.
An RNN cell is a class that has:
- A `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- A `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers (one size per
state).
The `state_size` can also be TensorShape or tuple/list of
TensorShape, to represent high dimension state.
- An `output_size` attribute. This can be a single integer or a
TensorShape, which represents the shape of the output. For backward
compatibility, if this attribute is not available for the
cell, the value will be inferred from the first element of the
`state_size`.
- A `get_initial_state(inputs=None, batch_size=None, dtype=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if the user didn't specify any initial state via other
means. The returned initial state should have a shape of
[batch_size, cell.state_size]. The cell might choose to create a
tensor full of zeros, or full of other values based on the cell's
implementation.
`inputs` is the input tensor to the RNN layer, which should
contain the batch size as its shape[0], and also the dtype. Note that
the shape[0] might be `None` during graph construction. Either
the `inputs` or the pair of `batch_size` and `dtype` must be provided.
`batch_size` is a scalar tensor that represents the batch size
of the inputs. `dtype` is `tf.DType` that represents the dtype of
the inputs.
For backward compatibility, if this method is not implemented
by the cell, the RNN layer will create a zero-filled tensor with
shape [batch_size, cell.state_size].
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on top of each other in the RNN, resulting in an
efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled, else a symbolic loop will be used.
Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Call arguments:
inputs: Input tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
N-D tensor with shape `(batch_size, timesteps, ...)` or
`(timesteps, batch_size, ...)` when time_major is True.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, state_size)`, where `state_size` could
be a high dimension tensor shape.
- If `return_sequences`: N-D tensor with shape
`(batch_size, timesteps, output_size)`, where `output_size` could
be a high dimension tensor shape, or
`(timesteps, batch_size, output_size)` when `time_major` is True.
- Else, N-D tensor with shape `(batch_size, output_size)`, where
`output_size` could be a high dimension tensor shape.
Masking:
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
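For instance, a minimal sketch (vocabulary and layer sizes below are
illustrative only):
```python
model = keras.Sequential()
# mask_zero=True makes the Embedding emit a mask that the RNN consumes.
model.add(keras.layers.Embedding(input_dim=1000, output_dim=16,
                                 mask_zero=True))
model.add(keras.layers.SimpleRNN(8))
```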
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model. For a sequential model,
pass `batch_input_shape=(...)` to the first layer; for a functional
model with 1 or more Input layers, pass `batch_shape=(...)` to all
the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- Specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
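For example, a minimal stateful setup might look like this (all sizes
below are illustrative only):
```python
model = keras.Sequential()
model.add(keras.layers.SimpleRNN(
    32, stateful=True, batch_input_shape=(32, 10, 100)))
model.add(keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# Remember to pass shuffle=False to fit(), and to reset states between
# independent sequences:
model.reset_states()
```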
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
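For example, a minimal sketch of the symbolic variant (shapes are
illustrative only):
```python
inputs = keras.Input((10, 8))
initial_state = keras.Input((4,))
rnn = keras.layers.RNN(keras.layers.SimpleRNNCell(4))
# Symbolic initial state passed at call time.
outputs = rnn(inputs, initial_state=[initial_state])
# Numerically, a stateful layer's states can instead be set via
# reset_states(states=...).
```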
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of the `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
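For example, assuming a hypothetical `AttentionCell` whose `call` accepts a
`constants` keyword argument (all names below are illustrative only):
```python
rnn = RNN(AttentionCell(32))
# `attention_context` does not change over time; it is passed to the cell
# at every timestep.
outputs = rnn(inputs, constants=[attention_context])
```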
Examples:
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
# If True, the output for masked timestep will be zeros, whereas in the
# False case, output from previous timestep is returned for masked timestep.
self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False)
if 'input_shape' not in kwargs and (
'input_dim' in kwargs or 'input_length' in kwargs):
input_shape = (kwargs.pop('input_length', None),
kwargs.pop('input_dim', None))
kwargs['input_shape'] = input_shape
super(RNN, self).__init__(**kwargs)
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.time_major = time_major
self.supports_masking = True
# The input shape is not known yet; the input could be nested tensors, in
# which case the input spec will be a list of specs for the nested inputs,
# with the same structure as the input.
self.input_spec = None
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = 0
@property
def states(self):
if self._states is None:
state = nest.map_structure(lambda _: None, self.cell.state_size)
return state if nest.is_sequence(self.cell.state_size) else [state]
return self._states
@states.setter
# Automatic tracking catches "self._states" which adds an extra weight and
# breaks HDF5 checkpoints.
@trackable.no_automatic_dependency_tracking
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
# inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
input_shape = nest.flatten(input_shape)[0]
batch = input_shape[0]
time_step = input_shape[1]
if self.time_major:
batch, time_step = time_step, batch
if _is_multiple_state(self.cell.state_size):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
def _get_output_shape(flat_output_size):
output_dim = tensor_shape.as_shape(flat_output_size).as_list()
if self.return_sequences:
if self.time_major:
output_shape = tensor_shape.as_shape([time_step, batch] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch, time_step] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch] + output_dim)
return output_shape
if getattr(self.cell, 'output_size', None) is not None:
# cell.output_size could be nested structure.
output_shape = nest.flatten(nest.map_structure(
_get_output_shape, self.cell.output_size))
output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
else:
# Note that state_size[0] could be a tensor_shape or int.
output_shape = _get_output_shape(state_size[0])
if self.return_state:
def _get_state_shape(flat_state):
state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list()
return tensor_shape.as_shape(state_shape)
state_shape = nest.map_structure(_get_state_shape, state_size)
return generic_utils.to_list(output_shape) + nest.flatten(state_shape)
else:
return output_shape
def compute_mask(self, inputs, mask):
# Time step masks must be the same for each input.
# This is because the mask for an RNN is of size [batch, time_steps, 1],
# and specifies which time steps should be skipped, and a time step
# must be skipped for all inputs.
# TODO(scottzhu): Should we accept multiple different masks?
mask = nest.flatten(mask)[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
# The input_shape here could be a nested structure.
# Convert TensorShape objects to shapes here. The input could be a single
# tensor, or a nested structure of tensors.
def get_input_spec(shape):
if isinstance(shape, tensor_shape.TensorShape):
input_spec_shape = shape.as_list()
else:
input_spec_shape = list(shape)
batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
if not self.stateful:
input_spec_shape[batch_index] = None
input_spec_shape[time_step_index] = None
return InputSpec(shape=tuple(input_spec_shape))
def get_step_input_shape(shape):
if isinstance(shape, tensor_shape.TensorShape):
shape = tuple(shape.as_list())
# remove the timestep from the input_shape
return shape[1:] if self.time_major else (shape[0],) + shape[2:]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
# inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
pass
if not nest.is_sequence(input_shape):
# This indicates that there is only one input.
if self.input_spec is not None:
self.input_spec[0] = get_input_spec(input_shape)
else:
self.input_spec = [get_input_spec(input_shape)]
step_input_shape = get_step_input_shape(input_shape)
else:
if self.input_spec is not None:
self.input_spec[0] = nest.map_structure(get_input_spec, input_shape)
else:
self.input_spec = generic_utils.to_list(
nest.map_structure(get_input_spec, input_shape))
step_input_shape = nest.map_structure(get_step_input_shape, input_shape)
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
if not self.cell.built:
self.cell.build(step_input_shape)
# set or validate state_spec
if _is_multiple_state(self.cell.state_size):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
self._validate_state_spec(state_size, self.state_spec)
else:
self.state_spec = [
InputSpec(shape=[None] + tensor_shape.as_shape(dim).as_list())
for dim in state_size
]
if self.stateful:
self.reset_states()
self.built = True
@staticmethod
def _validate_state_spec(cell_state_sizes, init_state_specs):
"""Validate the state spec between the initial_state and the state_size.
Args:
cell_state_sizes: list, the `state_size` attribute from the cell.
init_state_specs: list, the `state_spec` from the initial_state that is
passed in `call()`.
Raises:
ValueError: When initial state spec is not compatible with the state size.
"""
validation_error = ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(init_state_specs, cell_state_sizes))
flat_cell_state_size = nest.flatten(cell_state_sizes)
flat_state_spec = nest.flatten(init_state_specs)
if len(flat_cell_state_size) != len(flat_state_spec):
raise validation_error
for i in range(len(flat_cell_state_size)):
if not tensor_shape.TensorShape(
# Ignore the first axis for init_state which is for batch
flat_state_spec[i].shape[1:]).is_compatible_with(
tensor_shape.TensorShape(flat_cell_state_size[i])):
raise validation_error
def get_initial_state(self, inputs):
get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)
if nest.is_sequence(inputs):
# The input are nested sequences. Use the first element in the seq to get
# batch size and dtype.
inputs = nest.flatten(inputs)[0]
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1] if self.time_major else input_shape[0]
dtype = inputs.dtype
if get_initial_state_fn:
init_state = get_initial_state_fn(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,
dtype)
# Keras RNN expects the states in a list, even if it's a single state tensor.
if not nest.is_sequence(init_state):
init_state = [init_state]
# Force the state to be a list in case it is a namedtuple, e.g. LSTMStateTuple.
return list(init_state)
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(inputs,
initial_state,
constants,
self._num_constants)
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
additional_inputs += initial_state
self.state_spec = nest.map_structure(
lambda s: InputSpec(shape=K.int_shape(s)), initial_state)
additional_specs += self.state_spec
if constants is not None:
additional_inputs += constants
self.constants_spec = [
InputSpec(shape=K.int_shape(constant)) for constant in constants
]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
is_keras_tensor = K.is_keras_tensor(nest.flatten(additional_inputs)[0])
for tensor in nest.flatten(additional_inputs):
if K.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors'
' (a "Keras tensor" is a tensor that was'
' returned by a Keras layer, or by `Input`)')
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
# The original input_spec is None since there could be a nested tensor
# input. Update the input_spec to match the inputs.
full_input_spec = generic_utils.to_list(
nest.map_structure(lambda _: None, inputs)) + additional_specs
# Perform the call with temporarily replaced input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
# Remove the additional_specs from the input spec and keep the rest. It is
# important to keep the rest since the input spec was populated by build(),
# and will be reused in the stateful=True case.
self.input_spec = self.input_spec[:-len(additional_specs)]
return output
else:
if initial_state is not None:
kwargs['initial_state'] = initial_state
if constants is not None:
kwargs['constants'] = constants
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants)
if mask is not None:
# Time step masks must be the same for each input.
# TODO(scottzhu): Should we accept multiple different masks?
mask = nest.flatten(mask)[0]
if nest.is_sequence(inputs):
# In the case of nested input, use the first element for shape check.
input_shape = K.int_shape(nest.flatten(inputs)[0])
else:
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if self.unroll and timesteps is None:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
# TF RNN cells expect a single tensor as state instead of a list-wrapped tensor.
is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(
inputs, states, constants=constants, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
else:
def step(inputs, states):
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(inputs, states, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
if self.stateful:
updates = []
for state_, state in zip(nest.flatten(self.states), nest.flatten(states)):
updates.append(state_ops.assign(state_, state))
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return generic_utils.to_list(output) + states
else:
return output
def _process_inputs(self, inputs, initial_state, constants):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if (isinstance(inputs, collections.Sequence)
and not isinstance(inputs, tuple)):
# get initial_state from full input spec
# as they could be copied to multiple GPUs.
if not self._num_constants:
initial_state = inputs[1:]
else:
initial_state = inputs[1:-self._num_constants]
constants = inputs[-self._num_constants:]
if len(initial_state) == 0:
initial_state = None
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
return inputs, initial_state, constants
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
spec_shape = None if self.input_spec is None else self.input_spec[0].shape
if spec_shape is None:
# The spec shape can be None, e.g. when constructing an RNN with a custom
# cell, or with standard RNN layers (LSTM/GRU) where we only know the input
# has 3 dims, but not its full shape spec before build().
batch_size = None
else:
batch_size = spec_shape[1] if self.time_major else spec_shape[0]
if not batch_size:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if nest.flatten(self.states)[0] is None:
def create_state_variable(state):
return K.zeros([batch_size] + tensor_shape.as_shape(state).as_list())
self.states = nest.map_structure(
create_state_variable, self.cell.state_size)
if not nest.is_sequence(self.states):
self.states = [self.states]
elif states is None:
for state, size in zip(nest.flatten(self.states),
nest.flatten(self.cell.state_size)):
K.set_value(state, np.zeros([batch_size] +
tensor_shape.as_shape(size).as_list()))
else:
flat_states = nest.flatten(self.states)
flat_input_states = nest.flatten(states)
if len(flat_input_states) != len(flat_states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(flat_states)) + ' states, '
'but it received ' + str(len(flat_input_states)) +
' state values. Input received: ' + str(states))
set_value_tuples = []
for i, (value, state) in enumerate(zip(flat_input_states,
flat_states)):
if value.shape != state.shape:
raise ValueError(
'State ' + str(i) + ' is incompatible with layer ' +
self.name + ': expected shape=' + str(
(batch_size, state)) + ', found shape=' + str(value.shape))
set_value_tuples.append((state, value))
K.batch_set_value(set_value_tuples)
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'time_major': self.time_major
}
if self._num_constants:
config['num_constants'] = self._num_constants
if self.zero_output_for_mask:
config['zero_output_for_mask'] = self.zero_output_for_mask
cell_config = self.cell.get_config()
config['cell'] = {
'class_name': self.cell.__class__.__name__,
'config': cell_config
}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)
num_constants = config.pop('num_constants', 0)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@keras_export('keras.layers.AbstractRNNCell')
class AbstractRNNCell(Layer):
"""Abstract object representing an RNN cell.
This is the base class for implementing RNN cells with custom behavior.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`.
Examples:
```python
class MinimalRNNCell(AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super(MinimalRNNCell, self).__init__(**kwargs)
@property
def state_size(self):
return self.units
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, output
```
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
for each `s` in `self.state_size`.
"""
def call(self, inputs, states):
"""The function that contains the logic for one RNN step calculation.
Args:
inputs: the input tensor, which is a slice from the overall RNN input
along the time dimension (usually the second dimension).
states: the state tensor from the previous step, which has shape
`(batch, state_size)`. At timestep 0, it will be the initial state the
user specified, or a zero-filled tensor otherwise.
Returns:
A tuple of two tensors:
1. output tensor for the current timestep, with size `output_size`.
2. state tensor for next step, which has the shape of `state_size`.
"""
raise NotImplementedError('Abstract method')
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError('Abstract method')
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError('Abstract method')
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
class DropoutRNNCellMixin(object):
"""Object that hold dropout related fields for RNN Cell.
This class is not a standalone RNN cell. It suppose to be used with a RNN cell
by multiple inheritance. Any cell that mix with class should have following
fields:
dropout: a float number within range [0, 1). The ratio that the input
tensor need to dropout.
recurrent_dropout: a float number within range [0, 1). The ratio that the
recurrent state weights need to dropout.
This object will create and cache created dropout masks, and reuse them for
the incoming data, so that the same mask is used for every batch input.
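A minimal sketch of mixing this class into a custom cell (the cell below is
illustrative only; it assumes the input dimension equals `units` and uses a
trivial recurrence):
```python
class MinimalDropoutCell(DropoutRNNCellMixin, Layer):
  def __init__(self, units, dropout=0., **kwargs):
    self.units = units
    self.state_size = units
    self.dropout = dropout       # required by the mixin
    self.recurrent_dropout = 0.  # required by the mixin
    super(MinimalDropoutCell, self).__init__(**kwargs)
  def call(self, inputs, states, training=None):
    dp_mask = self.get_dropout_mask_for_cell(inputs, training)
    if dp_mask is not None:
      inputs = inputs * dp_mask
    output = inputs + states[0]  # trivial recurrence, for illustration only
    return output, [output]
```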
"""
def __init__(self, *args, **kwargs):
# Note that the following two masks will be used in "graph function" mode,
# e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
# tensors will be generated differently than in the "graph function" case,
# and they will be cached.
# Also note that in graph mode, we still cache those masks only because the
# RNN could be created with `unroll=True`. In that case, the `cell.call()`
# function will be invoked multiple times, and we want to ensure same mask
# is used every time.
self._dropout_mask = None
self._recurrent_dropout_mask = None
self._eager_dropout_mask = None
self._eager_recurrent_dropout_mask = None
super(DropoutRNNCellMixin, self).__init__(*args, **kwargs)
def reset_dropout_mask(self):
"""Reset the cached dropout masks if any.
It is important for the RNN layer to invoke this in its call() method so
that the cached mask is cleared before calling cell.call(). The mask
should be cached across all timesteps within the same batch, but shouldn't
be cached between batches. Otherwise it will introduce unreasonable bias
against certain indices of data within the batch.
"""
self._dropout_mask = None
self._eager_dropout_mask = None
def reset_recurrent_dropout_mask(self):
"""Reset the cached recurrent dropout masks if any.
It is important for the RNN layer to invoke this in its call() method so
that the cached mask is cleared before calling cell.call(). The mask
should be cached across all timesteps within the same batch, but shouldn't
be cached between batches. Otherwise it will introduce unreasonable bias
against certain indices of data within the batch.
"""
self._recurrent_dropout_mask = None
self._eager_recurrent_dropout_mask = None
def get_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the dropout mask for RNN cell's input.
It will create a mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: the input tensor whose shape will be used to generate the dropout
mask.
training: boolean tensor, whether it is in training mode; dropout will be
ignored in non-training mode.
count: int, how many dropout masks will be generated. This is useful for
cells that have internal weights fused together.
Returns:
List of mask tensors, generated or cached masks based on context.
"""
if self.dropout == 0:
return None
if (not context.executing_eagerly() and self._dropout_mask is None
or context.executing_eagerly() and self._eager_dropout_mask is None):
# Generate new mask and cache it based on context.
dp_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=count)
if context.executing_eagerly():
self._eager_dropout_mask = dp_mask
else:
self._dropout_mask = dp_mask
else:
# Reuse the existing mask.
dp_mask = (self._eager_dropout_mask
if context.executing_eagerly() else self._dropout_mask)
return dp_mask
def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the recurrent dropout mask for RNN cell.
It will create a mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: the input tensor whose shape will be used to generate the dropout
mask.
training: boolean tensor, whether it is in training mode; dropout will be
ignored in non-training mode.
count: int, how many dropout masks will be generated. This is useful for
cells that have internal weights fused together.
Returns:
List of mask tensors, generated or cached masks based on context.
"""
if self.recurrent_dropout == 0:
return None
if (not context.executing_eagerly() and self._recurrent_dropout_mask is None
or context.executing_eagerly()
and self._eager_recurrent_dropout_mask is None):
# Generate new mask and cache it based on context.
rec_dp_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.recurrent_dropout,
training=training,
count=count)
if context.executing_eagerly():
self._eager_recurrent_dropout_mask = rec_dp_mask
else:
self._recurrent_dropout_mask = rec_dp_mask
else:
# Reuse the existing mask.
rec_dp_mask = (self._eager_recurrent_dropout_mask
if context.executing_eagerly()
else self._recurrent_dropout_mask)
return rec_dp_mask
@keras_export('keras.layers.SimpleRNNCell')
class SimpleRNNCell(DropoutRNNCellMixin, Layer):
"""Cell class for SimpleRNN.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
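A minimal usage sketch (sizes below are illustrative only):
```python
inputs = keras.Input((None, 8))
# Wrap the cell in an RNN layer to process a whole sequence.
outputs = keras.layers.RNN(keras.layers.SimpleRNNCell(4))(inputs)
```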
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
dp_mask = self.get_dropout_mask_for_cell(inputs, training)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
prev_output, training)
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output = prev_output * rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
return output, [output]
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SimpleRNN')
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
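A minimal usage sketch (sizes below are illustrative only):
```python
model = keras.Sequential([
    keras.layers.SimpleRNN(4, input_shape=(10, 8)),
    keras.layers.Dense(1),
])
```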
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
logging.warning('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(SimpleRNN, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(SimpleRNN, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
@keras_export(v1=['keras.layers.GRUCell'])
class GRUCell(DropoutRNNCellMixin, Layer):
"""Cell class for the GRU layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
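A minimal usage sketch (sizes below are illustrative only):
```python
inputs = keras.Input((None, 8))
outputs = keras.layers.RNN(GRUCell(4))(inputs)
```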
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
reset_after=False,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU biases
# `(2 * 3 * self.units,)`, so that we can distinguish the classes
# when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(shape=bias_shape,
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=3)
if self.use_bias:
if not self.reset_after:
input_bias, recurrent_bias = self.bias, None
else:
input_bias, recurrent_bias = array_ops.unstack(self.bias)
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel[:, :self.units])
x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:])
if self.use_bias:
x_z = K.bias_add(x_z, input_bias[:self.units])
x_r = K.bias_add(x_r, input_bias[self.units: self.units * 2])
x_h = K.bias_add(x_h, input_bias[self.units * 2:])
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
recurrent_r = K.dot(h_tm1_r,
self.recurrent_kernel[:, self.units:self.units * 2])
if self.reset_after and self.use_bias:
recurrent_z = K.bias_add(recurrent_z, recurrent_bias[:self.units])
recurrent_r = K.bias_add(recurrent_r,
recurrent_bias[self.units:self.units * 2])
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
if self.use_bias:
recurrent_h = K.bias_add(recurrent_h, recurrent_bias[self.units * 2:])
recurrent_h = r * recurrent_h
else:
recurrent_h = K.dot(r * h_tm1_h,
self.recurrent_kernel[:, self.units * 2:])
hh = self.activation(x_h + recurrent_h)
else:
if 0. < self.dropout < 1.:
inputs = inputs * dp_mask[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = K.bias_add(matrix_x, input_bias)
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
x_h = matrix_x[:, 2 * self.units:]
if 0. < self.recurrent_dropout < 1.:
h_tm1 = h_tm1 * rec_dp_mask[0]
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
else:
# hidden state projected separately for update/reset and new
matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units])
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units:2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * matrix_inner[:, 2 * self.units:]
else:
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation,
'reset_after': self.reset_after
}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@keras_export(v1=['keras.layers.GRU'])
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
There are two variants. The default one is based on 1406.1078v3 and
has the reset gate applied to the hidden state before matrix multiplication.
The other one is based on the original 1406.1078v1 and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. Use `reset_after=True` and
`recurrent_activation='sigmoid'`.
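For example, a sketch of the CuDNN-compatible configuration (the unit count
below is illustrative only):
```python
layer = GRU(32, reset_after=True, recurrent_activation='sigmoid')
```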
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=False,
**kwargs):
if implementation == 0:
logging.warning('`implementation=0` has been deprecated, '
'and now defaults to `implementation=1`. '
'Please update your layer call.')
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after)
super(GRU, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(GRU, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation,
'reset_after':
self.reset_after
}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
@keras_export(v1=['keras.layers.LSTMCell'])
class LSTMCell(DropoutRNNCellMixin, Layer):
"""Cell class for the LSTM layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
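  Example (an illustrative sketch; the cell size and input shape are arbitrary,
  and the cell is wrapped in the `RNN` layer defined in this module):
  ```python
  cell = LSTMCell(4)
  layer = RNN(cell)
  output = layer(keras.Input(shape=(10, 8)))  # shape: (None, 4)
  ```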
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
    # tuple(_ListWrapper) was silently dropping list content in at least Python
    # 2.7.10, and was fixed after 2.7.16. Wrap state_size in NoDependency() so
    # that base_layer.__setattr__ will not convert it to ListWrapper. Downstream,
    # self.states will be a list since it is generated from nest.map_structure
    # with list, and tuple(list) will work properly.
self.state_size = data_structures.NoDependency([self.units, self.units])
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
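        # The bias is laid out as [i, f, c, o]; initialize the forget-gate
        # slice to ones, as recommended by Jozefowicz et al.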
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
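    # Standard LSTM gate equations: i = input gate, f = forget gate,
    # c = new carry (cell) state, o = output gate; each uses its own slice
    # of the recurrent kernel.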
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
f = self.recurrent_activation(x_f + K.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))
c = f * c_tm1 + i * self.activation(x_c + K.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
k_i, k_f, k_c, k_o = array_ops.split(
self.kernel, num_or_size_splits=4, axis=1)
x_i = K.dot(inputs_i, k_i)
x_f = K.dot(inputs_f, k_f)
x_c = K.dot(inputs_c, k_c)
x_o = K.dot(inputs_o, k_o)
if self.use_bias:
b_i, b_f, b_c, b_o = array_ops.split(
self.bias, num_or_size_splits=4, axis=0)
x_i = K.bias_add(x_i, b_i)
x_f = K.bias_add(x_f, b_f)
x_c = K.bias_add(x_c, b_c)
x_o = K.bias_add(x_o, b_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if 0. < self.dropout < 1.:
inputs = inputs * dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 = h_tm1 * rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z = array_ops.split(z, num_or_size_splits=4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return list(_generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype))
@keras_export('keras.experimental.PeepholeLSTMCell')
class PeepholeLSTMCell(LSTMCell):
"""Equivalent to LSTMCell class but adds peephole connections.
Peephole connections allow the gates to utilize the previous internal state as
well as the previous hidden state (which is what LSTMCell is limited to).
  This allows PeepholeLSTMCell to learn precise timings better than LSTMCell.
From [Gers et al.](http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):
"We find that LSTM augmented by 'peephole connections' from its internal
cells to its multiplicative gates can learn the fine distinction between
sequences of spikes spaced either 50 or 49 time steps apart without the help
of any short training exemplars."
The peephole implementation is based on:
[Long short-term memory recurrent neural network architectures for
large scale acoustic modeling.
](https://research.google.com/pubs/archive/43905.pdf)
Example:
```python
# Create 2 PeepholeLSTMCells
peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]]
# Create a layer composed sequentially of the peephole LSTM cells.
layer = RNN(peephole_lstm_cells)
input = keras.Input((timesteps, input_dim))
output = layer(input)
```
"""
def build(self, input_shape):
super(PeepholeLSTMCell, self).build(input_shape)
# The following are the weight matrices for the peephole connections. These
# are multiplied with the previous internal state during the computation of
# carry and output.
self.input_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='input_gate_peephole_weights',
initializer=self.kernel_initializer)
self.forget_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='forget_gate_peephole_weights',
initializer=self.kernel_initializer)
self.output_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='output_gate_peephole_weights',
initializer=self.kernel_initializer)
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +
self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(x_f + K.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +
self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(x_c + K.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +
self.output_gate_peephole_weights * c)
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0 +
self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(z1 +
self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)
return c, o
@keras_export(v1=['keras.layers.LSTM'])
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
Note that this cell is not optimized for performance on GPU. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
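  Example (an illustrative usage sketch; the layer size and input shape below
  are arbitrary):
  ```python
  inputs = keras.Input(shape=(10, 8))                      # (timesteps, features)
  last_output = LSTM(4)(inputs)                            # shape: (None, 4)
  full_sequence = LSTM(4, return_sequences=True)(inputs)   # shape: (None, 10, 4)
  ```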
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
                      'Please update your layer call.')
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(LSTM, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(LSTM, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
def _generate_dropout_mask(ones, rate, training=None, count=1):
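  """Returns `count` dropout mask(s) (a list if `count` > 1), each obtained by
  applying `K.dropout` to `ones` only in the training phase."""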
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return K.in_train_phase(dropped_inputs, ones, training=training)
def _standardize_args(inputs, initial_state, constants, num_constants):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Arguments:
    inputs: Tensor or list/tuple of tensors, which may include constants
      and initial states. In that case `num_constants` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
      part of the `inputs` list).
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
# There are several situations here:
# In the graph mode, __call__ will be only called once. The initial_state
# and constants could be in inputs (from file loading).
# In the eager mode, __call__ will be called twice, once during
# rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be
# model.fit/train_on_batch/predict with real np data. In the second case,
# the inputs will contain initial_state and constants as eager tensor.
#
# For either case, the real input is the first item in the list, which
# could be a nested structure itself. Then followed by initial_states, which
# could be a list of items, or list of list if the initial_state is complex
# structure, and finally followed by constants which is a flat list.
assert initial_state is None and constants is None
if num_constants:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[:1]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
"""Check whether the state_size contains multiple states."""
return (hasattr(state_size, '__len__') and
not isinstance(state_size, tensor_shape.TensorShape))
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
if inputs is not None:
batch_size = array_ops.shape(inputs)[0]
dtype = inputs.dtype
return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
'batch_size and dtype cannot be None while constructing initial state: '
'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))
def create_zeros(unnested_state_size):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return array_ops.zeros(init_state_size, dtype=dtype)
if nest.is_sequence(state_size):
return nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
| tensorflow-master | tensorflow/python/keras/layers/recurrent.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The V2 implementation of Normalization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.layers import normalization
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.BatchNormalization', v1=[]) # pylint: disable=missing-docstring
class BatchNormalization(normalization.BatchNormalizationBase):
__doc__ = normalization.replace_in_base_docstring([
('{{TRAINABLE_ATTRIBUTE_NOTE}}',
'''
  **About setting `layer.trainable = False` on a `BatchNormalization` layer:**
The meaning of setting `layer.trainable = False` is to freeze the layer,
i.e. its internal state will not change during training:
its trainable weights will not be updated
during `fit()` or `train_on_batch()`, and its state updates will not be run.
Usually, this does not necessarily mean that the layer is run in inference
mode (which is normally controlled by the `training` argument that can
be passed when calling a layer). "Frozen state" and "inference mode"
are two separate concepts.
However, in the case of the `BatchNormalization` layer, **setting
`trainable = False` on the layer means that the layer will be
subsequently run in inference mode** (meaning that it will use
the moving mean and the moving variance to normalize the current batch,
rather than using the mean and variance of the current batch).
This behavior has been introduced in TensorFlow 2.0, in order
to enable `layer.trainable = False` to produce the most commonly
expected behavior in the convnet fine-tuning use case.
Note that:
- This behavior only occurs as of TensorFlow 2.0. In 1.*,
setting `layer.trainable = False` would freeze the layer but would
not switch it to inference mode.
  - Setting `trainable` on a model containing other layers will
recursively set the `trainable` value of all inner layers.
- If the value of the `trainable`
attribute is changed after calling `compile()` on a model,
the new value doesn't take effect for this model
until `compile()` is called again.
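  For example, a typical fine-tuning sketch (illustrative):
  ```python
  layer = tf.keras.layers.BatchNormalization()
  layer.trainable = False  # freezes the weights *and* makes the layer run
                           # in inference mode when it is called
  ```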
''')])
_USE_V2_BEHAVIOR = True
| tensorflow-master | tensorflow/python/keras/layers/normalization_v2.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locally-connected layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LocallyConnected1D')
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Example:
```python
  # apply an unshared-weight 1D convolution of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: Currently only supports `"valid"` (case-insensitive).
`"same"` may be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, length)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1` or `2`.
`1` loops over input spatial locations to perform the forward pass.
It is memory-efficient but performs a lot of (small) ops.
`2` stores layer weights in a dense but sparsely-populated 2D matrix
and implements the forward pass as a single matrix-multiply. It uses
a lot of RAM but performs few (large) ops.
Depending on the inputs, layer parameters, hardware, and
`tf.executing_eagerly()` one implementation can be dramatically faster
(e.g. 50X) than another.
It is recommended to benchmark both in the setting of interest to pick
the most efficient one (in terms of speed and memory usage).
      The following scenarios could benefit from setting `implementation=2`:
- eager execution;
- inference;
- running on CPU;
- large amount of RAM available;
- small models (few filters, small kernel);
- using `padding=same` (only possible with `implementation=2`).
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs):
super(LocallyConnected1D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid' and implementation == 1:
raise ValueError('Invalid border mode for LocallyConnected1D '
'(only "valid" is supported if implementation is 1): '
+ padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.implementation = implementation
self.input_spec = InputSpec(ndim=3)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_first':
input_dim, input_length = input_shape[1], input_shape[2]
else:
input_dim, input_length = input_shape[2], input_shape[1]
if input_dim is None:
raise ValueError('Axis 2 of input should be fully-defined. '
'Found shape:', input_shape)
self.output_length = conv_utils.conv_output_length(
input_length, self.kernel_size[0], self.padding, self.strides[0])
if self.implementation == 1:
self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
elif self.implementation == 2:
if self.data_format == 'channels_first':
self.kernel_shape = (input_dim, input_length,
self.filters, self.output_length)
else:
self.kernel_shape = (input_length, input_dim,
self.output_length, self.filters)
self.kernel = self.add_weight(shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_mask = get_locallyconnected_mask(
input_shape=(input_length,),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format
)
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.output_length, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
else:
self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
input_length = input_shape[2]
else:
input_length = input_shape[1]
length = conv_utils.conv_output_length(input_length, self.kernel_size[0],
self.padding, self.strides[0])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, length)
elif self.data_format == 'channels_last':
return (input_shape[0], length, self.filters)
def call(self, inputs):
if self.implementation == 1:
output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
(self.output_length,), self.data_format)
elif self.implementation == 2:
output = local_conv_matmul(inputs, self.kernel, self.kernel_mask,
self.compute_output_shape(inputs.shape))
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'implementation':
self.implementation
}
base_config = super(LocallyConnected1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.LocallyConnected2D')
class LocallyConnected2D(Layer):
"""Locally-connected layer for 2D inputs.
The `LocallyConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are unshared,
that is, a different set of filters is applied at each
different patch of the input.
Examples:
```python
  # apply a 3x3 unshared weights convolution with 64 output filters
  # on a 32x32 image with `data_format="channels_last"`:
  model = Sequential()
  model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
  # now model.output_shape == (None, 30, 30, 64)
  # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
  # parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
    padding: Currently only supports `"valid"` (case-insensitive).
      `"same"` will be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1` or `2`.
`1` loops over input spatial locations to perform the forward pass.
It is memory-efficient but performs a lot of (small) ops.
`2` stores layer weights in a dense but sparsely-populated 2D matrix
and implements the forward pass as a single matrix-multiply. It uses
a lot of RAM but performs few (large) ops.
Depending on the inputs, layer parameters, hardware, and
`tf.executing_eagerly()` one implementation can be dramatically faster
(e.g. 50X) than another.
It is recommended to benchmark both in the setting of interest to pick
the most efficient one (in terms of speed and memory usage).
      The following scenarios could benefit from setting `implementation=2`:
- eager execution;
- inference;
- running on CPU;
- large amount of RAM available;
- small models (few filters, small kernel);
- using `padding=same` (only possible with `implementation=2`).
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs):
super(LocallyConnected2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid' and implementation == 1:
raise ValueError('Invalid border mode for LocallyConnected2D '
'(only "valid" is supported if implementation is 1): '
+ padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.implementation = implementation
self.input_spec = InputSpec(ndim=4)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_last':
input_row, input_col = input_shape[1:-1]
input_filter = input_shape[3]
else:
input_row, input_col = input_shape[2:]
input_filter = input_shape[1]
if input_row is None or input_col is None:
raise ValueError('The spatial dimensions of the inputs to '
' a LocallyConnected2D layer '
'should be fully-defined, but layer received '
'the inputs shape ' + str(input_shape))
output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
self.padding, self.strides[0])
output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
self.padding, self.strides[1])
self.output_row = output_row
self.output_col = output_col
if self.implementation == 1:
self.kernel_shape = (
output_row * output_col,
self.kernel_size[0] * self.kernel_size[1] * input_filter,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
elif self.implementation == 2:
if self.data_format == 'channels_first':
self.kernel_shape = (input_filter, input_row, input_col,
self.filters, self.output_row, self.output_col)
else:
self.kernel_shape = (input_row, input_col, input_filter,
self.output_row, self.output_col, self.filters)
self.kernel = self.add_weight(shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_mask = get_locallyconnected_mask(
input_shape=(input_row, input_col),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format
)
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_row, output_col, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
else:
self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, self.filters)
def call(self, inputs):
if self.implementation == 1:
output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
(self.output_row, self.output_col),
self.data_format)
elif self.implementation == 2:
output = local_conv_matmul(inputs, self.kernel, self.kernel_mask,
self.compute_output_shape(inputs.shape))
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'implementation':
self.implementation
}
base_config = super(LocallyConnected2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_locallyconnected_mask(input_shape,
kernel_shape,
strides,
padding,
data_format):
"""Return a mask representing connectivity of a locally-connected operation.
This method returns a masking numpy array of 0s and 1s (of type `np.float32`)
that, when element-wise multiplied with a fully-connected weight tensor, masks
out the weights between disconnected input-output pairs and thus implements
local connectivity through a sparse fully-connected weight tensor.
Assume an unshared convolution with given parameters is applied to an input
having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)`
to produce an output with spatial shape `(d_out1, ..., d_outN)` (determined
by layer parameters such as `strides`).
This method returns a mask which can be broadcast-multiplied (element-wise)
with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer between
(N+1)-D activations (N spatial + 1 channel dimensions for input and output)
to make it perform an unshared convolution with given `kernel_shape`,
`strides`, `padding` and `data_format`.
Arguments:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`
spatial shape of the input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel
/ receptive field.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
data_format: a string, `"channels_first"` or `"channels_last"`.
Returns:
a `np.float32`-type `np.ndarray` of shape
`(1, d_in1, ..., d_inN, 1, d_out1, ..., d_outN)`
if `data_format == `"channels_first"`, or
`(d_in1, ..., d_inN, 1, d_out1, ..., d_outN, 1)`
if `data_format == "channels_last"`.
Raises:
ValueError: if `data_format` is neither `"channels_first"` nor
`"channels_last"`.
"""
mask = conv_utils.conv_kernel_mask(
input_shape=input_shape,
kernel_shape=kernel_shape,
strides=strides,
padding=padding
)
ndims = int(mask.ndim / 2)
if data_format == 'channels_first':
mask = np.expand_dims(mask, 0)
mask = np.expand_dims(mask, -ndims - 1)
elif data_format == 'channels_last':
mask = np.expand_dims(mask, ndims)
mask = np.expand_dims(mask, -1)
else:
raise ValueError('Unrecognized data_format: ' + str(data_format))
return mask
def local_conv_matmul(inputs, kernel, kernel_mask, output_shape):
"""Apply N-D convolution with un-shared weights using a single matmul call.
This method outputs `inputs . (kernel * kernel_mask)`
(with `.` standing for matrix-multiply and `*` for element-wise multiply)
and requires a precomputed `kernel_mask` to zero-out weights in `kernel` and
hence perform the same operation as a convolution with un-shared
(the remaining entries in `kernel`) weights. It also does the necessary
reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D.
Arguments:
inputs: (N+2)-D tensor with shape
`(batch_size, channels_in, d_in1, ..., d_inN)`
or
`(batch_size, d_in1, ..., d_inN, channels_in)`.
kernel: the unshared weights for N-D convolution,
an (N+2)-D tensor of shape:
`(d_in1, ..., d_inN, channels_in, d_out2, ..., d_outN, channels_out)`
or
`(channels_in, d_in1, ..., d_inN, channels_out, d_out2, ..., d_outN)`,
with the ordering of channels and spatial dimensions matching
that of the input.
Each entry is the weight between a particular input and
output location, similarly to a fully-connected weight matrix.
kernel_mask: a float 0/1 mask tensor of shape:
`(d_in1, ..., d_inN, 1, d_out2, ..., d_outN, 1)`
or
`(1, d_in1, ..., d_inN, 1, d_out2, ..., d_outN)`,
with the ordering of singleton and spatial dimensions
matching that of the input.
Mask represents the connectivity pattern of the layer and is
precomputed elsewhere based on layer parameters: stride,
padding, and the receptive field shape.
output_shape: a tuple of (N+2) elements representing the output shape:
`(batch_size, channels_out, d_out1, ..., d_outN)`
or
`(batch_size, d_out1, ..., d_outN, channels_out)`,
with the ordering of channels and spatial dimensions matching that of
the input.
Returns:
Output (N+2)-D tensor with shape `output_shape`.
"""
inputs_flat = K.reshape(inputs, (K.shape(inputs)[0], -1))
kernel = kernel_mask * kernel
kernel = make_2d(kernel, split_dim=K.ndim(kernel) // 2)
output_flat = K.math_ops.sparse_matmul(inputs_flat, kernel, b_is_sparse=True)
output = K.reshape(output_flat,
[K.shape(output_flat)[0],] + output_shape.as_list()[1:])
return output
def make_2d(tensor, split_dim):
"""Reshapes an N-dimensional tensor into a 2D tensor.
Dimensions before (excluding) and after (including) `split_dim` are grouped
together.
Arguments:
tensor: a tensor of shape `(d0, ..., d(N-1))`.
split_dim: an integer from 1 to N-1, index of the dimension to group
dimensions before (excluding) and after (including).
Returns:
Tensor of shape
`(d0 * ... * d(split_dim-1), d(split_dim) * ... * d(N-1))`.
"""
shape = K.array_ops.shape(tensor)
in_dims = shape[:split_dim]
out_dims = shape[split_dim:]
in_size = K.math_ops.reduce_prod(in_dims)
out_size = K.math_ops.reduce_prod(out_dims)
return K.array_ops.reshape(tensor, (in_size, out_size))
| tensorflow-master | tensorflow/python/keras/layers/local.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as keras_backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.layers import kernelized as kernel_layers
from tensorflow.python.keras.utils import kernelized_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _exact_gaussian(stddev):
return functools.partial(
kernelized_utils.exact_gaussian_kernel, stddev=stddev)
def _exact_laplacian(stddev):
return functools.partial(
kernelized_utils.exact_laplacian_kernel, stddev=stddev)
class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
def _assert_all_close(self, expected, actual, atol=0.001):
if not context.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
self.assertAllClose(expected, actual, atol=atol)
else:
self.assertAllClose(expected, actual, atol=atol)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_output_dim(self):
with self.assertRaisesRegexp(
ValueError, r'`output_dim` should be a positive integer. Given: -3.'):
_ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)
@test_util.run_in_graph_and_eager_modes()
def test_unsupported_kernel_type(self):
with self.assertRaisesRegexp(
ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'):
_ = kernel_layers.RandomFourierFeatures(
3, 'unsupported_kernel', stddev=2.0)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_scale(self):
with self.assertRaisesRegexp(
ValueError,
r'When provided, `scale` should be a positive float. Given: 0.0.'):
_ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_input_shape(self):
inputs = random_ops.random_uniform((3, 2, 4), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
with self.assertRaisesRegexp(
ValueError,
r'The rank of the input tensor should be 2. Got 3 instead.'):
_ = rff_layer(inputs)
@parameterized.named_parameters(
('gaussian', 'gaussian', 10.0, False),
('random', init_ops.random_uniform_initializer, 1.0, True))
@test_util.run_in_graph_and_eager_modes()
def test_random_features_properties(self, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
kernel_initializer=initializer,
scale=scale,
trainable=trainable)
self.assertEqual(rff_layer.output_dim, 10)
self.assertEqual(rff_layer.kernel_initializer, initializer)
self.assertEqual(rff_layer.scale, scale)
self.assertEqual(rff_layer.trainable, trainable)
@parameterized.named_parameters(('gaussian', 'gaussian', False),
('laplacian', 'laplacian', True),
('other', init_ops.ones_initializer, True))
@test_util.run_in_graph_and_eager_modes()
def test_call(self, initializer, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
kernel_initializer=initializer,
scale=1.0,
trainable=trainable,
name='random_fourier_features')
inputs = random_ops.random_uniform((3, 2), seed=1)
outputs = rff_layer(inputs)
self.assertListEqual([3, 10], outputs.shape.as_list())
num_trainable_vars = 1 if trainable else 0
self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)
@test_util.assert_no_new_pyobjects_executing_eagerly
  def test_no_eager_leak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = random_ops.random_uniform((5, 4), seed=1)
kernel_layers.RandomFourierFeatures(output_dim=4, name='rff')(inputs)
kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs)
@test_util.run_in_graph_and_eager_modes()
def test_output_shape(self):
inputs = random_ops.random_uniform((3, 2), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=7, name='random_fourier_features', trainable=True)
outputs = rff_layer(inputs)
self.assertEqual([3, 7], outputs.shape.as_list())
@parameterized.named_parameters(
('gaussian', 'gaussian'), ('laplacian', 'laplacian'),
('other', init_ops.random_uniform_initializer))
@test_util.run_deprecated_v1
def test_call_on_placeholder(self, initializer):
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5,
kernel_initializer=initializer,
name='random_fourier_features')
with self.assertRaisesRegexp(
ValueError, r'The last dimension of the inputs to '
'`RandomFourierFeatures` should be defined. Found `None`.'):
rff_layer(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5,
kernel_initializer=initializer,
name='random_fourier_features')
with self.assertRaisesRegexp(
ValueError, r'The last dimension of the inputs to '
'`RandomFourierFeatures` should be defined. Found `None`.'):
rff_layer(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5, name='random_fourier_features')
rff_layer(inputs)
@parameterized.named_parameters(('gaussian', 10, 'gaussian', 2.0),
('laplacian', 5, 'laplacian', None),
('other', 10, init_ops.ones_initializer, 1.0))
@test_util.run_in_graph_and_eager_modes()
def test_compute_output_shape(self, output_dim, initializer, scale):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim, initializer, scale=scale, name='rff')
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape(None))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape([]))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape([3]))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape([3, 2, 3]))
with self.assertRaisesRegexp(
ValueError, r'The innermost dimension of input shape must be defined.'):
rff_layer.compute_output_shape(tensor_shape.TensorShape([3, None]))
self.assertEqual([None, output_dim],
rff_layer.compute_output_shape((None, 3)).as_list())
self.assertEqual([None, output_dim],
rff_layer.compute_output_shape(
tensor_shape.TensorShape([None, 2])).as_list())
self.assertEqual([4, output_dim],
rff_layer.compute_output_shape((4, 1)).as_list())
@parameterized.named_parameters(
('gaussian', 10, 'gaussian', 3.0, False),
('laplacian', 5, 'laplacian', 5.5, True),
('other', 7, init_ops.random_uniform_initializer(), None, True))
@test_util.run_in_graph_and_eager_modes()
def test_get_config(self, output_dim, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim,
initializer,
scale=scale,
trainable=trainable,
name='random_fourier_features',
)
expected_initializer = initializer
if isinstance(initializer, init_ops.Initializer):
expected_initializer = initializers.serialize(initializer)
expected_config = {
'output_dim': output_dim,
'kernel_initializer': expected_initializer,
'scale': scale,
'name': 'random_fourier_features',
'trainable': trainable,
'dtype': None,
}
self.assertLen(expected_config, len(rff_layer.get_config()))
self.assertSameElements(
list(expected_config.items()), list(rff_layer.get_config().items()))
@parameterized.named_parameters(
('gaussian', 5, 'gaussian', None, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 7, init_ops.ones_initializer(), 2.0, True))
@test_util.run_in_graph_and_eager_modes()
def test_from_config(self, output_dim, initializer, scale, trainable):
model_config = {
'output_dim': output_dim,
'kernel_initializer': initializer,
'scale': scale,
'trainable': trainable,
'name': 'random_fourier_features',
}
rff_layer = kernel_layers.RandomFourierFeatures.from_config(model_config)
self.assertEqual(rff_layer.output_dim, output_dim)
self.assertEqual(rff_layer.kernel_initializer, initializer)
self.assertEqual(rff_layer.scale, scale)
self.assertEqual(rff_layer.trainable, trainable)
inputs = random_ops.random_uniform((3, 2), seed=1)
outputs = rff_layer(inputs)
self.assertListEqual([3, output_dim], outputs.shape.as_list())
num_trainable_vars = 1 if trainable else 0
self.assertLen(rff_layer.trainable_variables, num_trainable_vars)
if trainable:
self.assertEqual('random_fourier_features/random_features_scale:0',
rff_layer.trainable_variables[0].name)
self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)
@parameterized.named_parameters(
('gaussian', 10, 'gaussian', 3.0, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 10, init_ops.random_uniform_initializer(), None, True))
@test_util.run_in_graph_and_eager_modes()
def test_same_random_features_params_reused(self, output_dim, initializer,
scale, trainable):
"""Applying the layer on the same input twice gives the same output."""
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
trainable=trainable,
name='random_fourier_features')
inputs = constant_op.constant(
np.random.uniform(low=-1.0, high=1.0, size=(2, 4)))
output1 = rff_layer(inputs)
output2 = rff_layer(inputs)
self._assert_all_close(output1, output2)
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0), ('laplacian', 'laplacian', 3.0),
('other', init_ops.random_uniform_initializer(), 5.0))
@test_util.run_in_graph_and_eager_modes()
def test_different_params_similar_approximation(self, initializer, scale):
random_seed.set_random_seed(12345)
rff_layer1 = kernel_layers.RandomFourierFeatures(
output_dim=3000,
kernel_initializer=initializer,
scale=scale,
name='rff1')
rff_layer2 = kernel_layers.RandomFourierFeatures(
output_dim=2000,
kernel_initializer=initializer,
scale=scale,
name='rff2')
# Two distinct inputs.
x = constant_op.constant([[1.0, -1.0, 0.5]])
y = constant_op.constant([[-1.0, 1.0, 1.0]])
# Apply both layers to both inputs.
output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x)
output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y)
output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x)
output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)
# Compute the inner products of the outputs (on inputs x and y) for both
# layers. For any fixed random features layer rff_layer, and inputs x, y,
# rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)
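  # Both layers above approximate the same kernel (same initializer and scale),
  # so their inner-product estimates should agree within the tolerance even
  # though their output dimensions differ.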
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
('laplacian', 'laplacian', 20.0, _exact_laplacian(stddev=20.0)))
@test_util.run_in_graph_and_eager_modes()
def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
"""Approximation is bad when output dimension is small."""
# Two distinct inputs.
x = constant_op.constant([[1.0, -1.0, 0.5]])
y = constant_op.constant([[-1.0, 1.0, 1.0]])
small_output_dim = 10
random_seed.set_random_seed(1234)
# Initialize layer.
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=small_output_dim,
kernel_initializer=initializer,
scale=scale,
name='random_fourier_features')
# Apply layer to both inputs.
output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x)
output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)
    # The inner product of the outputs (on inputs x and y) approximates the
    # exact kernel value, but only coarsely, because the output dimension of
    # the layer is small.
exact_kernel_value = exact_kernel_fn(x, y)
approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
if not context.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
abs_error_eval = sess.run([abs_error])
self.assertGreater(abs_error_eval[0][0], 0.05)
self.assertLess(abs_error_eval[0][0], 0.5)
else:
self.assertGreater(abs_error, 0.05)
self.assertLess(abs_error, 0.5)
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
('laplacian', 'laplacian', 10.0, _exact_laplacian(stddev=10.0)))
@test_util.run_in_graph_and_eager_modes()
def test_good_kernel_approximation_multiple_inputs(self, initializer, scale,
exact_kernel_fn):
# Parameters.
input_dim = 5
output_dim = 2000
x_rows = 20
y_rows = 30
x = constant_op.constant(
np.random.uniform(size=(x_rows, input_dim)), dtype=dtypes.float32)
y = constant_op.constant(
np.random.uniform(size=(y_rows, input_dim)), dtype=dtypes.float32)
random_seed.set_random_seed(1234)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
name='random_fourier_features')
# The shapes of output_x and output_y are (x_rows, output_dim) and
# (y_rows, output_dim) respectively.
output_x = math.sqrt(2.0 / output_dim) * rff_layer(x)
output_y = math.sqrt(2.0 / output_dim) * rff_layer(y)
approx_kernel_matrix = kernelized_utils.inner_product(output_x, output_y)
exact_kernel_matrix = exact_kernel_fn(x, y)
self._assert_all_close(approx_kernel_matrix, exact_kernel_matrix, atol=0.05)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/kernelized_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional recurrent layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class ConvLSTMTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, sequence_len,
input_channel,
input_num_row, input_num_col)
else:
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
# test for return state:
x = keras.Input(batch_shape=inputs.shape)
kwargs = {'data_format': data_format,
'return_sequences': return_sequences,
'return_state': True,
'stateful': True,
'filters': filters,
'kernel_size': (num_row, num_col),
'padding': 'valid'}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
outputs = layer(x)
_, states = outputs[0], outputs[1:]
self.assertEqual(len(states), 2)
model = keras.models.Model(x, states[0])
state = model.predict(inputs)
self.assertAllClose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
# test for output shape:
testing_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={'data_format': data_format,
'return_sequences': return_sequences,
'filters': filters,
'kernel_size': (num_row, num_col),
'padding': 'valid'},
input_shape=inputs.shape)
def test_conv_lstm_statefulness(self):
# Tests for statefulness
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
with self.cached_session():
model = keras.models.Sequential()
kwargs = {'data_format': 'channels_last',
'return_sequences': False,
'filters': filters,
'kernel_size': (num_row, num_col),
'stateful': True,
'batch_input_shape': inputs.shape,
'padding': 'same'}
layer = keras.layers.ConvLSTM2D(**kwargs)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones_like(inputs))
# train once so that the states change
model.train_on_batch(np.ones_like(inputs),
np.random.random(out1.shape))
out2 = model.predict(np.ones_like(inputs))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out3.max(), out2.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones_like(inputs))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out4.max(), out5.max())
def test_conv_lstm_regularizers(self):
# check regularizers
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
with self.cached_session():
kwargs = {'data_format': 'channels_last',
'return_sequences': False,
'kernel_size': (num_row, num_col),
'stateful': True,
'filters': filters,
'batch_input_shape': inputs.shape,
'kernel_regularizer': keras.regularizers.L1L2(l1=0.01),
'recurrent_regularizer': keras.regularizers.L1L2(l1=0.01),
'activity_regularizer': 'l2',
'bias_regularizer': 'l2',
'kernel_constraint': 'max_norm',
'recurrent_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'padding': 'same'}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones(inputs.shape)))
self.assertEqual(len(layer.losses), 4)
def test_conv_lstm_dropout(self):
# check dropout
with self.cached_session():
testing_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={'data_format': 'channels_last',
'return_sequences': False,
'filters': 2,
'kernel_size': (3, 3),
'padding': 'same',
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(1, 2, 5, 5, 2))
def test_conv_lstm_cloning(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3)))
test_inputs = np.random.random((2, 4, 5, 5, 3))
reference_outputs = model.predict(test_inputs)
weights = model.get_weights()
# Use a new graph to clone the model
with self.cached_session():
clone = keras.models.clone_model(model)
clone.set_weights(weights)
outputs = clone.predict(test_inputs)
self.assertAllClose(reference_outputs, outputs, atol=1e-5)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/convolutional_recurrent_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper
from tensorflow.python.ops.array_ops import concat
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import util as trackable_util
class _RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, constant_size, **kwargs):
self.units = units
self.state_size = units
self.constant_size = constant_size
super(_RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(self.constant_size, self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units, 'constant_size': self.constant_size}
base_config = super(_RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
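# Hedged usage sketch (illustrative only; `_rnn_cell_with_constants_sketch` is a
# hypothetical helper that the tests below do not call). It shows the plain-RNN
# analogue of the Bidirectional-with-constants tests: the `constants` tensor is
# fed to the cell at every timestep alongside the regular input.
def _rnn_cell_with_constants_sketch():
  x = keras.Input((5, 5))
  c = keras.Input((3,))
  cell = _RNNCellWithConstants(32, 3)
  y = keras.layers.RNN(cell)(x, constants=c)  # output shape: (batch, 32)
  return keras.Model([x, c], y)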
class TimeDistributedTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_timedistributed_dense(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
# test config
model.get_config()
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
def test_timedistributed_static_batch_size(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
def test_timedistributed_invalid_init(self):
x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegexp(
ValueError,
'Please initialize `TimeDistributed` layer with a `Layer` instance.'):
keras.layers.TimeDistributed(x)
def test_timedistributed_conv2d(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Conv2D(5, (2, 2), padding='same'),
input_shape=(2, 4, 4, 3)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_timedistributed_stacked(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 3)),
epochs=1,
batch_size=10)
def test_regularizers(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2, kernel_regularizer='l1'),
input_shape=(3, 4)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
self.assertEqual(len(model.losses), 1)
def test_TimeDistributed_batchnorm(self):
with self.cached_session():
# test that wrapped BN updates still work.
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(
keras.layers.BatchNormalization(center=True, scale=True),
name='bn',
input_shape=(10, 2)))
model.compile(optimizer='rmsprop', loss='mse')
# Assert that mean and variance are 0 and 1.
td = model.layers[0]
self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Train
model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
# Assert that mean and variance changed.
assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Verify input_map has one mapping from inputs to reshaped inputs.
self.assertEqual(len(td._input_map.keys()), 1)
def test_TimeDistributed_trainable(self):
# test layers that need learning_phase to be set
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
_ = layer(x)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.trainable_weights), 2)
layer.trainable = False
assert not layer.updates
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
with self.cached_session():
# test with unspecified shape and Embeddings with mask_zero
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(
keras.layers.Embedding(5, 6, mask_zero=True),
input_shape=(None, None))) # N by t_1 by t_2 by 6
model.add(keras.layers.TimeDistributed(
keras.layers.SimpleRNN(7, return_sequences=True)))
model.add(keras.layers.TimeDistributed(
keras.layers.SimpleRNN(8, return_sequences=False)))
model.add(keras.layers.SimpleRNN(1, return_sequences=False))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4),
dtype='int32')
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(model_input,
np.random.random((10, 1)), epochs=1, batch_size=10)
mask_outputs = [model.layers[0].compute_mask(model.input)]
for layer in model.layers[1:]:
mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
func = keras.backend.function([model.input], mask_outputs[:-1])
mask_outputs_val = func([model_input])
ref_mask_val_0 = model_input > 0 # embedding layer
ref_mask_val_1 = ref_mask_val_0 # first RNN layer
ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer
ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
for i in range(3):
self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
self.assertIs(mask_outputs[-1], None) # final layer
def test_TimeDistributed_with_masking_layer(self):
with self.cached_session():
# test with Masking layer
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(keras.layers.Masking(
mask_value=0.,), input_shape=(None, 4)))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(5)))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
for i in range(4):
model_input[i, i:, :] = 0.
model.compile(optimizer='rmsprop', loss='mse')
model.fit(model_input,
np.random.random((10, 3, 5)), epochs=1, batch_size=6)
mask_outputs = [model.layers[0].compute_mask(model.input)]
mask_outputs += [model.layers[1].compute_mask(model.layers[1].input,
mask_outputs[-1])]
func = keras.backend.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
self.assertEqual((mask_outputs_val[0]).all(),
model_input.all())
self.assertEqual((mask_outputs_val[1]).all(),
model_input.all())
def test_TimeDistributed_with_different_time_shapes(self):
time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
ph_1 = keras.backend.placeholder(shape=(None, 10, 13))
out_1 = time_dist(ph_1)
self.assertEqual(out_1.shape.as_list(), [None, 10, 5])
ph_2 = keras.backend.placeholder(shape=(None, 1, 13))
out_2 = time_dist(ph_2)
self.assertEqual(out_2.shape.as_list(), [None, 1, 5])
ph_3 = keras.backend.placeholder(shape=(None, 1, 18))
with self.assertRaisesRegexp(ValueError, 'is incompatible with layer'):
time_dist(ph_3)
def test_TimeDistributed_with_invalid_dimensions(self):
time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
ph = keras.backend.placeholder(shape=(None, 10))
with self.assertRaisesRegexp(
ValueError,
'`TimeDistributed` Layer should be passed an `input_shape `'):
time_dist(ph)
@tf_test_util.run_in_graph_and_eager_modes
def test_TimeDistributed_reshape(self):
class NoReshapeLayer(keras.layers.Layer):
def call(self, inputs):
return inputs
# Built-in layers that aren't stateful use the reshape implementation.
td1 = keras.layers.TimeDistributed(keras.layers.Dense(5))
self.assertTrue(td1._always_use_reshape)
# Built-in layers that are stateful don't use the reshape implementation.
td2 = keras.layers.TimeDistributed(
keras.layers.RNN(keras.layers.SimpleRNNCell(10), stateful=True))
self.assertFalse(td2._always_use_reshape)
# Custom layers are not whitelisted for the fast reshape implementation.
td3 = keras.layers.TimeDistributed(NoReshapeLayer())
self.assertFalse(td3._always_use_reshape)
@tf_test_util.run_in_graph_and_eager_modes
def test_TimeDistributed_output_shape_return_types(self):
class TestLayer(keras.layers.Layer):
def call(self, inputs):
return concat([inputs, inputs], axis=-1)
def compute_output_shape(self, input_shape):
output_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape[-1] = output_shape[-1] * 2
output_shape = tensor_shape.TensorShape(output_shape)
return output_shape
class TestListLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestListLayer, self).compute_output_shape(input_shape)
return shape.as_list()
class TestTupleLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
return tuple(shape.as_list())
# Layers can specify output shape as list/tuple/TensorShape
test_layers = [TestLayer, TestListLayer, TestTupleLayer]
for layer in test_layers:
input_layer = keras.layers.TimeDistributed(layer())
inputs = keras.backend.placeholder(shape=(None, 2, 4))
output = input_layer(inputs)
self.assertEqual(output.shape.as_list(), [None, 2, 8])
self.assertEqual(
input_layer.compute_output_shape([None, 2, 4]).as_list(),
[None, 2, 8])
@tf_test_util.run_all_in_graph_and_eager_modes
class BidirectionalTest(test.TestCase, parameterized.TestCase):
def test_bidirectional(self):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
for mode in ['sum', 'concat', 'ave', 'mul']:
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
# test compute output shape
ref_shape = model.layers[-1].output.shape
shape = model.layers[-1].compute_output_shape(
(None, timesteps, dim))
self.assertListEqual(shape.as_list(), ref_shape.as_list())
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_bidirectional_invalid_init(self):
x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegexp(
ValueError,
'Please initialize `Bidirectional` layer with a `Layer` instance.'):
keras.layers.Bidirectional(x)
def test_bidirectional_weight_loading(self):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), input_shape=(timesteps, dim)))
y_ref = model.predict(x)
weights = model.layers[-1].get_weights()
model.layers[-1].set_weights(weights)
y = model.predict(x)
self.assertAllClose(y, y_ref)
def test_bidirectional_stacked(self):
# test stacked bidirectional layers
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.layers.Input((timesteps, dim))
output = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
def test_bidirectional_statefulness(self):
# Bidirectional and stateful
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
output = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
def test_Bidirectional_merged_value(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
for merge_mode in ['sum', 'mul', 'ave', 'concat', None]:
if merge_mode == 'sum':
merge_func = lambda y, y_rev: y + y_rev
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: y * y_rev
elif merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
else:
merge_func = lambda y, y_rev: [y, y_rev]
# basic case
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_sequences=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], _to_list(layer(inputs)))
f_forward = keras.backend.function([inputs],
[layer.forward_layer(inputs)])
f_backward = keras.backend.function(
[inputs],
[keras.backend.reverse(layer.backward_layer(inputs), 1)])
y_merged = f_merged(x)
y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0]))
assert len(y_merged) == len(y_expected)
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
# test return_state
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], layer(inputs))
f_forward = keras.backend.function([inputs],
layer.forward_layer(inputs))
f_backward = keras.backend.function([inputs],
layer.backward_layer(inputs))
n_states = len(layer.layer.states)
y_merged = f_merged(x)
y_forward = f_forward(x)
y_backward = f_backward(x)
y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
assert len(y_merged) == len(y_expected) + n_states * 2
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
y_merged = y_merged[-n_states * 2:]
y_forward = y_forward[-n_states:]
y_backward = y_backward[-n_states:]
for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
self.assertAllClose(state_birnn, state_inner, atol=1e-5)
def test_Bidirectional_dropout(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'sum'
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs, training=True))
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
y1 = _to_list(model.predict(x))
y2 = _to_list(model.predict(x))
for x1, x2 in zip(y1, y2):
self.assertAllClose(x1, x2, atol=1e-5)
def test_Bidirectional_state_reuse(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = keras.layers.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
# test passing invalid initial_state: passing a tensor
input2 = keras.layers.Input((timesteps, dim))
with self.assertRaises(ValueError):
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state[0])
# test valid usage: passing a list
output = keras.layers.Bidirectional(rnn(units))(input2,
initial_state=state)
model = keras.models.Model([input1, input2], output)
assert len(model.layers) == 4
assert isinstance(model.layers[-1].input, list)
inputs = [np.random.rand(samples, timesteps, dim),
np.random.rand(samples, timesteps, dim)]
model.predict(inputs)
def test_Bidirectional_state_reuse_with_np_input(self):
# See https://github.com/tensorflow/tensorflow/issues/28761 for more detail.
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = np.random.rand(samples, timesteps, dim).astype(np.float32)
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
input2 = np.random.rand(samples, timesteps, dim).astype(np.float32)
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state)
def test_Bidirectional_trainable(self):
# test layers that need learning_phase to be set
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert len(layer.trainable_weights) == 6
layer.trainable = False
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.trainable_weights) == 6
def test_Bidirectional_updates(self):
if context.executing_eagerly():
self.skipTest('layer.updates is only available in graph mode.')
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
x_reachable_update = x * x
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert not layer.updates
assert not layer.get_updates_for(None)
assert not layer.get_updates_for(x)
# TODO(b/128684069): Remove when Wrapper sublayers are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
layer.forward_layer.add_update(x_reachable_update, inputs=x)
layer.forward_layer.add_update(1, inputs=None)
layer.backward_layer.add_update(x_reachable_update, inputs=x)
layer.backward_layer.add_update(1, inputs=None)
assert len(layer.updates) == 4
assert len(layer.get_updates_for(None)) == 2
assert len(layer.get_updates_for(x)) == 2
def test_Bidirectional_losses(self):
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
x_reachable_loss = x * x
layer = keras.layers.Bidirectional(
keras.layers.SimpleRNN(
3, kernel_regularizer='l1', bias_regularizer='l1'))
_ = layer(x)
assert len(layer.losses) == 4
assert len(layer.get_losses_for(None)) == 4
assert not layer.get_losses_for(x)
# Create a random tensor that is not conditional on the inputs.
with keras.backend.get_graph().as_default():
const_tensor = constant_op.constant(1)
layer.forward_layer.add_loss(x_reachable_loss, inputs=x)
layer.forward_layer.add_loss(const_tensor, inputs=None)
layer.backward_layer.add_loss(x_reachable_loss, inputs=x)
layer.backward_layer.add_loss(const_tensor, inputs=None)
assert len(layer.losses) == 8
assert len(layer.get_losses_for(None)) == 6
assert len(layer.get_losses_for(x)) == 2
def test_Bidirectional_with_constants(self):
with self.cached_session():
# Test basic case.
x = keras.Input((5, 5))
c = keras.Input((3,))
cell = _RNNCellWithConstants(32, 3)
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
y = layer(x, constants=c)
model = keras.Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, constants=c)
model = keras.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, c])
model = keras.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_Bidirectional_with_constants_layer_passing_initial_state(self):
with self.cached_session():
# Test basic case.
x = keras.Input((5, 5))
c = keras.Input((3,))
s_for = keras.Input((32,))
s_bac = keras.Input((32,))
cell = _RNNCellWithConstants(32, 3)
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = keras.Model([x, s_for, s_bac, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)),
np.zeros((6, 32)),
np.zeros((6, 32)),
np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_fw_np = np.random.random((6, 32))
s_bk_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = keras.Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Verify that state is used
y_np_2_different_s = model.predict(
[x_np, s_fw_np + 10., s_bk_np + 10., c_np])
assert np.mean(y_np - y_np_2_different_s) != 0
# Test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, s_for, s_bac, c])
model = keras.Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes
def test_Bidirectional_output_shape_return_types(self):
class TestLayer(keras.layers.SimpleRNN):
def call(self, inputs):
return concat([inputs, inputs], axis=-1)
def compute_output_shape(self, input_shape):
output_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape[-1] = output_shape[-1] * 2
return tensor_shape.TensorShape(output_shape)
class TestListLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestListLayer, self).compute_output_shape(input_shape)
return shape.as_list()
class TestTupleLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
return tuple(shape.as_list())
# Layers can specify output shape as list/tuple/TensorShape
test_layers = [TestLayer, TestListLayer, TestTupleLayer]
for layer in test_layers:
input_layer = keras.layers.Bidirectional(layer(1))
inputs = keras.backend.placeholder(shape=(None, 2, 4))
output = input_layer(inputs)
self.assertEqual(output.shape.as_list(), [None, 2, 16])
self.assertEqual(
input_layer.compute_output_shape([None, 2, 4]).as_list(),
[None, 2, 16])
def test_Bidirectional_last_output_with_masking(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'concat'
x = np.random.rand(samples, timesteps, dim)
    # Clear the first record's timestep 2. The last output should be the same
    # as the state, not zeroed.
x[0, 2] = 0
with self.cached_session():
inputs = keras.Input((timesteps, dim))
masked_inputs = keras.layers.Masking()(inputs)
wrapped = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(masked_inputs, training=True))
self.assertLen(outputs, 5)
self.assertEqual(outputs[0].shape.as_list(), [None, units * 2])
model = keras.Model(inputs, outputs)
y = _to_list(model.predict(x))
self.assertLen(y, 5)
self.assertAllClose(y[0], np.concatenate([y[1], y[3]], axis=1))
def test_Bidirectional_sequence_output_with_masking(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'concat'
x = np.random.rand(samples, timesteps, dim)
    # Clear the first record's timestep 2 and expect the output at timestep 2
    # to be zeros as well.
x[0, 2] = 0
with self.cached_session():
inputs = keras.Input((timesteps, dim))
masked_inputs = keras.layers.Masking()(inputs)
wrapped = keras.layers.Bidirectional(
rnn(units, return_sequences=True),
merge_mode=merge_mode)
outputs = _to_list(wrapped(masked_inputs, training=True))
self.assertLen(outputs, 1)
self.assertEqual(outputs[0].shape.as_list(), [None, timesteps, units * 2])
model = keras.Model(inputs, outputs)
y = _to_list(model.predict(x))
self.assertLen(y, 1)
self.assertAllClose(y[0][0, 2], np.zeros(units * 2))
@parameterized.parameters(['sum', 'concat'])
@tf_test_util.run_in_graph_and_eager_modes
def test_custom_backward_layer(self, mode):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
forward_layer = rnn(output_dim)
backward_layer = rnn(output_dim, go_backwards=True)
# test with Sequential model
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
forward_layer,
merge_mode=mode,
backward_layer=backward_layer,
input_shape=(timesteps, dim)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
# test compute output shape
ref_shape = model.layers[-1].output.shape
shape = model.layers[-1].compute_output_shape((None, timesteps, dim))
self.assertListEqual(shape.as_list(), ref_shape.as_list())
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
@tf_test_util.run_in_graph_and_eager_modes
def test_custom_backward_layer_error_check(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units)
with self.assertRaisesRegexp(ValueError,
'should have different `go_backwards` value.'):
keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
for attr in ('stateful', 'return_sequences', 'return_state'):
kwargs = {attr: True}
backward_layer = rnn(units, go_backwards=True, **kwargs)
with self.assertRaisesRegexp(
ValueError, 'expected to have the same value for attribute ' + attr):
keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
def test_custom_backward_layer_serialization(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
layer_from_config = keras.layers.Bidirectional.from_config(config)
new_config = layer_from_config.get_config()
self.assertDictEqual(config, new_config)
def test_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
layer = keras.layers.Bidirectional(rnn(units, name='rnn'))
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'rnn')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_rnn')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_rnn')
def test_custom_backward_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'lstm')
self.assertEqual(config['backward_layer']['config']['name'], 'lstm_1')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_lstm')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_lstm_1')
def test_rnn_with_customized_cell(self):
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
class ResidualLSTMCell(keras.layers.LSTMCell):
def call(self, inputs, states, training=None):
output, states = super(ResidualLSTMCell, self).call(inputs, states)
return output + inputs, states
cell = ResidualLSTMCell(units)
forward_layer = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
bidirectional_rnn = keras.layers.Bidirectional(
forward_layer, merge_mode=merge_mode)
outputs = _to_list(bidirectional_rnn(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
# Test stacking
cell = [ResidualLSTMCell(units), ResidualLSTMCell(units)]
forward_layer = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
bidirectional_rnn = keras.layers.Bidirectional(
forward_layer, merge_mode=merge_mode)
outputs = _to_list(bidirectional_rnn(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
@tf_test_util.run_v2_only
def test_wrapped_rnn_cell(self):
# See https://github.com/tensorflow/tensorflow/issues/26581.
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = keras.layers.LSTMCell(units)
cell = ResidualWrapper(cell)
rnn = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(rnn, merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
def _to_list(ls):
if isinstance(ls, list):
return ls
else:
return [ls]
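# `_to_list` lets the tests above treat single-tensor and list outputs uniformly
# when zipping predicted values for comparison.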
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/wrappers_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class GRULayerTest(keras_parameterized.TestCase):
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_reset_after_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=num_samples,
test_samples=0,
input_shape=(timesteps, embedding_dim),
num_classes=units)
y_train = keras.utils.to_categorical(y_train, units)
inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
gru_layer = keras.layers.GRU(units,
reset_after=True)
output = gru_layer(inputs)
gru_model = keras.models.Model(inputs, output)
gru_model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
gru_model.fit(x_train, y_train)
gru_model.predict(x_train)
def test_with_masking_layer_GRU(self):
layer_class = keras.layers.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_statefulness_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
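    # With mask_zero=True the padded (zero) timesteps do not update the
    # recurrent state, so left- and right-padded inputs yield the same final
    # output once the states are reset in between.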
@tf_test_util.run_all_in_graph_and_eager_modes
class GRULayerGenericTest(test.TestCase):
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_from_config_GRU(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/gru_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class Pooling1D(Layer):
"""Pooling layer for arbitrary pooling functions, for 1D inputs.
This class only exists for code reuse. It will never be an exposed API.
Arguments:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling1D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=3)
def call(self, inputs):
pad_axis = 2 if self.data_format == 'channels_last' else 3
inputs = array_ops.expand_dims(inputs, pad_axis)
outputs = self.pool_function(
inputs,
self.pool_size + (1,),
strides=self.strides + (1,),
padding=self.padding,
data_format=self.data_format)
return array_ops.squeeze(outputs, pad_axis)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
steps = input_shape[2]
features = input_shape[1]
else:
steps = input_shape[1]
features = input_shape[2]
length = conv_utils.conv_output_length(steps,
self.pool_size[0],
self.padding,
self.strides[0])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([input_shape[0], features, length])
else:
return tensor_shape.TensorShape([input_shape[0], length, features])
def get_config(self):
config = {
'strides': self.strides,
'pool_size': self.pool_size,
'padding': self.padding,
'data_format': self.data_format,
}
base_config = super(Pooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
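# --- Editorial example (not part of the original module) --------------------
# Pooling1D.call above implements 1D pooling by inserting a dummy spatial axis
# and delegating to a 2D pooling function. A minimal sketch of that trick,
# assuming the public `tf.keras` / `tf.nn` API with TF 2.x eager execution;
# `_sketch_1d_pooling_via_2d` is a hypothetical helper added for illustration.
def _sketch_1d_pooling_via_2d():
  import numpy as np
  import tensorflow as tf
  x = np.random.random((2, 6, 3)).astype('float32')   # (batch, steps, features)
  keras_out = tf.keras.layers.MaxPooling1D(pool_size=2)(x)
  expanded = tf.expand_dims(x, axis=2)                 # (batch, steps, 1, features)
  pooled = tf.nn.max_pool2d(
      expanded, ksize=(2, 1), strides=(2, 1), padding='VALID')
  manual = tf.squeeze(pooled, axis=2)                  # (batch, new_steps, features)
  np.testing.assert_allclose(keras_out.numpy(), manual.numpy(), atol=1e-6)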
@keras_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D')
class MaxPooling1D(Pooling1D):
"""Max pooling operation for temporal data.
Arguments:
pool_size: Integer, size of the max pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
"""
def __init__(self, pool_size=2, strides=None,
padding='valid', data_format='channels_last', **kwargs):
super(MaxPooling1D, self).__init__(
functools.partial(backend.pool2d, pool_mode='max'),
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
@keras_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D')
class AveragePooling1D(Pooling1D):
"""Average pooling for temporal data.
Arguments:
pool_size: Integer, size of the average pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
"""
def __init__(self, pool_size=2, strides=None,
padding='valid', data_format='channels_last', **kwargs):
super(AveragePooling1D, self).__init__(
functools.partial(backend.pool2d, pool_mode='avg'),
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
class Pooling2D(Layer):
"""Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images).
This class only exists for code reuse. It will never be an exposed API.
Arguments:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format=None,
name=None, **kwargs):
super(Pooling2D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def call(self, inputs):
if self.data_format == 'channels_last':
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
else:
pool_shape = (1, 1) + self.pool_size
strides = (1, 1) + self.strides
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
data_format=conv_utils.convert_data_format(self.data_format, 4))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
else:
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
else:
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
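# --- Editorial example (not part of the original module) --------------------
# compute_output_shape above uses conv_utils.conv_output_length, which works
# out to ceil((size - pool + 1) / stride) for 'valid' padding and
# ceil(size / stride) for 'same'. A small worked check, assuming the public
# `tf.keras` API; `_sketch_pooling2d_output_length` is a hypothetical helper
# added for illustration.
def _sketch_pooling2d_output_length():
  import math
  import tensorflow as tf
  size, pool, stride = 7, 3, 2
  valid_len = math.ceil((size - pool + 1) / stride)    # 3
  same_len = math.ceil(size / stride)                  # 4
  valid_layer = tf.keras.layers.MaxPooling2D(
      pool_size=pool, strides=stride, padding='valid')
  same_layer = tf.keras.layers.MaxPooling2D(
      pool_size=pool, strides=stride, padding='same')
  assert (valid_layer.compute_output_shape((None, size, size, 8)).as_list()
          == [None, valid_len, valid_len, 8])
  assert (same_layer.compute_output_shape((None, size, size, 8)).as_list()
          == [None, same_len, same_len, 8])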
@keras_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D')
class MaxPooling2D(Pooling2D):
"""Max pooling operation for spatial data.
Arguments:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
      `(2, 2)` will halve the input in both spatial dimensions.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling2D, self).__init__(
nn.max_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
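# --- Editorial example (not part of the original module) --------------------
# With the default pool_size=(2, 2) and strides=None (falling back to
# pool_size), MaxPooling2D halves both spatial dimensions, as the docstring
# above describes. A minimal sketch assuming the public `tf.keras` API and
# TF 2.x eager execution; `_sketch_max_pooling_2d_halves_input` is a
# hypothetical helper added for illustration.
def _sketch_max_pooling_2d_halves_input():
  import numpy as np
  import tensorflow as tf
  x = np.random.random((1, 32, 32, 3)).astype('float32')
  y = tf.keras.layers.MaxPooling2D()(x)
  assert y.shape == (1, 16, 16, 3)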
@keras_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D')
class AveragePooling2D(Pooling2D):
"""Average pooling operation for spatial data.
Arguments:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
      `(2, 2)` will halve the input in both spatial dimensions.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling2D, self).__init__(
nn.avg_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
class Pooling3D(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Arguments:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling3D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == 'channels_first':
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper())
if self.data_format == 'channels_first':
outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0],
self.padding, self.strides[0])
len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1],
self.padding, self.strides[1])
len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2],
self.padding, self.strides[2])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3])
else:
return tensor_shape.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
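# --- Editorial example (not part of the original module) --------------------
# Pooling3D.call above works around the missing `channels_first` support in
# the underlying 3D pooling ops by transposing to `channels_last`, pooling,
# and transposing back. A minimal sketch of that equivalence, assuming the
# public `tf.keras` API with TF 2.x eager execution;
# `_sketch_pooling3d_channels_first` is a hypothetical helper added for
# illustration.
def _sketch_pooling3d_channels_first():
  import numpy as np
  import tensorflow as tf
  x = np.random.random((1, 2, 4, 4, 4)).astype('float32')  # (batch, channels, d1, d2, d3)
  cf = tf.keras.layers.MaxPooling3D(pool_size=2, data_format='channels_first')(x)
  # Reference path: transpose to channels_last, pool, transpose back.
  x_cl = np.transpose(x, (0, 2, 3, 4, 1))
  cl = tf.keras.layers.MaxPooling3D(pool_size=2, data_format='channels_last')(x_cl)
  np.testing.assert_allclose(
      cf.numpy(), np.transpose(cl.numpy(), (0, 4, 1, 2, 3)), atol=1e-6)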
@keras_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D')
class MaxPooling3D(Pooling3D):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Arguments:
pool_size: Tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling3D, self).__init__(
nn.max_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
@keras_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D')
class AveragePooling3D(Pooling3D):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Arguments:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling3D, self).__init__(
nn.avg_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
class GlobalPooling1D(Layer):
"""Abstract class for different global pooling 1D layers."""
def __init__(self, data_format='channels_last', **kwargs):
super(GlobalPooling1D, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.data_format = conv_utils.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[2]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(GlobalPooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling1D',
'keras.layers.GlobalAvgPool1D')
class GlobalAveragePooling1D(GlobalPooling1D):
"""Global average pooling operation for temporal data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(batch_size, steps)` indicating whether
a given step should be masked (excluded from the average).
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
2D tensor with shape `(batch_size, features)`.
"""
def __init__(self, data_format='channels_last', **kwargs):
super(GlobalAveragePooling1D, self).__init__(data_format=data_format,
**kwargs)
self.supports_masking = True
def call(self, inputs, mask=None):
steps_axis = 1 if self.data_format == 'channels_last' else 2
if mask is not None:
mask = math_ops.cast(mask, backend.floatx())
input_shape = inputs.shape.as_list()
broadcast_shape = [-1, input_shape[steps_axis], 1]
mask = array_ops.reshape(mask, broadcast_shape)
inputs *= mask
return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum(
mask, axis=steps_axis)
else:
return backend.mean(inputs, axis=steps_axis)
def compute_mask(self, inputs, mask=None):
return None
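# --- Editorial example (not part of the original module) --------------------
# GlobalAveragePooling1D.call above averages only over unmasked steps when a
# mask is passed. A minimal sketch, assuming the public `tf.keras` API with
# TF 2.x eager execution; `_sketch_masked_global_average_pooling` is a
# hypothetical helper added for illustration.
def _sketch_masked_global_average_pooling():
  import numpy as np
  import tensorflow as tf
  x = np.random.random((2, 4, 3)).astype('float32')
  mask = tf.constant([[True, True, False, False],
                      [True, True, True, True]])
  pooled = tf.keras.layers.GlobalAveragePooling1D()(x, mask=mask)
  # First sample: only the first two steps contribute to the average.
  np.testing.assert_allclose(
      pooled.numpy()[0], x[0, :2].mean(axis=0), atol=1e-5)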
@keras_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D')
class GlobalMaxPooling1D(GlobalPooling1D):
"""Global max pooling operation for temporal data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
2D tensor with shape `(batch_size, features)`.
"""
def call(self, inputs):
steps_axis = 1 if self.data_format == 'channels_last' else 2
return backend.max(inputs, axis=steps_axis)
class GlobalPooling2D(Layer):
"""Abstract class for different global pooling 2D layers.
"""
def __init__(self, data_format=None, **kwargs):
super(GlobalPooling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape([input_shape[0], input_shape[3]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(GlobalPooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling2D',
'keras.layers.GlobalAvgPool2D')
class GlobalAveragePooling2D(GlobalPooling2D):
"""Global average pooling operation for spatial data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2])
else:
return backend.mean(inputs, axis=[2, 3])
@keras_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D')
class GlobalMaxPooling2D(GlobalPooling2D):
"""Global max pooling operation for spatial data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2])
else:
return backend.max(inputs, axis=[2, 3])
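# --- Editorial example (not part of the original module) --------------------
# The global 2D pooling layers above reduce over the spatial axes, collapsing
# a (batch, rows, cols, channels) tensor to (batch, channels). A minimal
# sketch, assuming the public `tf.keras` API with TF 2.x eager execution;
# `_sketch_global_pooling_2d` is a hypothetical helper added for illustration.
def _sketch_global_pooling_2d():
  import numpy as np
  import tensorflow as tf
  x = np.random.random((2, 5, 7, 3)).astype('float32')
  avg = tf.keras.layers.GlobalAveragePooling2D()(x)
  mx = tf.keras.layers.GlobalMaxPooling2D()(x)
  assert avg.shape == (2, 3) and mx.shape == (2, 3)
  np.testing.assert_allclose(avg.numpy(), x.mean(axis=(1, 2)), atol=1e-5)
  np.testing.assert_allclose(mx.numpy(), x.max(axis=(1, 2)), atol=1e-6)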
class GlobalPooling3D(Layer):
"""Abstract class for different global pooling 3D layers."""
def __init__(self, data_format=None, **kwargs):
super(GlobalPooling3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape([input_shape[0], input_shape[4]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(GlobalPooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling3D',
'keras.layers.GlobalAvgPool3D')
class GlobalAveragePooling3D(GlobalPooling3D):
"""Global Average pooling operation for 3D data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2, 3])
else:
return backend.mean(inputs, axis=[2, 3, 4])
@keras_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D')
class GlobalMaxPooling3D(GlobalPooling3D):
"""Global Max pooling operation for 3D data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2, 3])
else:
return backend.max(inputs, axis=[2, 3, 4])
# Aliases
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
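# --- Editorial example (not part of the original module) --------------------
# The short names above are plain aliases, so both spellings resolve to the
# same class object. A minimal sketch, assuming the public `tf.keras` API
# re-exports both names defined here; `_sketch_alias_identity` is a
# hypothetical helper added for illustration.
def _sketch_alias_identity():
  import tensorflow as tf
  assert tf.keras.layers.MaxPool2D is tf.keras.layers.MaxPooling2D
  assert (tf.keras.layers.GlobalAvgPool1D
          is tf.keras.layers.GlobalAveragePooling1D)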
| tensorflow-master | tensorflow/python/keras/layers/pooling.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMV2Test(keras_parameterized.TestCase):
@parameterized.named_parameters(
      ('non_tanh_activation', 'relu', 'sigmoid', 0, False, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
('unroll', 'tanh', 'sigmoid', 0, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
)
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias):
layer = rnn.LSTM(
1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias)
self.assertFalse(layer.could_use_cudnn)
def test_static_shape_inference_LSTM(self):
# Github issue: 15165
timesteps = 3
embedding_dim = 4
units = 2
model = keras.models.Sequential()
inputs = keras.layers.Dense(
embedding_dim, input_shape=(timesteps, embedding_dim))
model.add(inputs)
layer = rnn.LSTM(units, return_sequences=True)
model.add(layer)
outputs = model.layers[-1].output
self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_LSTM(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = rnn.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = rnn.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer._inbound_nodes[0].input_tensors
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [
keras.backend.random_normal_variable((num_samples, units), 0, 1)
for _ in range(num_states)
]
layer = rnn.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
layer = rnn.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
initial_weight_count = len(layer.weights)
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
self.assertEqual(initial_weight_count, len(layer.weights))
# Variables in "states" shouldn't show up in .weights
layer.states = nest.map_structure(variables.Variable, values)
layer.reset_states()
self.assertEqual(initial_weight_count, len(layer.weights))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = rnn.LSTM(units)(
inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_return_state(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
masked = keras.layers.Masking()(inputs)
layer = rnn.LSTM(units, return_state=True, stateful=True)
outputs = layer(masked)
state = outputs[1:]
assert len(state) == num_states
model = keras.models.Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
def test_state_reuse(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = rnn.LSTM(
units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = rnn.LSTM(units)(output, initial_state=state)
model = keras.models.Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
model.predict(inputs)
def test_initial_states_as_other_inputs(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
num_states = 2
layer_class = rnn.LSTM
# Test with Keras tensor
main_inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
assert initial_state[0] in layer._inbound_nodes[0].input_tensors
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
# Due to b/120160788.
@test_util.run_v2_only
def test_lstm_v2_feature_parity_with_canonical_lstm(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 20
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=rnn_state_size,
random_seed=random_seed.DEFAULT_GRAPH_SEED)
y_train = keras.utils.to_categorical(y_train, rnn_state_size)
    # For the last two batch items of the test data, zero out the last
    # timestep to simulate variable-length sequences and exercise masking.
x_train[-2:, -1, :] = 0.0
y_train[-2:] = 0
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
masked_input = keras.layers.Masking()(inputs)
lstm_layer = rnn_v1.LSTM(rnn_state_size,
recurrent_activation='sigmoid')
output = lstm_layer(masked_input)
lstm_model = keras.models.Model(inputs, output)
weights = lstm_model.get_weights()
y_1 = lstm_model.predict(x_train)
lstm_model.compile('rmsprop', 'mse')
lstm_model.fit(x_train, y_train)
y_2 = lstm_model.predict(x_train)
with test_util.device(use_gpu=True):
cudnn_layer = rnn.LSTM(rnn_state_size)
cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
cudnn_model.set_weights(weights)
y_3 = cudnn_model.predict(x_train)
cudnn_model.compile('rmsprop', 'mse')
cudnn_model.fit(x_train, y_train)
y_4 = cudnn_model.predict(x_train)
self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
@parameterized.named_parameters(('v0', 0), ('v1', 1), ('v2', 2))
def DISABLED_test_implementation_mode_LSTM(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'implementation': implementation_mode
},
input_shape=(num_samples, timesteps, embedding_dim))
layer_class = rnn.LSTM
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
layer_class = rnn.LSTM
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_masking_with_stacking_LSTM(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@parameterized.named_parameters(
# test_name, time_major, go_backwards
('normal', False, False),
('time_major', True, False),
('go_backwards', False, True),
('both', True, True),
)
def test_time_major_and_go_backward(self, time_major, go_backwards):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
def build_model(layer_cls):
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
layer = layer_cls(rnn_state_size,
recurrent_activation='sigmoid',
time_major=time_major,
return_sequences=True,
go_backwards=go_backwards)
if time_major:
converted_input = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
outputs = layer(converted_input)
outputs = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
else:
outputs = layer(inputs)
return keras.models.Model(inputs, outputs)
lstm_model = build_model(rnn_v1.LSTM)
y_ref = lstm_model.predict(x_train)
weights = lstm_model.get_weights()
lstm_v2_model = build_model(rnn.LSTM)
lstm_v2_model.set_weights(weights)
y = lstm_v2_model.predict(x_train)
self.assertAllClose(y, y_ref)
input_shape = 10
rnn_state_size = 8
output_shape = 8
timestep = 4
batch = 100
epoch = 10
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train, output_shape)
layer = rnn.LSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('rmsprop', loss='mse')
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
@parameterized.named_parameters(
      # test_name, use_bias, bias_initializer
('normal', True, 'zeros'),
('no_bias', False, 'zeros'),
('random_bias', True, 'random_uniform'),
)
def test_lstm_model_save_load(self, use_bias, bias_initializer):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
batch = 10
timestep = 3
input_dim = 5
units = 2
x = np.random.random((batch, timestep, input_dim))
def build_model():
inputs = keras.layers.Input(
shape=[timestep, input_dim], dtype=dtypes.float32)
layer = rnn.LSTM(
units,
use_bias=use_bias,
bias_initializer=bias_initializer)
output = layer(inputs)
return keras.models.Model(inputs, output), layer
model, layer = build_model()
y_ref = model.predict(x)
model.save_weights(h5_path)
cloned_model, new_layer = build_model()
cloned_model.load_weights(h5_path)
y = cloned_model.predict(x)
self.assertAllClose(y, y_ref)
self.assertAllClose(layer.get_weights(), new_layer.get_weights())
def test_lstm_output_on_multiple_kernel(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
with test_util.device(use_gpu=False):
layer = rnn.LSTM(rnn_state_size)
output = layer(inputs)
cpu_model = keras.models.Model(inputs, output)
weights = cpu_model.get_weights()
y_1 = cpu_model.predict(x_train)
with test_util.device(use_gpu=True):
layer = rnn.LSTM(rnn_state_size)
output = layer(inputs)
gpu_model = keras.models.Model(inputs, output)
gpu_model.set_weights(weights)
y_2 = gpu_model.predict(x_train)
    # Note that cuDNN uses 'sigmoid' as the recurrent activation, so LSTM V2
    # defaults to 'sigmoid' as well. Construct the canonical LSTM with
    # recurrent_activation='sigmoid' to get the same output.
with test_util.device(use_gpu=True):
layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid')
output = layer(inputs)
canonical_model = keras.models.Model(inputs, output)
# Remove the extra cudnn bias since canonical lstm will not use it.
canonical_model.set_weights(weights[:3])
y_3 = canonical_model.predict(x_train)
self.assertAllClose(y_1, y_2)
self.assertAllClose(y_2, y_3)
def DISABLED_test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'return_sequences': True
},
input_shape=(num_samples, timesteps, embedding_dim))
def test_regularizers_LSTM(self):
embedding_dim = 4
layer_class = rnn.LSTM
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
# Run in V2 only due to b/120160788.
@test_util.run_v2_only
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = rnn.LSTM
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse', run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
layer.reset_states()
mix_padded_input = np.ones((num_samples, timesteps))
mix_padded_input[0, 1] = 0
mix_padded_input[1, 0] = 0
mix_padded_input[1, 2] = 0
out8 = model.predict(mix_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_LSTM_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
rnn.LSTM(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1
},
input_shape=(num_samples, timesteps, embedding_dim))
def test_bidirectional(self):
batch = 128
timestep = 20
vocab_size = 1000
model = keras.Sequential([
keras.layers.Embedding(vocab_size, 64),
keras.layers.Bidirectional(rnn.LSTM(
64, return_sequences=True)),
keras.layers.Bidirectional(rnn.LSTM(32)),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
x = np.random.randint(0, vocab_size, size=(batch, timestep))
y = np.random.randint(0, 1, size=(batch))
model.fit(x, y, epochs=1, shuffle=False)
model.evaluate(x, y)
model.predict(x)
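# --- Editorial example (not part of the original test file) -----------------
# test_bidirectional above wraps the v2 LSTM in Bidirectional layers and
# stacks them. A minimal public-API sketch of the same model shape, assuming
# `tf.keras` maps onto the layers tested here; `_sketch_bidirectional_lstm`
# is a hypothetical helper added for illustration.
def _sketch_bidirectional_lstm():
  import numpy as np
  import tensorflow as tf
  model = tf.keras.Sequential([
      tf.keras.layers.Embedding(1000, 16),
      tf.keras.layers.Bidirectional(
          tf.keras.layers.LSTM(16, return_sequences=True)),
      tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(8)),
      tf.keras.layers.Dense(1, activation='sigmoid'),
  ])
  model.compile(loss='binary_crossentropy', optimizer='adam')
  x = np.random.randint(0, 1000, size=(32, 20))
  y = np.random.randint(0, 2, size=(32,))
  model.fit(x, y, epochs=1, verbose=0)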
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMGraphRewriteTest(keras_parameterized.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
def _test_runtime_with_model(self, model):
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape)
y_train = keras.utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer='sgd',
loss=['categorical_crossentropy', None])
existing_loss = 0
for _ in range(self.epoch):
history = model.fit(x_train, y_train)
loss_value = history.history['loss'][0]
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
def test_LSTM_runtime(self):
layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
outputs, runtime = layer(inputs)
    # Expand the runtime so that it is a 1D tensor instead of a scalar.
    # Keras models do not work with scalar model outputs, especially during
    # aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
# Due to b/120160788.
@test_util.run_v2_only
def test_LSTM_runtime_with_cond(self):
    # This test demonstrates the grappler plugin's graph rewrite when the
    # cond branches return a different number of internal states.
layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
zeros = array_ops.zeros([self.batch, self.output_shape])
dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
a = constant_op.constant(0)
b = constant_op.constant(1)
# Will always run the lstm layer.
outputs, runtime = control_flow_ops.cond(
gen_math_ops.less(a, b),
lambda: layer(inputs),
lambda: (zeros, dummy_runtime))
    # Expand the runtime so that it is a 1D tensor instead of a scalar.
    # Keras models do not work with scalar model outputs, especially during
    # aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
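# --- Editorial example (not part of the original test file) -----------------
# The runtime tests above expand a per-call scalar to a 1D tensor with a
# Lambda layer before exposing it as a model output, because Keras output
# aggregation expects batched (at least 1D) tensors. A minimal sketch of that
# pattern, assuming the public `tf.keras` API; `_sketch_expand_scalar_output`
# is a hypothetical helper added for illustration.
def _sketch_expand_scalar_output():
  import tensorflow as tf
  inputs = tf.keras.Input(shape=(4, 10))
  sequence_output = tf.keras.layers.LSTM(8)(inputs)
  # Reduce to a scalar, then expand to shape (1,) so the model can return it.
  expanded = tf.keras.layers.Lambda(
      lambda t: tf.expand_dims(tf.reduce_mean(t), axis=-1))(sequence_output)
  return tf.keras.Model(inputs, [sequence_output, expanded])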
class LSTMPerformanceTest(test.Benchmark):
def _measure_performance(self, test_config, model, x_train, y_train):
batch = test_config['batch']
epoch = test_config['epoch']
warmup_epoch = test_config['warmup_epoch']
# warm up the model
model.fit(x_train, y_train, batch_size=batch, epochs=warmup_epoch)
start_time = time.time()
model.fit(x_train, y_train, batch_size=batch, epochs=epoch - warmup_epoch)
end_time = time.time()
return (end_time - start_time) / (epoch - warmup_epoch)
def _time_performance_run_cudnn_lstm(self, test_config, x_train, y_train):
# Get the performance number for standard Cudnn LSTM
input_shape = test_config['input_shape']
rnn_state_size = test_config['rnn_state_size']
timestep = test_config['timestep']
cudnn_lstm_layer = keras.layers.CuDNNLSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = cudnn_lstm_layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('sgd', 'mse')
sec_per_epoch = self._measure_performance(
test_config, model, x_train, y_train)
logging.info('Average performance for %s per epoch is: %s',
'CuDNN LSTM', sec_per_epoch)
return sec_per_epoch
  def _time_performance_run_unified_lstm_gpu(
self, test_config, x_train, y_train):
    # Get the performance number for LSTM v2, with grappler swapping in the
    # cuDNN-based implementation.
input_shape = test_config['input_shape']
rnn_state_size = test_config['rnn_state_size']
timestep = test_config['timestep']
layer = rnn.LSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('sgd', 'mse')
sec_per_epoch = self._measure_performance(
test_config, model, x_train, y_train)
logging.info('Average performance for %s per epoch is: %s',
'LSTM V2', sec_per_epoch)
return sec_per_epoch
def _time_performance_run_normal_lstm(
self, test_config, x_train, y_train):
# Get performance number for standard LSTM on GPU.
input_shape = test_config['input_shape']
rnn_state_size = test_config['rnn_state_size']
timestep = test_config['timestep']
layer = rnn_v1.LSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('sgd', 'mse')
sec_per_epoch = self._measure_performance(
test_config, model, x_train, y_train)
logging.info('Average performance for %s per epoch is: %s',
'Normal LSTM', sec_per_epoch)
return sec_per_epoch
def _benchmark_performance_with_standard_cudnn_impl(self):
if not test.is_gpu_available():
self.skipTest('performance test will only run on GPU')
mode = 'eager' if context.executing_eagerly() else 'graph'
batch = 64
num_batch = 10
test_config = {
'input_shape': 128,
'rnn_state_size': 64,
'output_shape': 64,
'timestep': 50,
'batch': batch,
'epoch': 20,
# The performance for warmup epoch is ignored.
'warmup_epoch': 1,
}
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=(batch * num_batch),
test_samples=0,
input_shape=(test_config['timestep'], test_config['input_shape']),
num_classes=test_config['output_shape'])
y_train = keras.utils.to_categorical(y_train, test_config['output_shape'])
cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
test_config, x_train, y_train)
    lstm_v2_sec_per_epoch = self._time_performance_run_unified_lstm_gpu(
test_config, x_train, y_train)
normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm(
test_config, x_train, y_train)
cudnn_vs_v2 = cudnn_sec_per_epoch / lstm_v2_sec_per_epoch
v2_vs_normal = normal_lstm_sec_per_epoch / lstm_v2_sec_per_epoch
self.report_benchmark(name='keras_cudnn_lstm_' + mode,
wall_time=cudnn_sec_per_epoch,
iters=test_config['epoch'],
extras=test_config)
self.report_benchmark(name='keras_lstm_v2_' + mode,
wall_time=lstm_v2_sec_per_epoch,
iters=test_config['epoch'],
extras=test_config)
self.report_benchmark(name='keras_canonical_lstm_' + mode,
wall_time=normal_lstm_sec_per_epoch,
iters=test_config['epoch'],
extras=test_config)
    logging.info('Expect the performance of LSTM V2 to be within 80% of '
                 'CuDNN LSTM; got {0:.2f}%'.format(cudnn_vs_v2 * 100))
    logging.info('Expect the performance of LSTM V2 to be more than 5 times '
                 'that of the normal LSTM; got {0:.2f}'.format(v2_vs_normal))
def benchmark_performance_graph(self):
with context.graph_mode(), session_lib.Session(config=_config):
self._benchmark_performance_with_standard_cudnn_impl()
def benchmark_performance_eager(self):
with context.eager_mode():
self._benchmark_performance_with_standard_cudnn_impl()
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/lstm_v2_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
# Advanced activations.
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.layers.advanced_activations import PReLU
from tensorflow.python.keras.layers.advanced_activations import ELU
from tensorflow.python.keras.layers.advanced_activations import ReLU
from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU
from tensorflow.python.keras.layers.advanced_activations import Softmax
# Convolution layers.
from tensorflow.python.keras.layers.convolutional import Conv1D
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.convolutional import Conv3D
from tensorflow.python.keras.layers.convolutional import Conv2DTranspose
from tensorflow.python.keras.layers.convolutional import Conv3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConv1D
from tensorflow.python.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.python.keras.layers.convolutional import Convolution1D
from tensorflow.python.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras.layers.convolutional import Convolution3D
from tensorflow.python.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.python.keras.layers.convolutional import Convolution3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConvolution1D
from tensorflow.python.keras.layers.convolutional import SeparableConvolution2D
from tensorflow.python.keras.layers.convolutional import DepthwiseConv2D
# Image processing layers.
from tensorflow.python.keras.layers.convolutional import UpSampling1D
from tensorflow.python.keras.layers.convolutional import UpSampling2D
from tensorflow.python.keras.layers.convolutional import UpSampling3D
from tensorflow.python.keras.layers.convolutional import ZeroPadding1D
from tensorflow.python.keras.layers.convolutional import ZeroPadding2D
from tensorflow.python.keras.layers.convolutional import ZeroPadding3D
from tensorflow.python.keras.layers.convolutional import Cropping1D
from tensorflow.python.keras.layers.convolutional import Cropping2D
from tensorflow.python.keras.layers.convolutional import Cropping3D
# Core layers.
from tensorflow.python.keras.layers.core import Masking
from tensorflow.python.keras.layers.core import Dropout
from tensorflow.python.keras.layers.core import SpatialDropout1D
from tensorflow.python.keras.layers.core import SpatialDropout2D
from tensorflow.python.keras.layers.core import SpatialDropout3D
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Reshape
from tensorflow.python.keras.layers.core import Permute
from tensorflow.python.keras.layers.core import Flatten
from tensorflow.python.keras.layers.core import RepeatVector
from tensorflow.python.keras.layers.core import Lambda
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.core import ActivityRegularization
# Dense Attention layers.
from tensorflow.python.keras.layers.dense_attention import AdditiveAttention
from tensorflow.python.keras.layers.dense_attention import Attention
# Embedding layers.
from tensorflow.python.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.python.keras.layers.local import LocallyConnected1D
from tensorflow.python.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.python.keras.layers.merge import Add
from tensorflow.python.keras.layers.merge import Subtract
from tensorflow.python.keras.layers.merge import Multiply
from tensorflow.python.keras.layers.merge import Average
from tensorflow.python.keras.layers.merge import Maximum
from tensorflow.python.keras.layers.merge import Minimum
from tensorflow.python.keras.layers.merge import Concatenate
from tensorflow.python.keras.layers.merge import Dot
from tensorflow.python.keras.layers.merge import add
from tensorflow.python.keras.layers.merge import subtract
from tensorflow.python.keras.layers.merge import multiply
from tensorflow.python.keras.layers.merge import average
from tensorflow.python.keras.layers.merge import maximum
from tensorflow.python.keras.layers.merge import minimum
from tensorflow.python.keras.layers.merge import concatenate
from tensorflow.python.keras.layers.merge import dot
# Noise layers.
from tensorflow.python.keras.layers.noise import AlphaDropout
from tensorflow.python.keras.layers.noise import GaussianNoise
from tensorflow.python.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.python.keras.layers.normalization import LayerNormalization
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.layers.normalization_v2 import BatchNormalization as BatchNormalizationV2
# Kernelized layers.
from tensorflow.python.keras.layers.kernelized import RandomFourierFeatures
# Pooling layers.
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.python.keras.layers.pooling import MaxPool1D
from tensorflow.python.keras.layers.pooling import MaxPool2D
from tensorflow.python.keras.layers.pooling import MaxPool3D
from tensorflow.python.keras.layers.pooling import AvgPool1D
from tensorflow.python.keras.layers.pooling import AvgPool2D
from tensorflow.python.keras.layers.pooling import AvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.keras.layers.recurrent import AbstractRNNCell
from tensorflow.python.keras.layers.recurrent import StackedRNNCells
from tensorflow.python.keras.layers.recurrent import SimpleRNNCell
from tensorflow.python.keras.layers.recurrent import PeepholeLSTMCell
from tensorflow.python.keras.layers.recurrent import SimpleRNN
from tensorflow.python.keras.layers.recurrent import GRU
from tensorflow.python.keras.layers.recurrent import GRUCell
from tensorflow.python.keras.layers.recurrent import LSTM
from tensorflow.python.keras.layers.recurrent import LSTMCell
from tensorflow.python.keras.layers.recurrent_v2 import GRU as GRU_v2
from tensorflow.python.keras.layers.recurrent_v2 import GRUCell as GRUCell_v2
from tensorflow.python.keras.layers.recurrent_v2 import LSTM as LSTM_v2
from tensorflow.python.keras.layers.recurrent_v2 import LSTMCell as LSTMCell_v2
# Convolutional-recurrent layers.
from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D
# CuDNN recurrent layers.
from tensorflow.python.keras.layers.cudnn_recurrent import CuDNNLSTM
from tensorflow.python.keras.layers.cudnn_recurrent import CuDNNGRU
# Wrapper functions
from tensorflow.python.keras.layers.wrappers import Wrapper
from tensorflow.python.keras.layers.wrappers import Bidirectional
from tensorflow.python.keras.layers.wrappers import TimeDistributed
# RNN Cell wrappers.
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import DeviceWrapper
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import DropoutWrapper
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper
# Serialization functions
from tensorflow.python.keras.layers.serialization import deserialize
from tensorflow.python.keras.layers.serialization import serialize
del absolute_import
del division
del print_function
| tensorflow-master | tensorflow/python/keras/layers/__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Masking')
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer.
  You want to mask timesteps #3 and #5 because you lack data for
these timesteps. You can:
- Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
self._compute_output_and_mask_jointly = True
def compute_mask(self, inputs, mask=None):
return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = K.any(
math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)
# Compute the mask and outputs simultaneously.
outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1) # pylint: disable=protected-access
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Dropout')
class Dropout(Layer):
"""Applies Dropout to the input.
  Dropout consists of randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
Arguments:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
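  Example:
  A minimal usage sketch, in the style of the other examples in this module
  (assumes `Sequential` and `Dense` are available; layer sizes are
  illustrative):
  ```python
  model = Sequential()
  model.add(Dense(64, activation='relu', input_shape=(16,)))
  # Randomly zero out 20% of the units at each update during training; at
  # inference time the layer passes inputs through unchanged.
  model.add(Dropout(0.2))
  model.add(Dense(10))
  ```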
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(Dropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return self.noise_shape
return nn_ops._get_noise_shape(inputs, self.noise_shape) # pylint: disable=protected-access
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
def dropped_inputs():
return nn.dropout(
inputs,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed,
rate=self.rate)
output = tf_utils.smart_cond(training,
dropped_inputs,
lambda: array_ops.identity(inputs))
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed
}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
  This version performs the same function as Dropout; however, it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: Float between 0 and 1. Fraction of the input units to drop.
Call arguments:
inputs: A 3D tensor.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
3D tensor with shape:
`(samples, timesteps, channels)`
Output shape:
Same as input.
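  Example:
  A minimal usage sketch (assumes `Sequential` and `Conv1D` are available;
  shapes are illustrative):
  ```python
  model = Sequential()
  model.add(Conv1D(64, 3, input_shape=(timesteps, features)))
  # Drops entire feature maps (channels) rather than individual activations.
  model.add(SpatialDropout1D(0.5))
  ```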
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
@keras_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
"""Spatial 2D version of Dropout.
  This version performs the same function as Dropout; however, it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
(the depth) is at index 1,
      in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Call arguments:
inputs: A 4D tensor.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
Same as input.
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, input_shape[3])
@keras_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
"""Spatial 3D version of Dropout.
  This version performs the same function as Dropout; however, it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
      is at index 1, in 'channels_last' mode it is at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Call arguments:
inputs: A 5D tensor.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
Output shape:
Same as input.
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, 1, input_shape[4])
@keras_export('keras.layers.Activation')
class Activation(Layer):
"""Applies an activation function to an output.
Arguments:
activation: Activation function, such as `tf.nn.relu`, or string name of
built-in activation function, such as "relu".
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
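  Example:
  A minimal usage sketch (assumes `Sequential` and `Dense` are available):
  ```python
  model = Sequential()
  model.add(Dense(64, input_shape=(16,)))
  # Equivalent to passing activation='relu' to the Dense layer above, but
  # applied as a separate layer.
  model.add(Activation('relu'))
  ```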
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Reshape')
class Reshape(Layer):
"""Reshapes an output to a certain shape.
Arguments:
target_shape: Target shape. Tuple of integers,
does not include the samples dimension (batch size).
Input shape:
    Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
```python
# as first layer in a Sequential model
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# now: model.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# as intermediate layer in a Sequential model
model.add(Reshape((6, 2)))
# now: model.output_shape == (None, 6, 2)
# also supports shape inference using `-1` as dimension
model.add(Reshape((-1, 2, 2)))
# now: model.output_shape == (None, 3, 2, 2)
```
"""
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
Arguments:
input_shape: Shape of array being reshaped
output_shape: Desired shape of the array with at most
a single -1 which indicates a dimension that should be
derived from the input shape.
Returns:
The new output shape with a -1 replaced with its computed value.
Raises:
ValueError: If the total array size of the output_shape is
different than the input_shape, or more than one unknown dimension
is specified.
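    Example:
      As an illustration: with `input_shape = (12,)` and
      `output_shape = (-1, 2, 2)`, the product of the known dimensions is 4
      and the total input size is 12, so the -1 resolves to 12 // 4 = 3 and
      the method returns `[3, 2, 2]`.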
"""
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if None in input_shape[1:]:
output_shape = [input_shape[0]]
# input shape (partially) unknown? replace -1's with None's
output_shape += tuple(s if s != -1 else None for s in self.target_shape)
else:
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(input_shape[1:],
self.target_shape)
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.reshape(inputs,
(array_ops.shape(inputs)[0],) + self.target_shape)
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Permute')
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
Example:
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Arguments:
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimensions
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
'Invalid permutation `dims` for Permute Layer: %s. '
'The set of indices in `dims` must be consecutive and start from 1.' %
(dims,))
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.transpose(inputs, perm=(0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Flatten')
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
If inputs are shaped `(batch,)` without a channel dimension, then flattening
adds an extra channel dimension and output shapes are `(batch, 1)`.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Example:
```python
model = Sequential()
model.add(Convolution2D(64, 3, 3,
border_mode='same',
input_shape=(3, 32, 32)))
# now: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# now: model.output_shape == (None, 65536)
```
"""
def __init__(self, data_format=None, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
def call(self, inputs):
if (self.data_format == 'channels_first'
and K.ndim(inputs) is not None and K.ndim(inputs) > 1):
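      # Move the channels axis (index 1) to the last position before
      # flattening.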
permutation = [0]
permutation.extend([i for i in
range(2, K.ndim(inputs))])
permutation.append(1)
inputs = array_ops.transpose(inputs, perm=permutation)
outputs = array_ops.reshape(
inputs, (tensor_shape.dimension_value(inputs.shape[0]) or
array_ops.shape(inputs)[0], -1))
if not context.executing_eagerly():
outputs.set_shape(self.compute_output_shape(inputs.shape))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if not input_shape:
      # A scalar input flattens to shape (1,); return early to avoid indexing
      # into an empty shape list below.
      return tensor_shape.TensorShape([1])
    output_shape = [input_shape[0]]
if all(input_shape[1:]):
output_shape += [np.prod(input_shape[1:])]
else:
output_shape += [None]
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(Flatten, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
"""Repeats the input n times.
Example:
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
Arguments:
n: Integer, repetition factor.
Input shape:
2D tensor of shape `(num_samples, features)`.
Output shape:
3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Lambda')
class Lambda(Layer):
"""Wraps arbitrary expressions as a `Layer` object.
The `Lambda` layer exists so that arbitrary TensorFlow functions
can be used when constructing `Sequential` and Functional API
models. `Lambda` layers are best suited for simple operations or
quick experimentation. For more advanced use cases, subclassing
`keras.layers.Layer` is preferred. One reason for this is that
when saving a Model, `Lambda` layers are saved by serializing the
Python bytecode, whereas subclassed Layers are saved via overriding
their `get_config` method and are thus more portable. Models that rely
on subclassed Layers are also often easier to visualize and reason
about.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Variables can be created within a `Lambda` layer. Like with
other layers, these variables will be created only once and reused
if the `Lambda` layer is called on new inputs. If creating more
than one variable in a given `Lambda` instance, be sure to use
a different name for each variable. Note that calling sublayers
from within a `Lambda` is not supported.
Example of variable creation:
```python
def linear_transform(x):
v1 = tf.Variable(1., name='multiplier')
v2 = tf.Variable(0., name='bias')
return x*v1 + v2
linear_layer = Lambda(linear_transform)
model.add(linear_layer)
model.add(keras.layers.Dense(10, activation='relu'))
model.add(linear_layer) # Reuses existing Variables
```
Note that creating two instances of `Lambda` using the same function
will *not* share Variables between the two instances. Each instance of
`Lambda` will create and manage its own weights.
Arguments:
function: The function to be evaluated. Takes input tensor as first
argument.
    output_shape: Expected output shape from the function. This argument can
      be inferred if not explicitly provided. Can be a tuple or a function. If
      a tuple, it only specifies the first dimension onward; the sample
      (batch) dimension is assumed to be either the same as the input
      (`output_shape = (input_shape[0],) + output_shape`) or, if the input
      sample dimension is `None`, also `None`
      (`output_shape = (None,) + output_shape`). If a function, it specifies
      the entire shape as a function of the input shape:
      `output_shape = f(input_shape)`.
mask: Either None (indicating no masking) or a callable with the same
signature as the `compute_mask` layer method, or a tensor that will be
      returned as the output mask regardless of what the input is.
arguments: Optional dictionary of keyword arguments to be passed to the
function.
Input shape: Arbitrary. Use the keyword argument input_shape (tuple of
integers, does not include the samples axis) when using this layer as the
first layer in a model.
Output shape: Specified by `output_shape` argument
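  Example of the two `output_shape` forms (an illustrative sketch; the layer
  and function names below are arbitrary):
  ```python
  # As a tuple: only the dimensions after the sample (batch) axis are given.
  mean_layer = Lambda(lambda x: K.mean(x, axis=1, keepdims=True),
                      output_shape=(1,))
  # As a function of the full input shape, including the batch dimension.
  def doubled_shape(input_shape):
    return (input_shape[0], input_shape[1] * 2)
  doubling_layer = Lambda(lambda x: K.concatenate([x, -x], axis=-1),
                          output_shape=doubled_shape)
  ```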
"""
def __init__(self, function, output_shape=None, mask=None, arguments=None,
**kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self.arguments = arguments if arguments else {}
if mask is not None:
self.supports_masking = True
self.mask = mask
self._output_shape = output_shape
self._variable_dict = {}
# These attributes are inherited from `Layer`.
self._trainable_weights = []
self._non_trainable_weights = []
function_args = tf_inspect.getfullargspec(self.function).args
self._fn_expects_training_arg = 'training' in function_args
self._fn_expects_mask_arg = 'mask' in function_args
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# Make use of existing autocomputation but provide Lambda-specific
# error message. This is always safe to run even when the outer context
# is Graph mode because Lambda layers don't have side effects such as
# `add_loss`.
with context.eager_mode():
try:
return super(Lambda, self).compute_output_shape(input_shape)
except NotImplementedError:
raise NotImplementedError(
'We could not automatically infer the shape of the Lambda\'s '
'output. Please specify `output_shape` for this Lambda.')
if callable(self._output_shape):
output_shapes = self._output_shape(input_shape)
return tf_utils.convert_shapes(output_shapes, to_tuples=False)
# Output shapes are passed directly and don't include batch dimension.
input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None
def _add_batch(shape):
return tensor_shape.TensorShape([batch_size] + shape.as_list())
output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
return nest.map_structure(_add_batch, output_shapes)
def call(self, inputs, mask=None, training=None):
arguments = self.arguments
if self._fn_expects_mask_arg:
arguments['mask'] = mask
if self._fn_expects_training_arg:
arguments['training'] = training
with variable_scope.variable_creator_scope(self._variable_creator):
return self.function(inputs, **arguments)
def _variable_creator(self, next_creator, **kwargs):
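    # Reuse a variable if this Lambda has already created one with the same
    # name; otherwise create it, record it as (non-)trainable, and track it.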
name = kwargs['name']
if name in self._variable_dict:
return self._variable_dict[name]
var = next_creator(**kwargs)
self._variable_dict[name] = var
if var.trainable:
self._trainable_weights.append(var)
else:
self._non_trainable_weights.append(var)
K.track_variable(var)
return var
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
function_config = self._serialize_function_to_config(self.function)
output_shape_config = self._serialize_function_to_config(self._output_shape,
allow_raw=True)
config = {
'function': function_config[0],
'function_type': function_config[1],
'module': function_config[2],
'output_shape': output_shape_config[0],
'output_shape_type': output_shape_config[1],
'output_shape_module': output_shape_config[2],
}
if self.mask is not None:
mask_config = self._serialize_function_to_config(self.mask)
config.update({
'mask': mask_config[0],
'mask_type': mask_config[1],
'mask_module': mask_config[2]
})
config['arguments'] = self.arguments
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(self, inputs, allow_raw=False):
if isinstance(inputs, python_types.LambdaType):
output = generic_utils.func_dump(inputs)
output_type = 'lambda'
module = inputs.__module__
elif callable(inputs):
output = inputs.__name__
output_type = 'function'
module = inputs.__module__
elif allow_raw:
output = inputs
output_type = 'raw'
module = None
else:
raise ValueError(
'Invalid input for serialization, type: %s ' % type(inputs))
return output, output_type, module
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
function = cls._parse_function_from_config(
config, custom_objects, 'function', 'module', 'function_type')
output_shape = cls._parse_function_from_config(
config, custom_objects, 'output_shape', 'output_shape_module',
'output_shape_type')
if 'mask' in config:
mask = cls._parse_function_from_config(
config, custom_objects, 'mask', 'mask_module', 'mask_type')
else:
mask = None
config['function'] = function
config['output_shape'] = output_shape
config['mask'] = mask
    # If any arguments were numpy arrays, they have been saved as lists;
    # recover the ndarrays here.
if 'arguments' in config:
for key in config['arguments']:
if isinstance(config['arguments'][key], dict):
arg_dict = config['arguments'][key]
if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
# Overwrite the argument with its numpy translation
config['arguments'][key] = np.array(arg_dict['value'])
return cls(**config)
@classmethod
def _parse_function_from_config(
cls, config, custom_objects, func_attr_name, module_attr_name,
func_type_attr_name):
globs = globals()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn('{} is not loaded, but a Lambda layer uses it. '
                    'It may cause errors.'.format(module),
                    UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == 'function':
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
elif function_type == 'raw':
function = config[func_attr_name]
else:
raise TypeError('Unknown function type:', function_type)
return function
@keras_export('keras.layers.Dense')
class Dense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: If the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
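  Example with an input of rank greater than 2 (an illustrative sketch):
  ```python
  model = Sequential()
  # Per the `Output shape` section above, an input of shape
  # (batch_size, 10, 16) produces an output of shape (batch_size, 10, 4).
  model.add(Dense(4, input_shape=(10, 16)))
  ```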
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Dense, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.units = int(units)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
self.input_spec = InputSpec(min_ndim=2)
def build(self, input_shape):
dtype = dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError('Unable to build `Dense` layer with non-floating point '
'dtype %s' % (dtype,))
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2,
axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs)
rank = common_shapes.rank(inputs)
if rank > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
shape = inputs.shape.as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
# Cast the inputs to self.dtype, which is the variable dtype. We do not
# cast if `should_cast_variables` is True, as in that case the variable
      # will be automatically cast to inputs.dtype.
if not self._mixed_precision_policy.should_cast_variables:
inputs = math_ops.cast(inputs, self.dtype)
outputs = gen_math_ops.mat_mul(inputs, self.kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Arguments:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
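  Example:
  A minimal usage sketch (assumes `Sequential` and `Dense` are available):
  ```python
  model = Sequential()
  model.add(Dense(64, input_shape=(16,)))
  # Adds an L1 penalty on this layer's output activations to the model loss.
  model.add(ActivityRegularization(l1=0.001))
  ```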
"""
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tensorflow-master | tensorflow/python/keras/layers/core.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional transpose layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class Conv2DTransposeTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv2DTranspose,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('strides_output_padding', {'strides': (2, 2), 'output_padding': (1, 1)}),
)
def test_conv2d_transpose(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_conv2d_transpose_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2DTranspose(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv2d_transpose_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2DTranspose(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv2d_transpose_dilation(self):
testing_utils.layer_test(keras.layers.Conv2DTranspose,
kwargs={'filters': 2,
'kernel_size': 3,
'padding': 'same',
'data_format': 'channels_last',
'dilation_rate': (2, 2)},
input_shape=(2, 5, 6, 3))
input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32)
expected_output = np.float32([[192, 228, 192, 228],
[336, 372, 336, 372],
[192, 228, 192, 228],
[336, 372, 336, 372]]).reshape((1, 4, 4, 1))
testing_utils.layer_test(keras.layers.Conv2DTranspose,
input_data=input_data,
kwargs={'filters': 1,
'kernel_size': 3,
'padding': 'same',
'data_format': 'channels_last',
'dilation_rate': (2, 2),
'kernel_initializer': 'ones'},
expected_output=expected_output)
@keras_parameterized.run_all_keras_modes
class Conv3DTransposeTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
depth = 5
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs=kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('strides_output_padding', {'strides': (2, 2, 2),
'output_padding': (1, 1, 1)}),
)
def test_conv3d_transpose(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_conv3d_transpose_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3DTranspose(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv3d_transpose_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3DTranspose(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv3d_transpose_dynamic_shape(self):
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
with self.cached_session(use_gpu=True):
# Won't raise error here.
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs={
'data_format': 'channels_last',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, None, None, None, 3),
input_data=input_data)
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs={
'data_format': 'channels_first',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, 3, None, None, None),
input_data=input_data)
| tensorflow-master | tensorflow/python/keras/layers/convolutional_transpose_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
def test_dropout(self):
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_spatial_dropout_1d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
def test_spatial_dropout_2d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
def test_spatial_dropout_3d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
def test_lambda(self):
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertEqual(ld.function(3), 4)
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([math_ops.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
self.assertAllEqual(self.evaluate(ld.function([3])), [9, 3])
def test_lambda_multiple_inputs(self):
ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
x1 = np.ones([3, 2], np.float32)
x2 = np.ones([3, 5], np.float32)
out = ld([x1, x2])
self.assertAllEqual(out.shape, [3, 2])
def test_lambda_output_shape(self):
l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual((1, 1), l.get_config()['output_shape'])
def test_lambda_output_shape_function(self):
def get_output_shape(input_shape):
return 1 * input_shape
l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual('lambda', l.get_config()['output_shape_type'])
def test_lambda_output_shape_autocalculate_multiple_inputs(self):
def lambda_fn(x):
return math_ops.matmul(x[0], x[1])
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual((10, 20), output_shape)
output_signature = l.compute_output_signature([
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 10)),
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 20))])
self.assertAllEqual((10, 20), output_signature.shape)
self.assertAllEqual(dtypes.float64, output_signature.dtype)
def test_lambda_output_shape_list_multiple_outputs(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_tuple_with_none(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
output_shape = l.compute_output_shape((5, 10, 20))
self.assertAllEqual([5, None, 10], output_shape.as_list())
def test_lambda_output_shape_function_multiple_outputs(self):
def lambda_fn(x):
return x
def output_shape_fn(input_shape):
return input_shape
l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_nested(self):
def lambda_fn(inputs):
return (inputs[1]['a'], {'b': inputs[0]})
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape(((10, 20), {'a': (10, 5)}))
self.assertAllEqual(((10, 5), {'b': (10, 20)}), output_shape)
def test_lambda_config_serialization(self):
# Test serialization with output_shape and output_shape_type
layer = keras.layers.Lambda(
lambda x: x + 1,
output_shape=(1, 1),
mask=lambda i, m: m)
layer(keras.backend.variable(np.ones((1, 1))))
config = layer.get_config()
layer = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
layer = keras.layers.Lambda.from_config(config)
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
def test_lambda_with_variable(self):
def fn(x):
return x * variables.Variable(2., name='multiplier')
layer = keras.layers.Lambda(fn)
for _ in range(10):
layer(np.ones((10, 10), 'float32'))
self.assertLen(layer.trainable_weights, 1)
self.assertEqual(layer.trainable_weights[0].name, 'lambda/multiplier:0')
def test_lambda_with_training_arg(self):
def fn(x, training=True):
return keras.backend.in_train_phase(x, 2 * x, training=training)
layer = keras.layers.Lambda(fn)
x = keras.backend.ones(())
train_out = layer(x, training=True)
eval_out = layer(x, training=False)
self.assertEqual(keras.backend.get_value(train_out), 1.)
self.assertEqual(keras.backend.get_value(eval_out), 2.)
def test_lambda_with_mask(self):
def add_one(inputs):
return inputs + 1.0
def mask(unused_inputs, previous_mask):
return previous_mask
layer = keras.layers.Lambda(add_one, mask=mask)
x = np.ones([5, 4, 3])
x[:, -1, :] = 0
masking = keras.layers.Masking()
out = layer(masking(x))
expected_out = np.full([5, 4, 3], 2.0)
expected_out[:, -1, :] = 1.0
expected_mask = np.ones([5, 4])
expected_mask[:, -1] = 0.0
self.assertAllClose(self.evaluate(out), expected_out)
self.assertIsNotNone(out._keras_mask)
self.assertAllClose(self.evaluate(out._keras_mask), expected_mask)
class TestStatefulLambda(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_lambda_with_variable_in_model(self):
def lambda_fn(x):
# Variable will only get created once.
v = variables.Variable(1., trainable=True)
return x * v
model = testing_utils.get_model_from_layers(
[keras.layers.Lambda(lambda_fn)], input_shape=(10,))
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
self.assertLen(model.trainable_weights, 1)
self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.)
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
def test_masking(self):
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_keras_mask(self):
x = np.ones((10, 10))
y = keras.layers.Masking(1.)(x)
self.assertTrue(hasattr(y, '_keras_mask'))
self.assertTrue(y._keras_mask is not None)
self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
def test_compute_mask_with_positional_mask_arg(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
return inputs
def compute_mask(self, inputs, mask=None):
if mask is not None:
return array_ops.ones(())
else:
return array_ops.zeros(())
x, mask = array_ops.ones((1, 1)), array_ops.ones((1, 1))
layer = MyLayer()
y = layer(x, mask)
# Check that `mask` was correctly sent to `compute_mask`.
self.assertEqual(keras.backend.get_value(y._keras_mask), 1)
def test_activation(self):
# with string argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
# Test channels_first
inputs = np.random.random((10, 3, 5, 5)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.reshape(
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
self.assertAllClose(outputs, target_outputs)
def test_flatten_scalar_channels(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3,))
# Test channels_first
inputs = np.random.random((10,)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.expand_dims(inputs, -1)
self.assertAllClose(outputs, target_outputs)
def test_repeat_vector(self):
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
def test_dense(self):
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
def test_dense_dtype(self):
inputs = ops.convert_to_tensor(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype='float32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float32')
def test_dense_with_policy(self):
inputs = ops.convert_to_tensor(
np.random.randint(low=0, high=7, size=(2, 2)), dtype='float16')
layer = keras.layers.Dense(5, dtype=policy.Policy('infer_float32_vars'))
outputs = layer(inputs)
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(dtype='float16', shape=(2, 2)))
self.assertEqual(output_signature.dtype, dtypes.float16)
self.assertEqual(output_signature.shape, (2, 5))
self.assertEqual(outputs.dtype, 'float16')
self.assertEqual(layer.kernel.dtype, 'float32')
def test_dense_regularization(self):
layer = keras.layers.Dense(
3,
kernel_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l1',
activity_regularizer='l2',
name='dense_reg')
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(3, len(layer.losses))
def test_dense_constraints(self):
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = keras.layers.Dense(
3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_activity_regularization(self):
layer = keras.layers.ActivityRegularization(l1=0.1)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(1, len(layer.losses))
config = layer.get_config()
self.assertEqual(config.pop('l1'), 0.1)
def test_numpy_inputs(self):
if context.executing_eagerly():
layer = keras.layers.RepeatVector(2)
x = np.ones((10, 10))
self.assertAllEqual(np.ones((10, 2, 10)), layer(x))
layer = keras.layers.Concatenate()
x, y = np.ones((10, 10)), np.ones((10, 10))
self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/core_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pooling layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
class GlobalPoolingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d(self):
testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
input_shape=(3, 4, 5))
testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5))
testing_utils.layer_test(keras.layers.pooling.GlobalAveragePooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5))
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d_masking_support(self):
model = keras.Sequential()
model.add(keras.layers.Masking(mask_value=0., input_shape=(3, 4)))
model.add(keras.layers.GlobalAveragePooling1D())
model.compile(loss='mae', optimizer='rmsprop')
model_input = np.random.random((2, 3, 4))
model_input[0, 1:, :] = 0
output = model.predict(model_input)
self.assertAllClose(output[0], model_input[0, 0, :])
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_2d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling2D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5, 6))
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling2D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling2D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5, 6))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling2D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_3d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling3D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 3, 4, 3))
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling3D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 4, 3, 4, 3))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling3D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 3, 4, 3))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling3D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 4, 3, 4, 3))
class Pooling2DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_2d(self):
pool_size = (3, 3)
for strides in [(1, 1), (2, 2)]:
testing_utils.layer_test(
keras.layers.MaxPooling2D,
kwargs={
'strides': strides,
'padding': 'valid',
'pool_size': pool_size
},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_2d(self):
testing_utils.layer_test(
keras.layers.AveragePooling2D,
kwargs={'strides': (2, 2),
'padding': 'same',
'pool_size': (2, 2)},
input_shape=(3, 5, 6, 4))
testing_utils.layer_test(
keras.layers.AveragePooling2D,
kwargs={'strides': (2, 2),
'padding': 'valid',
'pool_size': (3, 3)},
input_shape=(3, 5, 6, 4))
# This part of the test can only run on GPU but doesn't appear
# to be properly assigned to a GPU when running in eager mode.
if not context.executing_eagerly():
# Only runs on GPU with CUDA; channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.AveragePooling2D,
kwargs={
'strides': (1, 1),
'padding': 'valid',
'pool_size': (2, 2),
'data_format': 'channels_first'
},
input_shape=(3, 4, 5, 6))
class Pooling3DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_3d(self):
pool_size = (3, 3, 3)
testing_utils.layer_test(
keras.layers.MaxPooling3D,
kwargs={'strides': 2,
'padding': 'valid',
'pool_size': pool_size},
input_shape=(3, 11, 12, 10, 4))
testing_utils.layer_test(
keras.layers.MaxPooling3D,
kwargs={
'strides': 3,
'padding': 'valid',
'data_format': 'channels_first',
'pool_size': pool_size
},
input_shape=(3, 4, 11, 12, 10))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_3d(self):
pool_size = (3, 3, 3)
testing_utils.layer_test(
keras.layers.AveragePooling3D,
kwargs={'strides': 2,
'padding': 'valid',
'pool_size': pool_size},
input_shape=(3, 11, 12, 10, 4))
testing_utils.layer_test(
keras.layers.AveragePooling3D,
kwargs={
'strides': 3,
'padding': 'valid',
'data_format': 'channels_first',
'pool_size': pool_size
},
input_shape=(3, 4, 11, 12, 10))
class Pooling1DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:
testing_utils.layer_test(
keras.layers.MaxPooling1D,
kwargs={'strides': stride,
'padding': padding},
input_shape=(3, 5, 4))
testing_utils.layer_test(
keras.layers.MaxPooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 2, 6))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:
testing_utils.layer_test(
keras.layers.AveragePooling1D,
kwargs={'strides': stride,
'padding': padding},
input_shape=(3, 5, 4))
testing_utils.layer_test(
keras.layers.AveragePooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 2, 6))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/pooling_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing for RNN wrappers for TF v2."""
# Note that all the APIs under this module are exported as tf.nn.*. This is due
# to the fact that those APIs were from tf.nn.rnn_cell_impl. They are ported
# here to avoid the cyclic dependency issue for serialization. These APIs will
# probably be deprecated and removed in the future, since a similar API is
# available in the existing Keras RNN API.
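# A minimal usage sketch (illustrative only, not part of this module; the
# `output_keep_prob` keyword comes from rnn_cell_wrapper_impl and may differ
# across versions):
#   cell = tf.nn.RNNCellDropoutWrapper(
#       tf.keras.layers.GRUCell(8), output_keep_prob=0.9)
#   outputs = tf.keras.layers.RNN(cell)(tf.ones([2, 4, 3]))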
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.layers import AbstractRNNCell
from tensorflow.python.ops import rnn_cell_wrapper_impl
from tensorflow.python.util.tf_export import tf_export
class _RNNCellWrapperV2(AbstractRNNCell):
"""Base class for cells wrappers V2 compatibility.
This class along with `rnn_cell_impl._RNNCellWrapperV1` allows to define
wrappers that are compatible with V1 and V2, and defines helper methods for
this purpose.
"""
def __init__(self, cell, *args, **kwargs):
super(_RNNCellWrapperV2, self).__init__(*args, **kwargs)
self.cell = cell
def call(self, inputs, state, **kwargs):
"""Runs the RNN cell step computation.
When `call` is used, we assume that the wrapper object has been built, and
therefore that the wrapped cell has been built via its `build` method and that
its `call` method can be used directly.
This allows the wrapped cell and the non-wrapped cell to be used equivalently
when using `call` and `build`.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.call, **kwargs)
def build(self, inputs_shape):
"""Builds the wrapped cell."""
self.cell.build(inputs_shape)
self.built = True
def get_config(self):
config = {
"cell": {
"class_name": self.cell.__class__.__name__,
"config": self.cell.get_config()
},
}
base_config = super(_RNNCellWrapperV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop("cell"), custom_objects=custom_objects)
return cls(cell, **config)
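# Serialization round-trip sketch (descriptive, inferred from the code above):
# get_config() nests the wrapped cell as
# {"cell": {"class_name": ..., "config": ...}}, and from_config() rebuilds the
# cell with keras.layers.deserialize before calling cls(cell, **config).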
@tf_export("nn.RNNCellDropoutWrapper", v1=[])
class DropoutWrapper(rnn_cell_wrapper_impl.DropoutWrapperBase,
_RNNCellWrapperV2):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation
super(DropoutWrapper, self).__init__(*args, **kwargs)
__init__.__doc__ = rnn_cell_wrapper_impl.DropoutWrapperBase.__init__.__doc__
@tf_export("nn.RNNCellResidualWrapper", v1=[])
class ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase,
_RNNCellWrapperV2):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation
super(ResidualWrapper, self).__init__(*args, **kwargs)
__init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__
@tf_export("nn.RNNCellDeviceWrapper", v1=[])
class DeviceWrapper(rnn_cell_wrapper_impl.DeviceWrapperBase,
_RNNCellWrapperV2):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation
super(DeviceWrapper, self).__init__(*args, **kwargs)
__init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__
| tensorflow-master | tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Embedding')
class Embedding(Layer):
"""Turns positive integers (indexes) into dense vectors of fixed size.
e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`
This layer can only be used as the first layer in a model.
Example:
```python
model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# the model will take as input an integer matrix of size (batch,
# input_length).
# the largest integer (i.e. word index) in the input should be no larger
# than 999 (vocabulary size).
# now model.output_shape == (None, 10, 64), where None is the batch
# dimension.
input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
assert output_array.shape == (32, 10, 64)
```
Arguments:
input_dim: int > 0. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: int >= 0. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings` matrix.
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix.
embeddings_constraint: Constraint function applied to
the `embeddings` matrix.
mask_zero: Whether or not the input value 0 is a special "padding"
value that should be masked out.
This is useful when using recurrent layers
which may take variable length input.
If this is `True` then all subsequent layers
in the model need to support masking or an exception will be raised.
If mask_zero is set to True, as a consequence, index 0 cannot be
used in the vocabulary (input_dim should equal size of
vocabulary + 1).
input_length: Length of input sequences, when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
"""
def __init__(self,
input_dim,
output_dim,
embeddings_initializer='uniform',
embeddings_regularizer=None,
activity_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
input_length=None,
**kwargs):
if 'input_shape' not in kwargs:
if input_length:
kwargs['input_shape'] = (input_length,)
else:
kwargs['input_shape'] = (None,)
dtype = kwargs.pop('dtype', K.floatx())
super(Embedding, self).__init__(dtype=dtype, **kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero
self.input_length = input_length
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Note: most sparse optimizers do not have GPU kernels defined. When
# building graphs, the placement algorithm is able to place variables on CPU
# since it knows all kernels using the variable only exist on CPU.
# When eager execution is enabled, the placement decision has to be made
# right away. We check for the presence of GPUs to avoid complicating the
# TPU codepaths, which can handle sparse optimizers.
if context.executing_eagerly() and context.context().num_gpus():
with ops.device('cpu:0'):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name='embeddings',
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint)
else:
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name='embeddings',
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint)
self.built = True
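# Behavior sketch for compute_mask below (illustrative): with mask_zero=True,
# inputs [[3, 5, 0]] produce a boolean mask [[True, True, False]]; with
# mask_zero=False no mask is produced.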
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return math_ops.not_equal(inputs, 0)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.input_length is None:
return input_shape + (self.output_dim,)
else:
# input_length can be tuple if input is 3D or higher
if isinstance(self.input_length, (list, tuple)):
in_lens = list(self.input_length)
else:
in_lens = [self.input_length]
if len(in_lens) != len(input_shape) - 1:
raise ValueError('"input_length" is %s, '
'but received input has shape %s' % (str(
self.input_length), str(input_shape)))
else:
for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
if s1 is not None and s2 is not None and s1 != s2:
raise ValueError('"input_length" is %s, '
'but received input has shape %s' % (str(
self.input_length), str(input_shape)))
elif s1 is None:
in_lens[i] = s2
return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)
def call(self, inputs):
dtype = K.dtype(inputs)
if dtype != 'int32' and dtype != 'int64':
inputs = math_ops.cast(inputs, 'int32')
out = embedding_ops.embedding_lookup(self.embeddings, inputs)
return out
def get_config(self):
config = {
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'embeddings_initializer':
initializers.serialize(self.embeddings_initializer),
'embeddings_regularizer':
regularizers.serialize(self.embeddings_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'embeddings_constraint':
constraints.serialize(self.embeddings_constraint),
'mask_zero': self.mask_zero,
'input_length': self.input_length
}
base_config = super(Embedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| tensorflow-master | tensorflow/python/keras/layers/embeddings.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU V2 layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@keras_parameterized.run_all_keras_modes(config=_config)
class GRUV2Test(keras_parameterized.TestCase):
@parameterized.named_parameters(
('non_tan_activation', 'relu', 'sigmoid', 0, False, True, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True, True),
('unroll', 'tanh', 'sigmoid', 0, True, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False, True),
('not_reset_after', 'tanh', 'sigmoid', 0, False, True, False)
)
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias,
reset_after):
layer = rnn.GRU(1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias,
reset_after=reset_after)
self.assertFalse(layer.could_use_cudnn)
def test_keras_model_with_gru(self):
input_shape = 10
rnn_state_size = 8
output_shape = 8
timestep = 4
batch = 100
epoch = 10
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train, output_shape)
layer = rnn.GRU(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile('rmsprop', loss='mse')
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(rnn.GRU(10, return_sequences=True, unroll=False))
model.add(rnn.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_GRU(self):
layer_class = rnn.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
# Due to b/120160788
@test_util.run_v2_only
def test_gru_v2_feature_parity_with_canonical_gru(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 20
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=rnn_state_size,
random_seed=random_seed.DEFAULT_GRAPH_SEED)
y_train = keras.utils.to_categorical(y_train, rnn_state_size)
# For the last two batch items of the test data, zero out the last timestep
# to simulate variable-length sequences and exercise masking.
x_train[-2:, -1, :] = 0.0
y_train[-2:] = 0
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
masked_input = keras.layers.Masking()(inputs)
gru_layer = rnn_v1.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
output = gru_layer(masked_input)
gru_model = keras.models.Model(inputs, output)
weights = gru_model.get_weights()
y_1 = gru_model.predict(x_train)
gru_model.compile('rmsprop', 'mse')
gru_model.fit(x_train, y_train)
y_2 = gru_model.predict(x_train)
with test_util.device(use_gpu=True):
cudnn_layer = rnn.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
cudnn_model.set_weights(weights)
y_3 = cudnn_model.predict(x_train)
cudnn_model.compile('rmsprop', 'mse')
cudnn_model.fit(x_train, y_train)
y_4 = cudnn_model.predict(x_train)
self.assertAllClose(y_1, y_3, rtol=2e-5, atol=2e-5)
self.assertAllClose(y_2, y_4, rtol=2e-5, atol=2e-5)
@parameterized.named_parameters(
# test_name, use_bias, bias_initializer, activation
('normal', True, 'zeros'),
('no_bias', False, 'zeros'),
('random_bias', True, 'random_uniform'),
)
def test_gru_v2_model_save_load(self, use_bias, bias_initializer):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
batch = 10
timestep = 3
input_dim = 5
units = 2
x = np.random.random((batch, timestep, input_dim))
def build_model():
inputs = keras.layers.Input(
shape=[timestep, input_dim], dtype=dtypes.float32)
layer = rnn.GRU(
units,
use_bias=use_bias,
bias_initializer=bias_initializer)
output = layer(inputs)
return keras.models.Model(inputs, output), layer
model, layer = build_model()
y_ref = model.predict(x)
model.save_weights(h5_path)
cloned_model, new_layer = build_model()
cloned_model.load_weights(h5_path)
y = cloned_model.predict(x)
self.assertAllClose(y, y_ref)
self.assertAllClose(layer.get_weights(), new_layer.get_weights())
def test_gru_v2_output_on_multiple_kernel(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
with test_util.device(use_gpu=False):
layer = rnn.GRU(rnn_state_size)
output = layer(inputs)
cpu_model = keras.models.Model(inputs, output)
weights = cpu_model.get_weights()
y_1 = cpu_model.predict(x_train)
with test_util.device(use_gpu=True):
layer = rnn.GRU(rnn_state_size)
output = layer(inputs)
gpu_model = keras.models.Model(inputs, output)
gpu_model.set_weights(weights)
y_2 = gpu_model.predict(x_train)
# Note that CuDNN uses 'sigmoid' as activation, so the GRU V2 uses
# 'sigmoid' as default. Construct the canonical GRU with sigmoid to achieve
# the same output.
with test_util.device(use_gpu=True):
layer = rnn_v1.GRU(rnn_state_size,
recurrent_activation='sigmoid',
reset_after=True)
output = layer(inputs)
canonical_model = keras.models.Model(inputs, output)
canonical_model.set_weights(weights)
y_3 = canonical_model.predict(x_train)
self.assertAllClose(y_1, y_2)
self.assertAllClose(y_2, y_3)
@parameterized.named_parameters(
# test_name, time_major, go_backwards
('normal', False, False),
('time_major', True, False),
('go_backwards', False, True),
('both', True, True),
)
def test_time_major_and_go_backward(self, time_major, go_backwards):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
def build_model(layer_cls):
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=dtypes.float32)
layer = layer_cls(rnn_state_size,
recurrent_activation='sigmoid',
time_major=time_major,
return_sequences=True,
go_backwards=go_backwards,
reset_after=True)
if time_major:
converted_input = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
outputs = layer(converted_input)
outputs = keras.layers.Lambda(
lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
else:
outputs = layer(inputs)
return keras.models.Model(inputs, outputs)
gru_model = build_model(rnn_v1.GRU)
y_ref = gru_model.predict(x_train)
weights = gru_model.get_weights()
gru_v2_model = build_model(rnn.GRU)
gru_v2_model.set_weights(weights)
y = gru_v2_model.predict(x_train)
self.assertAllClose(y, y_ref)
def test_with_masking_layer_GRU(self):
layer_class = rnn.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_masking_with_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(rnn.GRU(10, return_sequences=True, unroll=False))
model.add(rnn.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_return_states_GRU(self):
layer_class = rnn.GRU
x = np.random.random((2, 3, 4))
y = np.abs(np.random.random((2, 5)))
s = np.abs(np.random.random((2, 5)))
inputs = keras.layers.Input(
shape=[3, 4], dtype=dtypes.float32)
masked = keras.layers.Masking()(inputs)
outputs, states = layer_class(units=5, return_state=True)(masked)
model = keras.models.Model(inputs, [outputs, states])
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001))
model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = rnn.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.GRU,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = rnn.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
# Run in V2 only due to b/120160788.
@test_util.run_v2_only
def test_statefulness_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = rnn.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse', run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
layer.reset_states()
mix_padded_input = np.ones((num_samples, timesteps))
mix_padded_input[0, 1] = 0
mix_padded_input[1, 0] = 0
mix_padded_input[1, 2] = 0
out8 = model.predict(mix_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_GRU_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
rnn.GRU(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
class GRULayerGradientTapeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes(config=_config)
def test_in_tape(self):
if not context.executing_eagerly():
self.skipTest('Test requires eager execution.')
time_steps = 10
embedding_size = 11
gru_unit_size = 12
gru = rnn.GRU(gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
x = random_ops.random_uniform([1, time_steps, embedding_size])
y = random_ops.random_uniform([1, gru_unit_size])
with backprop.GradientTape() as tape:
hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32)
_, state = gru(x, initial_state=hidden_state)
loss = math_ops.reduce_mean(math_ops.square(state - y))
tape.gradient(loss, gru.variables)
@keras_parameterized.run_all_keras_modes(config=_config)
class GRUGraphRewriteTest(keras_parameterized.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
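# The helper below trains briefly and then inspects the runtime tensor emitted
# by the GRU layer to check whether the CuDNN (GPU) or the standard (CPU)
# kernel was selected.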
def _test_runtime_with_model(self, model):
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape)
y_train = keras.utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer='sgd',
loss=['categorical_crossentropy', None])
existing_loss = 0
for _ in range(self.epoch):
history = model.fit(x_train, y_train)
loss_value = history.history['loss'][0]
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
_, runtime_value = model.predict(x_train)
if test.is_gpu_available():
self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
def test_GRU_runtime(self):
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
outputs, runtime = layer(inputs)
# Expand the runtime so that it is a 1D tensor instead of a scalar.
# TF models do not work with scalar model outputs, especially during
# aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
# Due to b/120160788.
@test_util.run_v2_only
def test_UnifiedGRU_with_cond(self):
# This test demonstrates the graph rewrite performed by the grappler plugin
# when the function returns a different number of internal states.
layer = rnn.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
zeros = array_ops.zeros([self.batch, self.output_shape])
dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
a = constant_op.constant(0)
b = constant_op.constant(1)
# Will always run the GRU layer.
outputs, runtime = control_flow_ops.cond(
gen_math_ops.less(a, b),
lambda: layer(inputs),
lambda: (zeros, dummy_runtime))
# Expand the runtime so that it is a 1D tensor instead of a scalar.
# TF models do not work with scalar model outputs, especially during
# aggregation.
runtime = keras.layers.Lambda(
lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/gru_v2_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Wrapper layers: layers that augment the functionality of another layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.recurrent import _standardize_args
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Wrapper')
class Wrapper(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
Arguments:
layer: The layer to be wrapped.
"""
def __init__(self, layer, **kwargs):
assert isinstance(layer, Layer)
self.layer = layer
# Tracks mapping of Wrapper inputs to inner layer inputs. Useful when
# the inner layer has update ops that depend on its inputs (as opposed
# to the inputs to the Wrapper layer).
self._input_map = {}
super(Wrapper, self).__init__(**kwargs)
def build(self, input_shape=None):
if not self.layer.built:
self.layer.build(input_shape)
self.built = True
@property
def activity_regularizer(self):
if hasattr(self.layer, 'activity_regularizer'):
return self.layer.activity_regularizer
else:
return None
def get_config(self):
config = {
'layer': {
'class_name': self.layer.__class__.__name__,
'config': self.layer.get_config()
}
}
base_config = super(Wrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
layer = deserialize_layer(
config.pop('layer'), custom_objects=custom_objects)
return cls(layer, **config)
@keras_export('keras.layers.TimeDistributed')
class TimeDistributed(Wrapper):
"""This wrapper allows to apply a layer to every temporal slice of an input.
The input should be at least 3D, and the dimension of index one
will be considered to be the temporal dimension.
Consider a batch of 32 samples,
where each sample is a sequence of 10 vectors of 16 dimensions.
The batch input shape of the layer is then `(32, 10, 16)`,
and the `input_shape`, not including the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer
to each of the 10 timesteps, independently:
```python
# as the first layer in a model
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# now model.output_shape == (None, 10, 8)
```
The output will then have shape `(32, 10, 8)`.
In subsequent layers, there is no need for the `input_shape`:
```python
model.add(TimeDistributed(Dense(32)))
# now model.output_shape == (None, 10, 32)
```
The output will then have shape `(32, 10, 32)`.
`TimeDistributed` can be used with arbitrary layers, not just `Dense`,
for instance with a `Conv2D` layer:
```python
model = Sequential()
model.add(TimeDistributed(Conv2D(64, (3, 3)),
input_shape=(10, 299, 299, 3)))
```
Arguments:
layer: a layer instance.
Call arguments:
inputs: Input tensor.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the
wrapped layer (only if the layer supports this argument).
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked. This argument is passed to the
wrapped layer (only if the layer supports this argument).
Raises:
ValueError: If not initialized with a `Layer` instance.
"""
def __init__(self, layer, **kwargs):
if not isinstance(layer, Layer):
raise ValueError(
'Please initialize `TimeDistributed` layer with a '
'`Layer` instance. You passed: {input}'.format(input=layer))
super(TimeDistributed, self).__init__(layer, **kwargs)
self.supports_masking = True
# It is safe to use the fast, reshape-based approach with all of our
# built-in Layers.
self._always_use_reshape = (
layer_utils.is_builtin_layer(layer) and
not getattr(layer, 'stateful', False))
def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
"""Finds non-specific dimensions in the static shapes.
The static shapes are replaced with the corresponding dynamic shapes of the
tensor.
Arguments:
init_tuple: a tuple, the first part of the output shape
tensor: the tensor from which to get the (static and dynamic) shapes
as the last part of the output shape
start_idx: int, which indicates the first dimension to take from
the static shape of the tensor
int_shape: an alternative static shape to take as the last part
of the output shape
Returns:
The new int_shape with the first part from init_tuple
and the last part from either `int_shape` (if provided)
or `tensor.shape`, where every `None` is replaced by
the corresponding dimension from `tf.shape(tensor)`.
"""
# replace all None in int_shape by K.shape
if int_shape is None:
int_shape = K.int_shape(tensor)[start_idx:]
if not any(not s for s in int_shape):
return init_tuple + tuple(int_shape)
shape = K.shape(tensor)
int_shape = list(int_shape)
for i, s in enumerate(int_shape):
if not s:
int_shape[i] = shape[start_idx + i]
return init_tuple + tuple(int_shape)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if len(input_shape) < 3:
raise ValueError(
'`TimeDistributed` Layer should be passed an `input_shape ` '
'with at least 3 dimensions, received: ' + str(input_shape))
# Don't enforce the batch or time dimension.
self.input_spec = InputSpec(shape=[None, None] + input_shape[2:])
child_input_shape = [input_shape[0]] + input_shape[2:]
super(TimeDistributed, self).build(tuple(child_input_shape))
self.built = True
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
child_input_shape = tensor_shape.TensorShape([input_shape[0]] +
input_shape[2:])
child_output_shape = self.layer.compute_output_shape(child_input_shape)
if not isinstance(child_output_shape, tensor_shape.TensorShape):
child_output_shape = tensor_shape.TensorShape(child_output_shape)
child_output_shape = child_output_shape.as_list()
timesteps = input_shape[1]
return tensor_shape.TensorShape([child_output_shape[0], timesteps] +
child_output_shape[1:])
def call(self, inputs, training=None, mask=None):
kwargs = {}
if generic_utils.has_arg(self.layer.call, 'training'):
kwargs['training'] = training
input_shape = K.int_shape(inputs)
if input_shape[0] and not self._always_use_reshape:
# batch size matters, use rnn-based implementation
def step(x, _):
output = self.layer.call(x, **kwargs)
return output, []
_, outputs, _ = K.rnn(
step,
inputs,
initial_states=[],
input_length=input_shape[1],
unroll=False)
y = outputs
else:
# No batch size specified, therefore the layer will be able
# to process batches of any size.
# We can go with reshape-based implementation for performance.
input_length = input_shape[1]
if not input_length:
input_length = array_ops.shape(inputs)[1]
inner_input_shape = self._get_shape_tuple((-1,), inputs, 2)
# Shape: (num_samples * timesteps, ...). And track the
# transformation in self._input_map.
input_uid = generic_utils.object_list_uid(inputs)
inputs = array_ops.reshape(inputs, inner_input_shape)
self._input_map[input_uid] = inputs
# (num_samples * timesteps, ...)
if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None:
inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
kwargs['mask'] = K.reshape(mask, inner_mask_shape)
y = self.layer.call(inputs, **kwargs)
# Shape: (num_samples, timesteps, ...)
output_shape = self.compute_output_shape(input_shape).as_list()
output_shape = self._get_shape_tuple(
(-1, input_length), y, 1, output_shape[2:])
y = array_ops.reshape(y, output_shape)
# Apply activity regularizer if any:
if (hasattr(self.layer, 'activity_regularizer') and
self.layer.activity_regularizer is not None):
regularization_loss = self.layer.activity_regularizer(y)
base_layer_utils.check_graph_consistency(
regularization_loss, method='activity_regularizer')
self.add_loss(regularization_loss, inputs)
return y
def compute_mask(self, inputs, mask=None):
"""Computes an output mask tensor for Embedding layer.
This is based on the inputs, mask, and the inner layer.
If batch size is specified:
Simply return the input `mask`. (An rnn-based implementation with
more than one rnn input is required but not supported in tf.keras yet.)
Otherwise we call `compute_mask` of the inner layer at each time step.
If the output mask at each time step is not `None`:
(E.g., inner layer is Masking or RNN)
Concatenate all of them and return the concatenation.
If the output mask at each time step is `None` and the input mask is not
`None`: (E.g., inner layer is Dense)
Reduce the input_mask to 2 dimensions and return it.
Otherwise (both the output mask and the input mask are `None`):
(E.g., `mask` is not used at all)
Return `None`.
Arguments:
inputs: Tensor with shape [batch size, timesteps, ...] indicating the
input to TimeDistributed. If static shape information is available for
"batch size", `mask` is returned unmodified.
mask: Either None (indicating no masking) or a Tensor indicating the
input mask for TimeDistributed. The shape can be static or dynamic.
Returns:
Either None (no masking), or a [batch size, timesteps, ...] Tensor with
an output mask for the TimeDistributed layer. The shape of the trailing
dimensions is taken from the input mask shape (if the computed output
mask is None), from the computed output mask (if it is not None), or
from the computed output shape otherwise.
"""
# Cases that need to call layer.compute_mask even when input_mask is None:
# a Masking layer, or an Embedding layer with mask_zero=True.
input_shape = K.int_shape(inputs)
if input_shape[0]:
# batch size matters, we currently do not handle mask explicitly
return mask
inner_mask = mask
if inner_mask is not None:
inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
inner_mask = K.reshape(inner_mask, inner_mask_shape)
input_uid = generic_utils.object_list_uid(inputs)
inner_inputs = self._input_map.get(input_uid, inputs)
output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
if output_mask is None:
if mask is None:
return None
# input_mask is not None, and output_mask is None:
# we should return a non-None mask
output_mask = mask
for _ in range(2, len(K.int_shape(mask))):
output_mask = K.any(output_mask, axis=-1)
else:
# output_mask is not None. We need to reshape it
input_length = input_shape[1]
if not input_length:
input_length = K.shape(inputs)[1]
output_mask_int_shape = K.int_shape(output_mask)
if output_mask_int_shape is None:
# if the output_mask does not have a static shape,
# its shape must be the same as mask's
if mask is not None:
output_mask_int_shape = K.int_shape(mask)
else:
output_mask_int_shape = K.compute_output_shape(input_shape)[:-1]
output_mask_shape = self._get_shape_tuple(
(-1, input_length), output_mask, 1, output_mask_int_shape[1:])
output_mask = K.reshape(output_mask, output_mask_shape)
return output_mask
@keras_export('keras.layers.Bidirectional')
class Bidirectional(Wrapper):
"""Bidirectional wrapper for RNNs.
Arguments:
layer: `Recurrent` instance.
merge_mode: Mode by which outputs of the
forward and backward RNNs will be combined.
One of {'sum', 'mul', 'concat', 'ave', None}.
If None, the outputs will not be combined,
they will be returned as a list.
backward_layer: Optional `Recurrent` instance to be used to handle
backwards input processing. If `backward_layer` is not provided,
the layer instance passed as the `layer` argument will be used to
generate the backward layer automatically.
Note that the provided `backward_layer` layer should have properties
matching those of the `layer` argument, in particular it should have the
same values for `stateful`, `return_state`, `return_sequences`, etc.
In addition, `backward_layer` and `layer` should have
different `go_backwards` argument values.
A `ValueError` will be raised if these requirements are not met.
Call arguments:
The call arguments for this layer are the same as those of the wrapped RNN
layer.
Raises:
ValueError:
1. If `layer` or `backward_layer` is not a `Layer` instance.
2. In case of invalid `merge_mode` argument.
3. If `backward_layer` has mismatched properties compared to `layer`.
Examples:
```python
model = Sequential()
model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))
model.add(Bidirectional(LSTM(10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# With custom backward layer
model = Sequential()
forward_layer = LSTM(10, return_sequences=True)
backward_layer = LSTM(10, activation='relu', return_sequences=True,
go_backwards=True)
model.add(Bidirectional(forward_layer, backward_layer=backward_layer,
input_shape=(5, 10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
"""
def __init__(self,
layer,
merge_mode='concat',
weights=None,
backward_layer=None,
**kwargs):
if not isinstance(layer, Layer):
raise ValueError(
'Please initialize `Bidirectional` layer with a '
'`Layer` instance. You passed: {input}'.format(input=layer))
if backward_layer is not None and not isinstance(backward_layer, Layer):
raise ValueError('`backward_layer` needs to be a `Layer` instance. '
'You passed: {input}'.format(input=backward_layer))
if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
raise ValueError('Invalid merge mode. '
'Merge mode should be one of '
'{"sum", "mul", "ave", "concat", None}')
# Recreate the forward layer from the original layer config, so that it will
# not carry over any state from the layer.
self.forward_layer = self._recreate_layer_from_config(layer)
if backward_layer is None:
self.backward_layer = self._recreate_layer_from_config(
layer, go_backwards=True)
else:
self.backward_layer = backward_layer
# Keep the custom backward layer config, so that we can save it later. The
# layer's name might be updated below with prefix 'backward_', and we want
# to preserve the original config.
self._backward_layer_config = backward_layer.get_config()
self.forward_layer._name = 'forward_' + self.forward_layer.name
self.backward_layer._name = 'backward_' + self.backward_layer.name
self._verify_layer_config()
def force_zero_output_for_mask(layer):
# Force the zero_output_for_mask to be True if returning sequences.
if getattr(layer, 'zero_output_for_mask', None) is not None:
layer.zero_output_for_mask = layer.return_sequences
force_zero_output_for_mask(self.forward_layer)
force_zero_output_for_mask(self.backward_layer)
self.merge_mode = merge_mode
if weights:
nw = len(weights)
self.forward_layer.initial_weights = weights[:nw // 2]
self.backward_layer.initial_weights = weights[nw // 2:]
self.stateful = layer.stateful
self.return_sequences = layer.return_sequences
self.return_state = layer.return_state
self.supports_masking = True
self._trainable = True
self._num_constants = 0
# We don't want to track `layer` since we're already tracking the two copies
# of it we actually run.
self._setattr_tracking = False
super(Bidirectional, self).__init__(layer, **kwargs)
self._setattr_tracking = True
self.input_spec = layer.input_spec
def _verify_layer_config(self):
"""Ensure the forward and backward layers have valid common property."""
if self.forward_layer.go_backwards == self.backward_layer.go_backwards:
raise ValueError('Forward layer and backward layer should have different '
'`go_backwards` value.')
common_attributes = ('stateful', 'return_sequences', 'return_state')
for a in common_attributes:
forward_value = getattr(self.forward_layer, a)
backward_value = getattr(self.backward_layer, a)
if forward_value != backward_value:
raise ValueError(
'Forward layer and backward layer are expected to have the same '
'value for attribute {attr}, got {forward} and {backward}'.format(
attr=a, forward=forward_value, backward=backward_value))
def _recreate_layer_from_config(self, layer, go_backwards=False):
# When recreating the layer from its config, it is possible that the layer
# is an RNN layer that contains custom cells. In this case we inspect the
# layer and pass the custom cell class as part of the `custom_objects`
# argument when calling `from_config`.
# See https://github.com/tensorflow/tensorflow/issues/26581 for more detail.
config = layer.get_config()
if go_backwards:
config['go_backwards'] = not config['go_backwards']
if 'custom_objects' in tf_inspect.getfullargspec(
layer.__class__.from_config).args:
custom_objects = {}
cell = getattr(layer, 'cell', None)
if cell is not None:
custom_objects[cell.__class__.__name__] = cell.__class__
# For StackedRNNCells
stacked_cells = getattr(cell, 'cells', [])
for c in stacked_cells:
custom_objects[c.__class__.__name__] = c.__class__
return layer.__class__.from_config(config, custom_objects=custom_objects)
else:
return layer.__class__.from_config(config)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
output_shape = self.forward_layer.compute_output_shape(input_shape)
if not isinstance(output_shape, tensor_shape.TensorShape):
output_shape = tensor_shape.TensorShape(output_shape)
output_shape = tuple(output_shape.as_list())
if self.return_state:
state_shape = output_shape[1:]
output_shape = output_shape[0]
if self.merge_mode == 'concat':
output_shape = list(output_shape)
output_shape[-1] *= 2
output_shape = tuple(output_shape)
elif self.merge_mode is None:
output_shape = [output_shape, copy.copy(output_shape)]
if self.return_state:
if self.merge_mode is None:
return output_shape + state_shape + copy.copy(state_shape)
return [output_shape] + state_shape + copy.copy(state_shape)
return output_shape
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
"""`Bidirectional.__call__` implements the same API as the wrapped `RNN`."""
inputs, initial_state, constants = _standardize_args(
inputs, initial_state, constants, self._num_constants)
if isinstance(inputs, list):
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[0]
if initial_state is None and constants is None:
return super(Bidirectional, self).__call__(inputs, **kwargs)
# Applies the same workaround as in `RNN.__call__`
additional_inputs = []
additional_specs = []
if initial_state is not None:
# Check whether `initial_state` can be split in half
num_states = len(initial_state)
if num_states % 2 > 0:
raise ValueError(
'When passing `initial_state` to a Bidirectional RNN, '
'the state should be a list containing the states of '
'the underlying RNNs. '
'Found: ' + str(initial_state))
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
state_specs = [InputSpec(shape=K.int_shape(state))
for state in initial_state]
self.forward_layer.state_spec = state_specs[:num_states // 2]
self.backward_layer.state_spec = state_specs[num_states // 2:]
additional_specs += state_specs
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self.forward_layer.constants_spec = constants_spec
self.backward_layer.constants_spec = constants_spec
additional_specs += constants_spec
self._num_constants = len(constants)
self.forward_layer._num_constants = self._num_constants
self.backward_layer._num_constants = self._num_constants
is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError('The initial state of a Bidirectional'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors'
' (a "Keras tensor" is a tensor that was'
' returned by a Keras layer, or by `Input`)')
if is_keras_tensor:
# Compute the full input spec, including state
full_input = [inputs] + additional_inputs
# The original input_spec is None since there could be a nested tensor
# input. Update the input_spec to match the inputs.
full_input_spec = [None for _ in range(len(nest.flatten(inputs)))
] + additional_specs
# Remove these kwargs since the values are passed with the input list.
kwargs['initial_state'] = None
kwargs['constants'] = None
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(Bidirectional, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(Bidirectional, self).__call__(inputs, **kwargs)
def call(self,
inputs,
training=None,
mask=None,
initial_state=None,
constants=None):
"""`Bidirectional.call` implements the same API as the wrapped `RNN`."""
kwargs = {}
if generic_utils.has_arg(self.layer.call, 'training'):
kwargs['training'] = training
if generic_utils.has_arg(self.layer.call, 'mask'):
kwargs['mask'] = mask
if generic_utils.has_arg(self.layer.call, 'constants'):
kwargs['constants'] = constants
if generic_utils.has_arg(self.layer.call, 'initial_state'):
if isinstance(inputs, list) and len(inputs) > 1:
# initial_states are keras tensors, which means they are passed in
# together with inputs as a list. The initial_states need to be split into
# forward and backward sections, and be fed to the layers accordingly.
forward_inputs = [inputs[0]]
backward_inputs = [inputs[0]]
pivot = (len(inputs) - self._num_constants) // 2 + 1
# add forward initial state
forward_inputs += inputs[1:pivot]
if not self._num_constants:
# add backward initial state
backward_inputs += inputs[pivot:]
else:
# add backward initial state
backward_inputs += inputs[pivot:-self._num_constants]
# add constants for forward and backward layers
forward_inputs += inputs[-self._num_constants:]
backward_inputs += inputs[-self._num_constants:]
forward_state, backward_state = None, None
if 'constants' in kwargs:
kwargs['constants'] = None
elif initial_state is not None:
# initial_states are not keras tensors, e.g. eager tensors from np arrays.
# They are only passed in from the kwarg initial_state, and should be passed
# to the forward/backward layers via the kwarg initial_state as well.
forward_inputs, backward_inputs = inputs, inputs
half = len(initial_state) // 2
forward_state = initial_state[:half]
backward_state = initial_state[half:]
else:
forward_inputs, backward_inputs = inputs, inputs
forward_state, backward_state = None, None
y = self.forward_layer.call(forward_inputs,
initial_state=forward_state, **kwargs)
y_rev = self.backward_layer.call(backward_inputs,
initial_state=backward_state, **kwargs)
else:
y = self.forward_layer.call(inputs, **kwargs)
y_rev = self.backward_layer.call(inputs, **kwargs)
if self.return_state:
states = y[1:] + y_rev[1:]
y = y[0]
y_rev = y_rev[0]
if self.return_sequences:
y_rev = K.reverse(y_rev, 1)
if self.merge_mode == 'concat':
output = K.concatenate([y, y_rev])
elif self.merge_mode == 'sum':
output = y + y_rev
elif self.merge_mode == 'ave':
output = (y + y_rev) / 2
elif self.merge_mode == 'mul':
output = y * y_rev
elif self.merge_mode is None:
output = [y, y_rev]
else:
raise ValueError(
'Unrecognized value for `merge_mode`: %s' % (self.merge_mode))
if self.return_state:
if self.merge_mode is None:
return output + states
return [output] + states
return output
def reset_states(self):
self.forward_layer.reset_states()
self.backward_layer.reset_states()
def build(self, input_shape):
with K.name_scope(self.forward_layer.name):
self.forward_layer.build(input_shape)
with K.name_scope(self.backward_layer.name):
self.backward_layer.build(input_shape)
self.built = True
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
if self.return_sequences:
if not self.merge_mode:
output_mask = [mask, mask]
else:
output_mask = mask
else:
output_mask = [None, None] if not self.merge_mode else None
if self.return_state:
states = self.forward_layer.states
state_mask = [None for _ in states]
if isinstance(output_mask, list):
return output_mask + state_mask * 2
return [output_mask] + state_mask * 2
return output_mask
@property
def constraints(self):
constraints = {}
if hasattr(self.forward_layer, 'constraints'):
constraints.update(self.forward_layer.constraints)
constraints.update(self.backward_layer.constraints)
return constraints
def get_config(self):
config = {'merge_mode': self.merge_mode}
if self._num_constants:
config['num_constants'] = self._num_constants
if hasattr(self, '_backward_layer_config'):
config['backward_layer'] = {
'class_name': self.backward_layer.__class__.__name__,
'config': self._backward_layer_config,
}
base_config = super(Bidirectional, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Instead of updating the input, create a copy and use that.
config = config.copy()
num_constants = config.pop('num_constants', 0)
backward_layer_config = config.pop('backward_layer', None)
if backward_layer_config is not None:
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
backward_layer = deserialize_layer(
backward_layer_config, custom_objects=custom_objects)
config['backward_layer'] = backward_layer
layer = super(Bidirectional, cls).from_config(config,
custom_objects=custom_objects)
layer._num_constants = num_constants
return layer
| tensorflow-master | tensorflow/python/keras/layers/wrappers.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for separable convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class SeparableConv1DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
length = 7
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.SeparableConv1D,
kwargs=kwargs,
input_shape=(num_samples, length, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
('padding_causal', {'padding': 'causal'}),
('strides', {'strides': 2}),
('dilation_rate', {'dilation_rate': 2}),
('depth_multiplier', {'depth_multiplier': 2}),
)
def test_separable_conv1d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
self._run_test(kwargs)
def test_separable_conv1d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'depthwise_regularizer': 'l2',
'pointwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 4)
def test_separable_conv1d_constraints(self):
d_constraint = lambda x: x
p_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'pointwise_constraint': p_constraint,
'depthwise_constraint': d_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@keras_parameterized.run_all_keras_modes
class SeparableConv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.SeparableConv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
('strides', {'strides': 2}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('dilation_rate', {'dilation_rate': 2}),
('depth_multiplier', {'depth_multiplier': 2}),
)
def test_separable_conv2d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_separable_conv2d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'depthwise_regularizer': 'l2',
'pointwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 4)
def test_separable_conv2d_constraints(self):
d_constraint = lambda x: x
p_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'pointwise_constraint': p_constraint,
'depthwise_constraint': d_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
| tensorflow-master | tensorflow/python/keras/layers/separable_convolutional_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers for TF 2.0.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util.tf_export import keras_export
# The following string constants are used by the Defun approach for the unified
# backend of LSTM and GRU.
_DEFUN_API_NAME_ATTRIBUTE = 'api_implements'
_DEFUN_DEVICE_ATTRIBUTE = 'api_preferred_device'
_CPU_DEVICE_NAME = 'CPU'
_GPU_DEVICE_NAME = 'GPU'
# The following number constants are used to represent the runtime of the defun
# backend function. Since the CPU and GPU implementations are mathematically the
# same, we need some signal for the function to indicate which one was executed.
# This is for testing purposes, to verify the correctness of the backend-function
# swapping.
_RUNTIME_UNKNOWN = 0
_RUNTIME_CPU = 1
_RUNTIME_GPU = 2
@keras_export('keras.layers.GRUCell', v1=[])
class GRUCell(recurrent.GRUCell):
"""Cell class for the GRU layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 (default) will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and CuDNN compatible).
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
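For illustration, a minimal usage sketch assuming the public `tf.keras` API:
```python
import tensorflow as tf

# 10 timesteps with 8 features each.
inputs = tf.keras.Input(shape=(10, 8))
# Wrap the cell in an RNN layer to process the full sequence.
outputs = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))(inputs)
# outputs has shape (None, 4): the last hidden state of the cell.
```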
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
reset_after=True,
**kwargs):
super(GRUCell, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after,
**kwargs)
@keras_export('keras.layers.GRU', v1=[])
class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU):
"""Gated Recurrent Unit - Cho et al. 2014.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the CuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == 'tanh'
2. `recurrent_activation` == 'sigmoid'
3. `recurrent_dropout` == 0
4. `unroll` is False
5. `use_bias` is True
6. `reset_after` is True
7. Inputs are not masked or strictly right padded.
There are two variants of the GRU implementation. The default one is based on
[v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden
state before matrix multiplication. The other one is based on
[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. To use this variant, set `reset_after=True` and
`recurrent_activation='sigmoid'`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and CuDNN compatible).
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
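For illustration, a minimal sketch assuming the public `tf.keras` API; the
default constructor arguments already satisfy the cuDNN requirements listed
above:
```python
import tensorflow as tf

# 10 timesteps with 8 features each.
inputs = tf.keras.Input(shape=(10, 8))
outputs = tf.keras.layers.GRU(4, return_sequences=True)(inputs)
# outputs has shape (None, 10, 4): one 4-dimensional output per timestep.
```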
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
reset_after=True,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self._return_runtime = kwargs.pop('return_runtime', False)
super(GRU, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
time_major=time_major,
reset_after=reset_after,
**kwargs)
# CuDNN uses the following settings by default; they are not configurable.
self.could_use_cudnn = (
activation == 'tanh' and recurrent_activation == 'sigmoid' and
recurrent_dropout == 0 and not unroll and use_bias and
reset_after)
def call(self, inputs, mask=None, training=None, initial_state=None):
# GRU does not support constants. Ignore them during processing.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if not self.could_use_cudnn:
kwargs = {'training': training}
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
def step(cell_inputs, cell_states):
return self.cell.call(cell_inputs, cell_states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
# This is a dummy tensor for testing purposes.
runtime = _runtime(_RUNTIME_UNKNOWN)
else:
last_output, outputs, runtime, states = self._defun_gru_call(
inputs, initial_state, training, mask)
if self.stateful:
updates = [state_ops.assign(self.states[0], states[0])]
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self._return_runtime:
return output, runtime
else:
return output
def _defun_gru_call(self, inputs, initial_state, training, mask):
# Use the new defun approach for the backend implementation swap.
# Note that the different implementations need to have the same function
# signature, e.g. the tensor parameters need to have the same shapes and dtypes.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
cudnn_gru_kwargs = {
'inputs': inputs,
'init_h': initial_state[0],
'kernel': self.cell.kernel,
'recurrent_kernel': self.cell.recurrent_kernel,
'bias': self.cell.bias,
'mask': mask,
'time_major': self.time_major,
'go_backwards': self.go_backwards
}
normal_gru_kwargs = cudnn_gru_kwargs.copy()
normal_gru_kwargs.update({
'activation': self.activation,
'recurrent_activation': self.recurrent_activation
})
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME
or (device_type is None and context.num_gpus() > 0))
and
(mask is None or is_sequence_right_padded(mask, self.time_major)))
# Under eager context, check the device placement and prefer the
# GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, runtime = cudnn_gru(**cudnn_gru_kwargs)
else:
last_output, outputs, new_h, runtime = standard_gru(**normal_gru_kwargs)
else:
if mask is None:
last_output, outputs, new_h, runtime = gru_with_backend_selection(
normal_gru_kwargs, cudnn_gru_kwargs)
else:
def with_mask_support():
# TODO(b/134702514): Change to use backend selection.
# return gru_with_backend_selection(normal_gru_kwargs,
# cudnn_gru_kwargs)
return standard_gru(**normal_gru_kwargs)
def without_mask_support():
return standard_gru(**normal_gru_kwargs)
last_output, outputs, new_h, runtime = control_flow_ops.cond(
is_sequence_right_padded(mask, self.time_major),
true_fn=with_mask_support,
false_fn=without_mask_support)
states = [new_h]
return last_output, outputs, runtime, states
def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, activation,
recurrent_activation, mask, time_major, go_backwards):
"""GRU with standard kernel implementation.
This implementation can be run on all types of hardware.
This implementation lifts out all the layer weights and makes them function
parameters. It has the same number of tensor input params as the CuDNN
counterpart. The RNN step logic has been simplified, e.g. dropout and mask
handling are removed since the CuDNN implementation does not support them.
Arguments:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. The bias contains the
combined input_bias and recurrent_bias.
activation: Activation function to use for output.
recurrent_activation: Activation function to use for hidden recurrent state.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has the same shape as init_h.
runtime: constant string tensor which indicates the real runtime hardware.
This value is for testing purposes and should not be used by users.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
input_bias, recurrent_bias = array_ops.unstack(bias)
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(cell_inputs, kernel)
matrix_x = K.bias_add(matrix_x, input_bias)
x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=1)
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, recurrent_kernel)
matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
recurrent_z, recurrent_r, recurrent_h = array_ops.split(matrix_inner, 3,
axis=1)
z = recurrent_activation(x_z + recurrent_z)
r = recurrent_activation(x_r + recurrent_r)
hh = activation(x_h + r * recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=timesteps)
return last_output, outputs, new_states[0], _runtime(_RUNTIME_CPU)
def cudnn_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major,
go_backwards):
"""GRU with CuDNN implementation which is only available for GPU."""
if not time_major:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
init_h = array_ops.expand_dims(init_h, axis=0)
weights = array_ops.split(kernel, 3, axis=1)
weights += array_ops.split(recurrent_kernel, 3, axis=1)
# Note that the bias was initialized as shape (2, 3 * units); flatten it into
# (6 * units).
bias = array_ops.split(K.flatten(bias), 6)
# Note that the gate order for CuDNN is different from the canonical format.
# The canonical format is [z, r, h], whereas CuDNN uses [r, z, h]. The swap
# needs to be done for kernel, recurrent_kernel, input_bias and recurrent_bias.
# z is the update gate weights.
# r is the reset gate weights.
# h is the candidate (new hidden state) weights.
weights[0], weights[1] = weights[1], weights[0]
weights[3], weights[4] = weights[4], weights[3]
bias[0], bias[1] = bias[1], bias[0]
bias[3], bias[4] = bias[4], bias[3]
params = _canonical_to_params(
weights=weights,
biases=bias,
shape=constant_op.constant([-1]),
transpose_weights=True)
if mask is not None:
sequence_length = calculate_sequence_by_mask(mask, time_major)
if go_backwards:
inputs = array_ops.reverse_sequence_v2(inputs, sequence_length,
seq_axis=0, batch_axis=1)
outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(
inputs, input_h=init_h, input_c=0, params=params, is_training=True,
rnn_mode='gru', sequence_lengths=sequence_length)
else:
if go_backwards:
# Reverse axis 0 since the input has already been converted to time major.
inputs = array_ops.reverse(inputs, axis=[0])
outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=init_h, input_c=0, params=params, is_training=True,
rnn_mode='gru')
last_output = outputs[-1]
if not time_major:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = h[0]
# In the case of variable-length input, the cudnn kernel fills the output with
# zeros, whereas the default keras behavior is to carry over the output from
# t-1, so that in the return_sequences=False case the user still gets the
# final effective output instead of just 0s at the last timestep.
# In order to mimic the default keras behavior, we copy the final h state as
# last_output, since it is numerically the same as that output.
if mask is not None:
last_output = h
return last_output, outputs, h, _runtime(_RUNTIME_GPU)
def gru_with_backend_selection(normal_gru_params, cudnn_gru_params):
"""Call the GRU with optimized backend kernel selection.
Under the hood, this function creates two TF functions: one with the most
generic kernel, which can run under all device conditions, and a second one
with the CuDNN-specific kernel, which can only run on GPU.
The first function is called with `normal_gru_params`, while the second
function is not called, but only registered in the graph. Grappler will
do the proper graph rewrite and swap in the optimized TF function based on
the device placement.
Args:
normal_gru_params: Dict, parameters for the generic TF function.
cudnn_gru_params: Dict, parameters for the CuDNN specific TF function.
Returns:
List of output tensors, same as standard_gru.
"""
# Each time a `tf.function` is called, we give it a uniquely
# identifiable API name, so that Grappler won't get confused when it
# sees multiple GRU layers added to the same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'gru_' + str(uuid.uuid4())
defun_standard_gru = _generate_defun_backend(
api_name, _CPU_DEVICE_NAME, standard_gru)
defun_cudnn_gru = _generate_defun_backend(
api_name, _GPU_DEVICE_NAME, cudnn_gru)
# Call the normal GRU impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, runtime = defun_standard_gru(
**normal_gru_params)
function.register(defun_cudnn_gru, **cudnn_gru_params)
return last_output, outputs, new_h, runtime
@keras_export('keras.layers.LSTMCell', v1=[])
class LSTMCell(recurrent.LSTMCell):
"""Cell class for the LSTM layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass `None`, no activation is applied (ie. "linear"
activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of smaller dot
products and additions, whereas mode 2 (default) will batch them into
fewer, larger operations. These modes will have different performance
profiles on different hardware and for different applications.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
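For illustration, a minimal usage sketch assuming the public `tf.keras` API:
```python
import tensorflow as tf

# 10 timesteps with 8 features each.
inputs = tf.keras.Input(shape=(10, 8))
# Wrap the cell in an RNN layer; return_state also exposes [h, c].
rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4), return_state=True)
outputs, state_h, state_c = rnn(inputs)
# outputs, state_h and state_c each have shape (None, 4).
```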
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
**kwargs):
super(LSTMCell, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
**kwargs)
@keras_export('keras.layers.LSTM', v1=[])
class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM):
"""Long Short-Term Memory layer - Hochreiter 1997.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the CuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == 'tanh'
2. `recurrent_activation` == 'sigmoid'
3. `recurrent_dropout` == 0
4. `unroll` is False
5. `use_bias` is True
6. Inputs are not masked or strictly right padded.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
is applied (ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2. Mode 1 will structure
its operations as a larger number of smaller dot products and additions,
whereas mode 2 will batch them into fewer, larger operations. These modes
will have different performance profiles on different hardware and for
different applications.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
unroll: Boolean (default False). If True, the network will be unrolled, else
a symbolic loop will be used. Unrolling can speed-up a RNN, although it
tends to be more memory-intensive. Unrolling is only suitable for short
sequences.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
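For illustration, a minimal sketch assuming the public `tf.keras` API; with
the default constructor arguments the cuDNN requirements listed above are met:
```python
import tensorflow as tf

# 10 timesteps with 8 features each.
inputs = tf.keras.Input(shape=(10, 8))
outputs, state_h, state_c = tf.keras.layers.LSTM(4, return_state=True)(inputs)
# outputs and both states have shape (None, 4).
```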
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=2,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
unroll=False,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self.return_runtime = kwargs.pop('return_runtime', False)
super(LSTM, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
time_major=time_major,
unroll=unroll,
**kwargs)
self.state_spec = [
InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
]
self.could_use_cudnn = (
activation == 'tanh' and recurrent_activation == 'sigmoid' and
recurrent_dropout == 0 and not unroll and use_bias)
def call(self, inputs, mask=None, training=None, initial_state=None):
# LSTM does not support constants. Ignore it during process.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if not self.could_use_cudnn:
# Fall back to use the normal LSTM.
kwargs = {'training': training}
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
runtime = _runtime(_RUNTIME_UNKNOWN)
else:
# Use the new defun approach for backend implementation swap.
      # Note that the different implementations need to have the same function
      # signature, e.g. the tensor parameters need to have the same shape and
      # dtypes. Since CuDNN has an extra set of biases, those biases will be
      # passed to both the normal and CuDNN implementations.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
cudnn_lstm_kwargs = {
'inputs': inputs,
'init_h': initial_state[0],
'init_c': initial_state[1],
'kernel': self.cell.kernel,
'recurrent_kernel': self.cell.recurrent_kernel,
'bias': self.cell.bias,
'mask': mask,
'time_major': self.time_major,
'go_backwards': self.go_backwards
}
normal_lstm_kwargs = cudnn_lstm_kwargs.copy()
normal_lstm_kwargs.update({
'activation': self.activation,
'recurrent_activation': self.recurrent_activation
})
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME
or (device_type is None and context.num_gpus() > 0))
and
(mask is None or is_sequence_right_padded(mask, self.time_major)))
# Under eager context, check the device placement and prefer the
# GPU implementation when GPU is available.
if can_use_gpu:
last_output, outputs, new_h, new_c, runtime = cudnn_lstm(
**cudnn_lstm_kwargs)
else:
last_output, outputs, new_h, new_c, runtime = standard_lstm(
**normal_lstm_kwargs)
else:
if mask is None:
(last_output, outputs,
new_h, new_c, runtime) = lstm_with_backend_selection(
normal_lstm_kwargs, cudnn_lstm_kwargs)
else:
def with_mask_support():
# TODO(b/134702514): Change to use backend selection.
# return lstm_with_backend_selection(normal_lstm_kwargs,
# cudnn_lstm_kwargs)
return standard_lstm(**normal_lstm_kwargs)
def without_mask_support():
return standard_lstm(**normal_lstm_kwargs)
(last_output, outputs,
new_h, new_c, runtime) = control_flow_ops.cond(
is_sequence_right_padded(mask, self.time_major),
true_fn=with_mask_support,
false_fn=without_mask_support)
states = [new_h, new_c]
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self.return_runtime:
return output, runtime
else:
return output
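# Example (illustrative sketch only, not part of the public API): building and
# calling the LSTM layer defined above on a hypothetical batch of inputs,
# assuming eager execution. With the default activations and zero recurrent
# dropout, `could_use_cudnn` is True and the layer may select the CuDNN kernel
# when a GPU is available; otherwise it falls back to the standard kernel.
def _example_lstm_usage():
  inputs = array_ops.ones([8, 12, 6])  # [batch, timesteps, features]
  layer = LSTM(4, return_sequences=True, return_state=True)
  whole_sequence, final_h, final_c = layer(inputs)
  # whole_sequence: [8, 12, 4]; final_h and final_c: [8, 4] each.
  return whole_sequence, final_h, final_c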
def _canonical_to_params(weights, biases, shape, transpose_weights=False):
  """Utility function to convert variables to CuDNN-compatible parameters.
  Note that Keras weights for kernels are different from the CuDNN format, e.g.:
```
  Keras                 CuDNN
  [[0, 1, 2],  <--->  [[0, 2, 4],
   [3, 4, 5]]          [1, 3, 5]]
```
If the input weights need to be in a unified format, then set
`transpose_weights=True` to convert the weights.
Args:
weights: list of weights for the individual kernels and recurrent kernels.
biases: list of biases for individual gate.
    shape: the shape for the converted variables that will be fed to CuDNN.
transpose_weights: boolean, whether to transpose the weights.
Returns:
    The converted weights that can be fed to CuDNN ops as params.
"""
def convert(w):
return array_ops.transpose(w) if transpose_weights else w
weights = [array_ops.reshape(convert(x), shape) for x in weights]
biases = [array_ops.reshape(x, shape) for x in biases]
return array_ops.concat(weights + biases, axis=0)
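# Example (illustrative sketch with hypothetical shapes): flattening two 2x3
# kernels and two length-3 biases into the single 1-D parameter blob that the
# CuDNN op expects. With `transpose_weights=True` each kernel is transposed
# before being flattened, matching the layout difference shown above.
def _example_canonical_to_params():
  weights = [array_ops.ones([2, 3]), array_ops.ones([2, 3])]
  biases = [array_ops.zeros([3]), array_ops.zeros([3])]
  params = _canonical_to_params(
      weights=weights,
      biases=biases,
      shape=constant_op.constant([-1]),
      transpose_weights=True)
  return params  # 1-D tensor of length 2 * 6 + 2 * 3 = 18.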
def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias,
activation, recurrent_activation, mask, time_major,
go_backwards):
"""LSTM with standard kernel implementation.
  This implementation can be run on all types of hardware.
  This implementation lifts out all the layer weights and makes them function
  parameters. It has the same number of tensor input params as the CuDNN
  counterpart. The RNN step logic has been simplified, e.g. dropout and mask
  are removed since the CuDNN implementation does not support them.
  Note that the first half of the bias tensor should be ignored by this impl.
  The CuDNN impl needs an extra set of input gate biases. In order to make both
  functions take the same shape of parameters, that extra set of biases is also
  fed here.
Args:
inputs: input tensor of LSTM layer.
init_h: initial state tensor for the cell output.
init_c: initial state tensor for the cell hidden state.
kernel: weights for cell kernel.
recurrent_kernel: weights for cell recurrent kernel.
bias: weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
activation: Activation function to use for output.
recurrent_activation: Activation function to use for hidden recurrent state.
    mask: Boolean tensor for masking out the steps within the sequence.
time_major: boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
    state_0: the cell output, which has the same shape as init_h.
    state_1: the cell hidden state, which has the same shape as init_c.
    runtime: constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = K.dot(cell_inputs, kernel)
z += K.dot(h_tm1, recurrent_kernel)
z = K.bias_add(z, bias)
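    # Split the combined projection into the four LSTM gate pre-activations:
    # input gate (z0), forget gate (z1), cell candidate (z2), output gate (z3).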
z0, z1, z2, z3 = array_ops.split(z, 4, axis=1)
i = recurrent_activation(z0)
f = recurrent_activation(z1)
c = f * c_tm1 + i * activation(z2)
o = recurrent_activation(z3)
h = o * activation(c)
return h, [h, c]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h, init_c],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=timesteps)
return (last_output, outputs, new_states[0], new_states[1],
_runtime(_RUNTIME_CPU))
def cudnn_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
time_major, go_backwards):
"""LSTM with CuDNN implementation which is only available for GPU.
  Note that currently only right-padded data is supported; otherwise the result
  will be polluted by the unmasked data, which should have been filtered out.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
    mask: Boolean tensor for masking out the steps within the sequence.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
Returns:
last_output: Output tensor for the last timestep, which has shape
[batch, units].
outputs: Output tensor for all timesteps, which has shape
[batch, time, units].
    state_0: The cell output, which has the same shape as init_h.
    state_1: The cell hidden state, which has the same shape as init_c.
    runtime: Constant string tensor which indicates the real runtime hardware.
      This value is for testing purposes and should not be used by users.
"""
if not time_major:
    # The CuDNN kernel prefers the input to be time major.
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
init_h = array_ops.expand_dims(init_h, axis=0)
init_c = array_ops.expand_dims(init_c, axis=0)
weights = array_ops.split(kernel, 4, axis=1)
weights += array_ops.split(recurrent_kernel, 4, axis=1)
  # CuDNN has an extra set of biases for inputs; we disable them (set them to
  # 0) so that mathematically it is the same as the canonical LSTM
  # implementation.
full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0)
params = _canonical_to_params(
weights=weights,
biases=array_ops.split(full_bias, 8),
shape=constant_op.constant([-1]),
transpose_weights=True)
if mask is not None:
sequence_length = calculate_sequence_by_mask(mask, time_major)
if go_backwards:
inputs = array_ops.reverse_sequence_v2(inputs, sequence_length,
seq_axis=0, batch_axis=1)
outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(
inputs, input_h=init_h, input_c=init_c, params=params, is_training=True,
rnn_mode='lstm', sequence_lengths=sequence_length)
else:
# # Fill the array with shape [batch] with value of max timesteps.
# sequence_length = array_ops.fill([array_ops.shape(inputs)[1]],
# array_ops.shape(inputs)[0])
if go_backwards:
      # Reverse axis 0 since the input is already converted to time major.
inputs = array_ops.reverse(inputs, axis=[0])
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=init_h, input_c=init_c, params=params, is_training=True,
rnn_mode='lstm')
last_output = outputs[-1]
if not time_major:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = h[0]
c = c[0]
  # In the case of variable length input, the cudnn kernel will fill zeros for
  # the output, whereas the default keras behavior is to bring over the output
  # from t-1, so that in the return_sequences=False case, the user can quickly
  # get the final effective output instead of just 0s at the last timestep.
  # In order to mimic the default keras behavior, we copy the final h state as
  # the last_output, since it is numerically the same as the output.
if mask is not None:
last_output = h
return last_output, outputs, h, c, _runtime(_RUNTIME_GPU)
def lstm_with_backend_selection(normal_lstm_params, cudnn_lstm_params):
"""Call the LSTM with optimized backend kernel selection.
  Under the hood, this function will create two TF functions: one with the most
  generic kernel, which can run on all device conditions, and a second one with
  the CuDNN-specific kernel, which can only run on GPU.
  The first function will be called with normal_lstm_params, while the second
  function is not called, but only registered in the graph. Grappler will then
  do the proper graph rewrite and swap in the optimized TF function based on
  the device placement.
Args:
normal_lstm_params: Dict, parameters for the generic TF function.
cudnn_lstm_params: Dict, parameters for the CuDNN specific TF function.
Returns:
List of output tensors, same as standard_lstm.
"""
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple LSTM layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
defun_standard_lstm = _generate_defun_backend(
api_name, _CPU_DEVICE_NAME, standard_lstm)
defun_cudnn_lstm = _generate_defun_backend(
api_name, _GPU_DEVICE_NAME, cudnn_lstm)
# Call the normal LSTM impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(
**normal_lstm_params)
function.register(defun_cudnn_lstm, **cudnn_lstm_params)
return last_output, outputs, new_h, new_c, runtime
def is_sequence_right_padded(mask, time_major):
  """Check the mask tensor and see if it is right padded.
  The CuDNN kernel uses the sequence length param to skip the trailing
  timesteps. If the data is left padded, or not strictly right padded (has
  masked values in the middle of the sequence), then the CuDNN kernel won't
  work properly in those cases.
  Left padded data: [[False, False, True, True, True]].
  Right padded data: [[True, True, True, False, False]].
  Mixture of masked/unmasked data: [[True, False, True, False, False]].
  Note that for the mixed data example above, the actual data the RNN should
  see is the two True entries (index 0 and 2); the False at index 1 should be
  ignored and must not pollute the internal states.
Args:
mask: the Boolean tensor with shape [batch, timestep] or [timestep, batch]
when time_major is True.
time_major: Boolean, whether the input mask is time major or batch major.
Returns:
boolean scalar tensor, whether the mask is strictly right padded.
"""
timestep_index = 0 if time_major else 1
max_seq_length = array_ops.shape(mask)[timestep_index]
reversed_mask = math_ops.cast(array_ops.reverse(mask, axis=[timestep_index]),
dtypes.int32)
# Use the argmax to find the index of leading 1 in the reversed mask, which is
# the index of the last True value in the original mask.
index = math_ops.argmax(reversed_mask, axis=timestep_index,
output_type=dtypes.int32)
count_of_true = math_ops.reduce_sum(reversed_mask, axis=timestep_index)
# If the data is strictly right padded, then the
# "index = max_seq_length - count_of_true" should hold.
return math_ops.reduce_all(
math_ops.equal(index, max_seq_length - count_of_true))
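# Example (illustrative sketch, assuming eager execution): checking the two
# mask layouts from the docstring above.
def _example_is_sequence_right_padded():
  right_padded = constant_op.constant([[True, True, True, False, False]])
  left_padded = constant_op.constant([[False, False, True, True, True]])
  # Expected: True for the right padded mask, False for the left padded one.
  return (is_sequence_right_padded(right_padded, time_major=False),
          is_sequence_right_padded(left_padded, time_major=False))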
def calculate_sequence_by_mask(mask, time_major):
"""Calculate the sequence length tensor (1-D) based on the masking tensor.
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
any timestep that should be masked, the corresponding field will be False.
  Consider the following example:
    a = [[True, True, False, False],
         [True, False, True, False]]
  It is a (2, 4) tensor, and the corresponding sequence length result should be
  a 1D tensor with value [2, 3]. Note that for the second row, we need to find
  the index of the last True value, which is 2, so the sequence length is 3.
Args:
mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if
time_major=True.
time_major: Boolean, which indicates whether the mask is time major or batch
major.
Returns:
sequence_length: 1D int32 tensor.
"""
timestep_index = 0 if time_major else 1
max_seq_length = array_ops.shape(mask)[timestep_index]
reversed_mask = math_ops.cast(array_ops.reverse(mask, axis=[timestep_index]),
dtypes.int32)
# Use the argmax to find the index of leading 1 in the reversed mask, which is
# the index of the last True value in the original mask.
reversed_index = math_ops.argmax(reversed_mask, axis=timestep_index,
output_type=dtypes.int32)
return max_seq_length - reversed_index
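# Example (illustrative sketch, assuming eager execution): reproducing the
# docstring example above; the expected sequence lengths are [2, 3].
def _example_calculate_sequence_by_mask():
  mask = constant_op.constant([[True, True, False, False],
                               [True, False, True, False]])
  return calculate_sequence_by_mask(mask, time_major=False)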
def _generate_defun_backend(unique_api_name, preferred_device, func):
function_attributes = {
_DEFUN_API_NAME_ATTRIBUTE: unique_api_name,
_DEFUN_DEVICE_ATTRIBUTE: preferred_device,
      # TODO(b/133178886): The function is auto inlined in eager context, which
      # makes grappler fail to do the optimization. Force it to not inline here.
'_noinline': True,
}
return function.defun_with_attributes(func=func,
attributes=function_attributes)
def _get_context_device_type():
  """Parse the current context and return the device type, e.g. CPU/GPU."""
current_device = context.context().device_name
if current_device is None:
return None
return device.DeviceSpec.from_string(current_device).device_type
def _runtime(runtime_name):
with ops.device('/cpu:0'):
return constant_op.constant(
runtime_name, dtype=dtypes.float32, name='runtime')
| tensorflow-master | tensorflow/python/keras/layers/recurrent_v2.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for allowing TF ops to work with Keras Functional API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
def _single_op_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = gen_nn_ops.relu(x)
return inputs, outputs
def _single_identity_op_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = array_ops.identity(x)
assert 'Identity' in outputs.name
return inputs, outputs
def _multiple_ops_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
outputs = gen_nn_ops.relu(x)
return inputs, outputs
def _single_op_in_middle():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
outputs = keras.layers.Dense(10)(x)
return inputs, outputs
def _multiple_ops_in_middle():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
x = gen_nn_ops.relu(x)
outputs = keras.layers.Dense(10)(x)
return inputs, outputs
def _single_standalone_branch():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = x * 2
return inputs, outputs
def _single_op_with_attrs():
inputs = keras.Input(shape=(10,))
x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
outputs = keras.layers.Dense(10)(x)
return inputs, outputs
def _multiple_uses():
inputs = keras.Input(shape=(10,))
x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
x1 = keras.layers.Dense(10)(x)
x2 = keras.layers.Dense(10)(x)
outputs = x1 + x2
return inputs, outputs
def _op_with_tensor_list():
inputs = keras.Input(shape=(10,))
x = array_ops.concat([inputs, inputs], axis=1)
outputs = keras.layers.Dense(10)(x)
return inputs, outputs
def _add_n():
inputs = keras.Input(shape=(10,))
outputs = math_ops.add_n([inputs, inputs, inputs])
return inputs, outputs
def _reuse_op():
inputs = keras.Input(shape=(10,))
# This op needs to be checked multiple times.
x = gen_nn_ops.relu(inputs)
y = keras.layers.Dense(10)(x)
x2 = x * 2
y2 = keras.layers.Dense(10)(x2)
outputs = y + y2
return inputs, outputs
class LayerWithLayer(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(name='bias', dtype='float32')
self.layer = keras.layers.Dense(10)
def call(self, inputs):
inputs = inputs * self.bias
# Would throw an error if Keras History was created here.
return self.layer(inputs)
def _inner_layer():
inputs = keras.Input(shape=(10,))
outputs = LayerWithLayer()(inputs)
return inputs, outputs
@keras_parameterized.run_all_keras_modes
class AutoLambdaTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('single_op_at_end', _single_op_at_end),
('single_identity_op_at_end', _single_identity_op_at_end),
('multiple_ops_at_end', _multiple_ops_at_end),
('single_op_in_middle', _single_op_in_middle),
('multiple_ops_in_middle', _multiple_ops_in_middle),
('single_standalone_branch', _single_standalone_branch),
('single_op_with_attrs', _single_op_with_attrs),
('multiple_uses', _multiple_uses),
('op_with_tensor_list', _op_with_tensor_list), ('add_n', _add_n),
('_reuse_op', _reuse_op), ('_inner_layer', _inner_layer))
def test_autolambda(self, model_fn):
inputs, outputs = model_fn()
model = keras.Model(inputs, outputs)
model.compile(
adam.Adam(0.001), 'mse', run_eagerly=testing_utils.should_run_eagerly())
np_inputs = nest.map_structure(lambda x: np.ones((10, 10), 'float32'),
inputs)
np_outputs = nest.map_structure(lambda x: np.ones((10, 10), 'float32'),
outputs)
model.fit(np_inputs, np_outputs, batch_size=2)
model(np_inputs) # Test calling the model directly on inputs.
new_model = keras.Model.from_config(
model.get_config(), custom_objects={'LayerWithLayer': LayerWithLayer})
new_model.compile(
adam.Adam(0.001), 'mse', run_eagerly=testing_utils.should_run_eagerly())
new_model.fit(np_inputs, np_outputs, batch_size=2)
new_model(np_inputs) # Test calling the new model directly on inputs.
def test_numerical_correctness_simple(self):
x = ops.convert_to_tensor([[-1., 0., -2., 1.]])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
y = self.evaluate(model(x))
self.assertAllClose(y, [[0., 0., 0., 1.]])
def test_numerical_correctness_with_attrs(self):
x = ops.convert_to_tensor([[1.5, 1.5], [2.5, 3.5]])
inputs = keras.Input(shape=(10,))
outputs = math_ops.reduce_mean(inputs, axis=1)
model = keras.Model(inputs, outputs)
y = self.evaluate(model(x))
self.assertAllClose(y, [1.5, 3.])
def test_numerical_correctness_serialization(self):
x = ops.convert_to_tensor([-1., 0., -2., 1.])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model1 = keras.Model(inputs, outputs)
y1 = self.evaluate(model1(x))
model2 = model1.from_config(model1.get_config())
y2 = self.evaluate(model2(x))
self.assertAllClose(y1, y2)
def test_gradient_tape_in_function(self):
z = keras.Input((1,))
x = math_ops.matmul(z, constant_op.constant(2.0, shape=(1, 1)))
x = math_ops.reduce_mean(x, axis=0, keepdims=True)
h = gen_nn_ops.relu(x)
m = keras.Model(z, h)
@def_function.function()
def f(x):
with backprop.GradientTape() as t:
t.watch(x)
z = m(x ** 2)
grads = t.gradient(z, x)
return grads
self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
constant_op.constant(40.0, shape=(1, 1)))
f = def_function.function(f)
self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
constant_op.constant(40.0, shape=(1, 1)))
def test_no_tracking(self):
x = keras.backend.placeholder((10, 10))
keras.layers.Dense(1)(x)
self.assertTrue(x._keras_history_checked)
def test_timing_scales_linearly(self):
def _construct_graph_of_size(size):
start = time.time()
x = keras.backend.placeholder(shape=(10, 4))
for _ in range(size):
x = keras.layers.Dense(4)(x)
x = gen_nn_ops.relu(x)
end = time.time()
return end - start
size_50 = _construct_graph_of_size(50)
size_500 = _construct_graph_of_size(500)
# Check construction time grows approx. linearly with size.
e = 3 # Fudge factor to prevent flakiness.
self.assertLess(size_500, (10 * e) * size_50)
def test_no_mask_tracking(self):
x = keras.backend.placeholder((10, 10))
y = keras.layers.Masking(0.)(x)
self.assertTrue(y._keras_mask._keras_history_checked)
def test_built(self):
inputs = keras.Input(shape=(10,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
for layer in model.layers:
self.assertTrue(layer.built)
# Test something that requires Layers to be built.
model.summary()
class InputInEagerTest(test.TestCase):
"""Tests ops on graph tensors in Eager runtime.
Input returns graph/symbolic tensors in the Eager runtime (this
happens, for example, with tensors returned from Keras layers). These
  should be routed to the graph-style branch of these ops (b/134715641).
"""
def test_identity(self):
with context.eager_mode():
x = keras.Input(shape=(1,))
self.assertTrue(hasattr(x, 'graph'))
ident = array_ops.identity(x)
# This is now a graph tensor, and should be able to continue in graphland
self.assertIn('Identity', ident.name)
def test_size(self):
with context.eager_mode():
x = keras.Input(shape=(3,))
self.assertTrue(hasattr(x, 'graph'))
self.assertAllEqual(x.get_shape().as_list(), [None, 3])
sz = array_ops.size(x)
# This is now a graph tensor, and should be able to continue in graphland
self.assertIn('Size', sz.name)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/tensorflow_op_layer_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that operate regularization via the addition of noise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.GaussianNoise')
class GaussianNoise(Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
  Gaussian Noise (GN) is a natural choice as a corruption process
  for real-valued inputs.
As it is a regularization layer, it is only active at training time.
Arguments:
stddev: Float, standard deviation of the noise distribution.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding noise) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, stddev, **kwargs):
super(GaussianNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
def noised():
return inputs + K.random_normal(
shape=array_ops.shape(inputs), mean=0., stddev=self.stddev)
return K.in_train_phase(noised, inputs, training=training)
def get_config(self):
config = {'stddev': self.stddev}
base_config = super(GaussianNoise, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
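# Example (illustrative sketch, assuming eager execution): the noise is only
# added when `training=True`; in inference mode the layer is the identity.
def _example_gaussian_noise():
  layer = GaussianNoise(stddev=0.1)
  inputs = array_ops.ones((2, 3))
  noisy = layer(inputs, training=True)   # inputs + N(0, 0.1) noise
  clean = layer(inputs, training=False)  # identical to inputs
  return noisy, clean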
@keras_export('keras.layers.GaussianDropout')
class GaussianDropout(Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Arguments:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, rate, **kwargs):
super(GaussianDropout, self).__init__(**kwargs)
self.supports_masking = True
self.rate = rate
def call(self, inputs, training=None):
if 0 < self.rate < 1:
def noised():
stddev = np.sqrt(self.rate / (1.0 - self.rate))
return inputs * K.random_normal(
shape=array_ops.shape(inputs), mean=1.0, stddev=stddev)
return K.in_train_phase(noised, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(GaussianDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
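# Example (illustrative sketch, assuming eager execution): with rate=0.5 the
# multiplicative noise has standard deviation sqrt(0.5 / 0.5) = 1.0, so each
# element is multiplied by a sample from N(1.0, 1.0) in training mode.
def _example_gaussian_dropout():
  layer = GaussianDropout(rate=0.5)
  inputs = array_ops.ones((2, 3))
  return layer(inputs, training=True)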
@keras_export('keras.layers.AlphaDropout')
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
  Alpha Dropout fits well with Scaled Exponential Linear Units (SELU)
  by randomly setting activations to the negative saturation value.
Arguments:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(AlphaDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
return self.noise_shape if self.noise_shape else array_ops.shape(inputs)
def call(self, inputs, training=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(inputs)
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed): # pylint: disable=missing-docstring
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = math_ops.greater_equal(
K.random_uniform(noise_shape, seed=seed), rate)
kept_idx = math_ops.cast(kept_idx, K.floatx())
# Get affine transformation params
a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
b = -a * alpha_p * rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
# Do affine transformation
return a * x + b
return K.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(AlphaDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
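# Example (illustrative sketch, assuming eager execution): AlphaDropout is
# typically paired with 'selu' activations so that the self-normalizing
# property is preserved. Dropped units are set to the scaled negative
# saturation value of SELU, and the result is affinely transformed to keep
# the original mean and variance.
def _example_alpha_dropout():
  layer = AlphaDropout(rate=0.1)
  inputs = array_ops.ones((2, 3))
  return layer(inputs, training=True)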
| tensorflow-master | tensorflow/python/keras/layers/noise.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers that implement explicit (approximate) kernel feature maps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that maps its inputs using random Fourier features.
This layer implements a feature map \\(\phi: \mathbb{R}^d \rightarrow
\mathbb{R}^D\\) which approximates shift-invariant kernels. A kernel function
  K(x, y) defined over \\(\mathbb{R}^d \times \mathbb{R}^d\\) is shift-invariant if
K(x, y) = k(x-y) for some function defined over \\(\mathbb{R}^d\\). Many
popular Radial Basis Functions (in short RBF), including gaussian and
laplacian kernels are shift-invariant.
The layer approximates a (shift invariant) kernel K in the following sense:
up to a scaling factor, for all inputs \\(x, y \in \mathbb{R}^d\\)
\\(\phi(x)^T \cdot \phi(y) \approx K(x, y)\\)
The implementation of this layer is based on the following paper:
"Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht.
(link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
  The distribution from which the parameters of the random features map (layer)
  are sampled determines which shift-invariant kernel the layer approximates
  (see paper for more details). Users can use the distribution of their
  choice. Due to their popularity, the layer supports the out-of-the-box
approximation of the following RBF kernels:
- Gaussian: \\(K(x, y) = e^{-\frac{\|x-y\|_2^2}{2 \cdot scale^2}}\\)
- Laplacian: \\(K(x, y) = e^{-\frac{\|x-y\|_1}{scale}}\\)
NOTE: Unlike the map described in the paper and the scikit-learn
implementation, the output of this layer does not apply the sqrt(2/D)
normalization factor.
Usage for ML: Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending on
the loss function of the linear model, the composition of this layer and the
  linear model results in models that are equivalent (up to approximation) to
kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
kernel linear regression (for squared loss) etc.
Example of building a kernel multinomial logistic regression model with
Gaussian kernel in keras:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer='gaussian',
scale=5.0,
...)
model = tf.keras.models.Sequential()
model.add(random_features_layer)
  model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax'))
model.compile(
loss=tf.keras.losses.categorical_crossentropy, optimizer=..., metrics=...)
```
To use another kernel, replace the layer creation command with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Arguments:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the layer).
It can be either a string or an instance of TensorFlow's Initializer
class. Currently only 'gaussian' and 'laplacian' are supported as string
initializers (case insensitive). Note that these parameters are not
trainable.
scale: For gaussian and laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see concrete
definitions above). When provided, it should be a positive float. If None,
the implementation chooses a default value (1.0 typically). Both the
approximation error of the kernel and the classification quality are
sensitive to this parameter. If trainable is set to True, this parameter
is learned end-to-end during training and the provided value serves as an
initialization value.
NOTE: When this layer is used to map the initial features and then the
transformed features are fed to a linear model, by making `scale`
trainable, the resulting optimization problem is no longer convex (even
if the loss function used by the linear model is convex).
    trainable: Whether the scaling parameter of the layer is trainable. Defaults
to False.
name: name for the RandomFourierFeatures layer.
Raises:
    ValueError: if output_dim is not positive, if scale is not positive, or if
      the provided kernel_initializer is not supported.
"""
def __init__(self,
output_dim,
kernel_initializer='gaussian',
scale=None,
trainable=False,
name=None,
**kwargs):
if output_dim <= 0:
raise ValueError(
'`output_dim` should be a positive integer. Given: {}.'.format(
output_dim))
if isinstance(kernel_initializer, six.string_types):
if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'
.format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
if scale is not None and scale <= 0.0:
raise ValueError('When provided, `scale` should be a positive float. '
'Given: {}.'.format(scale))
super(RandomFourierFeatures, self).__init__(
trainable=trainable, name=name, **kwargs)
self.output_dim = output_dim
self.kernel_initializer = kernel_initializer
self.scale = scale
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
# TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected
# to have shape [batch_size, dimension].
if input_shape.rank != 2:
raise ValueError(
'The rank of the input tensor should be 2. Got {} instead.'.format(
input_shape.ndims))
if input_shape.dims[1].value is None:
raise ValueError(
'The last dimension of the inputs to `RandomFourierFeatures` '
'should be defined. Found `None`.')
self.input_spec = input_spec.InputSpec(
ndim=2, axes={1: input_shape.dims[1].value})
input_dim = input_shape.dims[1].value
kernel_initializer = _get_random_features_initializer(
self.kernel_initializer, shape=(input_dim, self.output_dim))
unscaled_kernel = self.add_weight(
name='unscaled_random_features',
shape=(input_dim, self.output_dim),
dtype=dtypes.float32,
initializer=kernel_initializer,
trainable=False)
self.bias = self.add_weight(
name='random_features_bias',
shape=(self.output_dim,),
dtype=dtypes.float32,
initializer=init_ops.random_uniform_initializer(
minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32),
trainable=False)
if self.scale is None:
self.scale = _get_default_scale(self.kernel_initializer, input_dim)
scale = self.add_weight(
name='random_features_scale',
shape=(1,),
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(self.scale),
trainable=True,
constraint='NonNeg')
self.kernel = (1.0 / scale) * unscaled_kernel
super(RandomFourierFeatures, self).build(input_shape)
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
inputs = gen_math_ops.cast(inputs, dtypes.float32)
outputs = gen_math_ops.mat_mul(inputs, self.kernel)
outputs = nn.bias_add(outputs, self.bias)
return gen_math_ops.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
'The innermost dimension of input shape must be defined. Given: %s' %
input_shape)
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if isinstance(self.kernel_initializer, init_ops.Initializer):
kernel_initializer = initializers.serialize(self.kernel_initializer)
config = {
'output_dim': self.output_dim,
'kernel_initializer': kernel_initializer,
'scale': self.scale,
}
base_config = super(RandomFourierFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
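# Example (illustrative sketch, assuming eager execution): empirically checking
# the gaussian kernel approximation. Because the layer omits the
# sqrt(2 / output_dim) normalization, the feature dot product is scaled by
# 2 / output_dim before comparing against the exact kernel value; the two
# numbers should be close for a large output_dim.
def _example_kernel_approximation():
  output_dim, scale = 2000, 1.0
  layer = RandomFourierFeatures(
      output_dim=output_dim, kernel_initializer='gaussian', scale=scale)
  x = np.random.normal(size=(1, 4)).astype(np.float32)
  y = np.random.normal(size=(1, 4)).astype(np.float32)
  approx = (2.0 / output_dim) * np.sum(layer(x).numpy() * layer(y).numpy())
  exact = np.exp(-np.sum((x - y)**2) / (2.0 * scale**2))
  return approx, exact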
def _get_random_features_initializer(initializer, shape):
"""Returns Initializer object for random features."""
def _get_cauchy_samples(loc, scale, shape):
probs = np.random.uniform(low=0., high=1., size=shape)
return loc + scale * np.tan(np.pi * (probs - 0.5))
random_features_initializer = initializer
if isinstance(initializer, six.string_types):
if initializer.lower() == 'gaussian':
random_features_initializer = init_ops.random_normal_initializer(
stddev=1.0)
elif initializer.lower() == 'laplacian':
random_features_initializer = init_ops.constant_initializer(
_get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))
else:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'.format(
random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
return random_features_initializer
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, six.string_types) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
| tensorflow-master | tensorflow/python/keras/layers/kernelized.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class SimpleRNNLayerTest(keras_parameterized.TestCase):
def test_return_sequences_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile('rmsprop', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_with_masking_layer_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_regularizers_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
def test_statefulness_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.SimpleRNN
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse', run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/simplernn_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cell wrapper v2 implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testResidualWrapper(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor(np.array([[1., 1., 1.]]))
m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]))
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
wrapper_object = wrapper_type(base_cell)
(name, dep), = wrapper_object._checkpoint_dependencies
wrapper_object.get_config() # Should not throw an error
self.assertIs(dep, base_cell)
self.assertEqual("cell", name)
g_res, m_new_res = wrapper_object(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
@test_util.run_in_graph_and_eager_modes
def testResidualWrapperWithSlice(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor(np.array([[1., 1., 1., 1., 1.]]))
m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]))
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = wrapper_type(
base_cell, residual_with_slice_fn)(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate(
[g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
wrapper_type = rnn_cell_wrapper_v2.DeviceWrapper
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.GRUCell(3)
wrapped_cell = wrapper_type(cell, "/cpu:0")
(name, dep), = wrapped_cell._checkpoint_dependencies
wrapped_cell.get_config() # Should not throw an error
self.assertIs(dep, cell)
self.assertEqual("cell", name)
outputs, _ = wrapped_cell(x, m)
self.assertIn("cpu:0", outputs.device.lower())
@parameterized.parameters(
[[rnn_cell_impl.DropoutWrapper, rnn_cell_wrapper_v2.DropoutWrapper],
[rnn_cell_impl.ResidualWrapper, rnn_cell_wrapper_v2.ResidualWrapper]])
@test_util.run_in_graph_and_eager_modes
def testWrapperKerasStyle(self, wrapper, wrapper_v2):
"""Tests if wrapper cell is instantiated in keras style scope."""
wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1))
self.assertIsNone(getattr(wrapped_cell_v2, "_keras_style", None))
wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1))
self.assertFalse(wrapped_cell._keras_style)
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2VariableNames(self, wrapper):
"""Tests that variables names do not depend on wrapper in RNN layer."""
def _rnn_input(apply_wrapper, name):
"""Creates a RNN layer with/without wrapper and returns built rnn cell."""
with base_layer.keras_style_scope():
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1, name="basic_rnn_cell")
for _ in range(2)])
if apply_wrapper:
rnn_cell = wrapper(base_cell)
else:
rnn_cell = base_cell
rnn_layer = layers.RNN(rnn_cell, name=name)
inputs = ops.convert_to_tensor([[[1]]], dtype=dtypes.float32)
_ = rnn_layer(inputs)
return base_cell._cells[0]
rnn_1 = _rnn_input(True, name="rnn_0")
rnn_2 = _rnn_input(False, name="rnn_1")
for i, cell in enumerate([rnn_1, rnn_2]):
var_prefix = "rnn_{}/cell_0/basic_rnn_cell/".format(i)
self.assertCountEqual([v.name for v in cell.weights],
(var_prefix + "kernel:0", var_prefix + "bias:0"))
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperWeights(self, wrapper):
"""Tests that wrapper weights contain wrapped cells weights."""
base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell")
rnn_cell = wrapper(base_cell)
rnn_layer = layers.RNN(rnn_cell)
inputs = ops.convert_to_tensor([[[1]]], dtype=dtypes.float32)
rnn_layer(inputs)
expected_weights = ["rnn/" + var for var in
("kernel:0", "recurrent_kernel:0", "bias:0")]
self.assertLen(rnn_cell.weights, 3)
self.assertCountEqual([v.name for v in rnn_cell.weights], expected_weights)
self.assertCountEqual([v.name for v in rnn_cell.trainable_variables],
expected_weights)
self.assertCountEqual([v.name for v in rnn_cell.non_trainable_variables],
[])
self.assertCountEqual([v.name for v in rnn_cell.cell.weights],
expected_weights)
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2Caller(self, wrapper):
"""Tests that wrapper V2 is using the LayerRNNCell's caller."""
with base_layer.keras_style_scope():
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
rnn_cell = wrapper(base_cell)
inputs = ops.convert_to_tensor([[1]], dtype=dtypes.float32)
state = ops.convert_to_tensor([[1]], dtype=dtypes.float32)
_ = rnn_cell(inputs, [state, state])
weights = base_cell._cells[0].weights
self.assertLen(weights, expected_len=2)
self.assertTrue(all(["_wrapper" in v.name for v in weights]))
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2Build(self, wrapper):
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper(cell)
wrapper.build((1,))
self.assertTrue(cell.built)
def testDeviceWrapperSerialization(self):
wrapper_cls = rnn_cell_wrapper_v2.DeviceWrapper
cell = layers.LSTMCell(10)
wrapper = wrapper_cls(cell, "/cpu:0")
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
def testResidualWrapperSerialization(self):
wrapper_cls = rnn_cell_wrapper_v2.ResidualWrapper
cell = layers.LSTMCell(10)
wrapper = wrapper_cls(cell)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
wrapper = wrapper_cls(cell, residual_fn=lambda i, o: i + i + o)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4)
def residual_fn(inputs, outputs):
return inputs * 3 + outputs
wrapper = wrapper_cls(cell, residual_fn=residual_fn)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 5)
def testDropoutWrapperSerialization(self):
wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper
cell = layers.LSTMCell(10)
wrapper = wrapper_cls(cell)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
wrapper = wrapper_cls(cell, dropout_state_filter_visitor=lambda s: True)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertTrue(reconstructed_wrapper._dropout_state_filter(None))
def dropout_state_filter_visitor(unused_state):
return False
wrapper = wrapper_cls(
cell, dropout_state_filter_visitor=dropout_state_filter_visitor)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertFalse(reconstructed_wrapper._dropout_state_filter(None))
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent v2 layers functionality other than GRU, LSTM.
See also: lstm_v2_test.py, gru_v2_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class RNNV2Test(keras_parameterized.TestCase):
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_device_placement(self, layer):
if not test.is_gpu_available():
self.skipTest('Need GPU for testing.')
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    # Test that when a GPU is available but not used, the graph is still
    # properly created with CPU ops.
with test_util.device(use_gpu=False):
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
layer(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_reset_dropout_mask_between_batch(self, layer):
# See https://github.com/tensorflow/tensorflow/issues/29187 for more details
batch_size = 8
timestep = 12
embedding_dim = 10
units = 5
layer = layer(units, dropout=0.5, recurrent_dropout=0.5)
inputs = np.random.random((batch_size, timestep, embedding_dim)).astype(
np.float32)
previous_dropout, previous_recurrent_dropout = None, None
for _ in range(5):
layer(inputs, training=True)
dropout = layer.cell.get_dropout_mask_for_cell(inputs, training=True)
recurrent_dropout = layer.cell.get_recurrent_dropout_mask_for_cell(
inputs, training=True)
if previous_dropout is not None:
self.assertNotAllClose(self.evaluate(previous_dropout),
self.evaluate(dropout))
previous_dropout = dropout
if previous_recurrent_dropout is not None:
self.assertNotAllClose(self.evaluate(previous_recurrent_dropout),
self.evaluate(recurrent_dropout))
previous_recurrent_dropout = recurrent_dropout
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_recurrent_dropout_with_stateful_RNN(self, layer):
# See https://github.com/tensorflow/tensorflow/issues/27829 for details.
    # The issue was caused by an in-place multiply on a variable, which only
    # raised a warning for RefVariable but is an error for ResourceVariable in 2.0.
keras.models.Sequential([
layer(128, stateful=True, return_sequences=True, dropout=0.2,
batch_input_shape=[32, None, 5], recurrent_dropout=0.2)
])
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/recurrent_v2_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that act as activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha: Float >= 0. Negative slope coefficient.
"""
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.relu(inputs, alpha=self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.PReLU')
class PReLU(Layer):
"""Parametric Rectified Linear Unit.
It follows:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`,
where `alpha` is a learned array with the same shape as x.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha_initializer: Initializer function for the weights.
alpha_regularizer: Regularizer for the weights.
alpha_constraint: Constraint for the weights.
shared_axes: The axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
"""
def __init__(self,
alpha_initializer='zeros',
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs):
super(PReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
@tf_utils.shape_type_conversion
def build(self, input_shape):
param_shape = list(input_shape[1:])
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.alpha = self.add_weight(
shape=param_shape,
name='alpha',
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs):
pos = K.relu(inputs)
neg = -self.alpha * K.relu(-inputs)
return pos + neg
def get_config(self):
config = {
'alpha_initializer': initializers.serialize(self.alpha_initializer),
'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
'alpha_constraint': constraints.serialize(self.alpha_constraint),
'shared_axes': self.shared_axes
}
base_config = super(PReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.ELU')
class ELU(Layer):
"""Exponential Linear Unit.
It follows:
`f(x) = alpha * (exp(x) - 1.) for x < 0`,
`f(x) = x for x >= 0`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha: Scale for the negative factor.
"""
def __init__(self, alpha=1.0, **kwargs):
super(ELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.elu(inputs, self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(ELU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
"""Thresholded Rectified Linear Unit.
It follows:
`f(x) = x for x > theta`,
`f(x) = 0 otherwise`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
theta: Float >= 0. Threshold location of activation.
"""
def __init__(self, theta=1.0, **kwargs):
super(ThresholdedReLU, self).__init__(**kwargs)
self.supports_masking = True
self.theta = K.cast_to_floatx(theta)
def call(self, inputs):
return inputs * math_ops.cast(
math_ops.greater(inputs, self.theta), K.floatx())
def get_config(self):
config = {'theta': float(self.theta)}
base_config = super(ThresholdedReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.Softmax')
class Softmax(Layer):
"""Softmax activation function.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
axis: Integer, axis along which the softmax normalization is applied.
"""
def __init__(self, axis=-1, **kwargs):
super(Softmax, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs):
return K.softmax(inputs, axis=self.axis)
def get_config(self):
config = {'axis': self.axis}
base_config = super(Softmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.ReLU')
class ReLU(Layer):
"""Rectified Linear Unit activation function.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = negative_slope * (x - threshold)` otherwise.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
max_value: Float >= 0. Maximum activation value.
negative_slope: Float >= 0. Negative slope coefficient.
threshold: Float. Threshold value for thresholded activation.
"""
def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
super(ReLU, self).__init__(**kwargs)
if max_value is not None and max_value < 0.:
raise ValueError('max_value of Relu layer '
'cannot be negative value: ' + str(max_value))
if negative_slope < 0.:
raise ValueError('negative_slope of Relu layer '
'cannot be negative value: ' + str(negative_slope))
    self.supports_masking = True
if max_value is not None:
max_value = K.cast_to_floatx(max_value)
self.max_value = max_value
self.negative_slope = K.cast_to_floatx(negative_slope)
self.threshold = K.cast_to_floatx(threshold)
def call(self, inputs):
    # In the backend op, `alpha` is the argument name for the leaky relu
    # slope, so `negative_slope` is passed through as `alpha`.
return K.relu(inputs,
alpha=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold)
def get_config(self):
config = {
'max_value': self.max_value,
'negative_slope': self.negative_slope,
'threshold': self.threshold
}
base_config = super(ReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| tensorflow-master | tensorflow/python/keras/layers/advanced_activations.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.platform import test
# TODO(b/125513261): Move this back into wrappers_test.py.
class TimeDistributedLearningPhaseTest(test.TestCase):
def test_TimeDistributed_learning_phase(self):
with self.cached_session():
# test layers that need learning_phase to be set
np.random.seed(1234)
x = keras.layers.Input(shape=(3, 2))
y = keras.layers.TimeDistributed(keras.layers.Dropout(.999))(
x, training=True)
model = keras.models.Model(x, y)
y = model.predict(np.random.random((10, 3, 2)))
self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/time_distributed_learning_phase_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras subclassed layers utilizing desired user syntax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
class SubclassedLayersTest(keras_parameterized.TestCase):
def test_simple_build_with_constant(self):
class BuildConstantLayer(keras.layers.Layer):
def build(self, input_shape):
self.b = ops.convert_to_tensor(2.0)
def call(self, inputs):
return self.b * inputs
layer = BuildConstantLayer()
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(1,))
x = ops.convert_to_tensor([[3.0]])
self.assertEqual(
tf_utils.is_symbolic_tensor(model(x)), not context.executing_eagerly())
self.assertEqual(
tf_utils.is_symbolic_tensor(layer(x)), not context.executing_eagerly())
self.assertAllClose(keras.backend.get_value(layer(x)), [[6.0]])
def test_build_with_derived_constant(self):
class BuildDerivedConstantLayer(keras.layers.Layer):
def build(self, input_shape):
a = ops.convert_to_tensor(1.0)
b = 2.0 * a
self.variable = variables.Variable(b)
self.constant = ops.convert_to_tensor(self.variable)
def call(self, inputs):
return self.variable * self.constant * inputs
layer = BuildDerivedConstantLayer()
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(1,))
x = ops.convert_to_tensor([[3.0]])
self.assertEqual(
tf_utils.is_symbolic_tensor(model(x)), not context.executing_eagerly())
self.assertEqual(
tf_utils.is_symbolic_tensor(layer(x)), not context.executing_eagerly())
self.assertAllClose(keras.backend.get_value(layer(x)), [[12.0]])
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/subclassed_layers_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cudnn recurrent layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class CuDNNTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM],
return_sequences=[True, False]))
@test_util.run_gpu_only
def test_cudnn_rnn_return_sequence(self, layer_class, return_sequences):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
testing_utils.layer_test(
layer_class,
kwargs={'units': units,
'return_sequences': return_sequences},
input_shape=(num_samples, timesteps, input_size))
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM],
go_backwards=[True, False]))
@test_util.run_gpu_only
def test_cudnn_rnn_go_backward(self, layer_class, go_backwards):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
testing_utils.layer_test(
layer_class,
kwargs={'units': units,
'go_backwards': go_backwards},
input_shape=(num_samples, timesteps, input_size))
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
def test_return_state(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
_, state = outputs[0], outputs[1:]
self.assertEqual(len(state), num_states)
model = keras.models.Model(inputs, state[0])
model.run_eagerly = testing_utils.should_run_eagerly()
inputs = np.random.random((num_samples, timesteps, input_size))
state = model.predict(inputs)
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
def test_time_major_input(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
model = keras.models.Sequential()
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
layer = layer_class(units, time_major=True, return_sequences=True)
model.add(layer)
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(learning_rate=0.001))
model.fit(
np.ones((num_samples, timesteps, input_size)),
np.ones((num_samples, timesteps, units)))
out = model.predict(np.ones((num_samples, timesteps, input_size)))
self.assertEqual(out.shape, (num_samples, timesteps, units))
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
def test_specify_initial_state_keras_tensor(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = keras.Input((timesteps, input_size))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertIn(initial_state[0], layer._inbound_nodes[0].input_tensors)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, input_size))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
class CuDNNGraphOnlyTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_deprecated_v1
@test_util.run_gpu_only
def test_regularizer(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
layer = layer_class(
units,
return_sequences=False,
input_shape=(timesteps, input_size),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2')
layer.build((None, None, input_size))
self.assertEqual(len(layer.losses), 3)
layer = layer_class(
units,
return_sequences=False,
input_shape=(timesteps, input_size),
activity_regularizer='l2')
self.assertTrue(layer.activity_regularizer)
x = keras.backend.variable(
np.ones((num_samples, timesteps, input_size)))
layer(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
@test_util.run_gpu_only
@test_util.run_v1_only('b/120941292')
def test_statefulness(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
with self.cached_session(use_gpu=True):
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
10,
input_size,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
@test_util.run_all_in_graph_and_eager_modes
class CuDNNV1OnlyTest(keras_parameterized.TestCase):
@test_util.run_gpu_only
def test_trainability(self):
input_size = 10
units = 2
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
layer = layer_class(units)
layer.build((None, None, input_size))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False],
bidirectional=[True, False], implementation=[1, 2],
model_nest_level=[1, 2], model_type=['seq', 'func']))
@test_util.run_v1_only('b/120911602, b/112083752')
@test_util.run_gpu_only
def test_load_weights_between_noncudnn_rnn(self, rnn_type, to_cudnn,
bidirectional, implementation,
model_nest_level, model_type):
input_size = 10
timesteps = 6
input_shape = (timesteps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, input_size))
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
'implementation': implementation
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
layer = rnn_layer_class(units, **rnn_layer_kwargs)
if bidirectional:
layer = keras.layers.Bidirectional(layer)
cudnn_layer = cudnn_rnn_layer_class(units)
if bidirectional:
cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
model = self._make_nested_model(input_shape, layer, model_nest_level,
model_type)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer,
model_nest_level, model_type)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
atol=1e-4)
def _make_nested_model(self, input_shape, layer, level=1, model_type='func'):
# example: make_nested_seq_model((1,), Dense(10), level=2).summary()
def make_nested_seq_model(input_shape, layer, level=1):
model = layer
for i in range(1, level + 1):
layers = [keras.layers.InputLayer(input_shape),
model] if (i == 1) else [model]
model = keras.models.Sequential(layers)
if i > 1:
model.build((None,) + input_shape)
return model
# example: make_nested_func_model((1,), Dense(10), level=2).summary()
def make_nested_func_model(input_shape, layer, level=1):
model_input = keras.layers.Input(input_shape)
model = layer
for _ in range(level):
model = keras.models.Model(model_input, model(model_input))
return model
if model_type == 'func':
return make_nested_func_model(input_shape, layer, level)
elif model_type == 'seq':
return make_nested_seq_model(input_shape, layer, level)
def _convert_model_weights(self, source_model, target_model):
_, fname = tempfile.mkstemp('.h5')
source_model.save_weights(fname)
target_model.load_weights(fname)
os.remove(fname)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False]))
@test_util.run_v1_only('b/120911602')
@test_util.run_gpu_only
def test_load_weights_between_noncudnn_rnn_time_distributed(self, rnn_type,
to_cudnn):
    # Similar to test_load_weights_between_noncudnn_rnn(), but the input has a
    # different rank due to the use of TimeDistributed. Issue: #10356.
input_size = 10
steps = 6
timesteps = 6
input_shape = (timesteps, steps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, steps, input_size))
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
layer = rnn_layer_class(units, **rnn_layer_kwargs)
layer = keras.layers.TimeDistributed(layer)
cudnn_layer = cudnn_rnn_layer_class(units)
cudnn_layer = keras.layers.TimeDistributed(cudnn_layer)
model = self._make_nested_model(input_shape, layer)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
atol=1e-4)
@test_util.run_gpu_only
def test_cudnnrnn_bidirectional(self):
rnn = keras.layers.CuDNNGRU
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'concat'
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(None, dim)))
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
# test stacked bidirectional layers
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(None, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
    model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.Input((timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
    model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = keras.Input(batch_shape=(1, timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer='rmsprop')
model.fit(x, y, epochs=1, batch_size=1)
@test_util.run_gpu_only
def test_preprocess_weights_for_loading_gru_incompatible(self):
"""Test loading weights between incompatible layers.
Should fail fast with an exception.
"""
input_shape = (3, 5)
def gru(cudnn=False, **kwargs):
layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
return layer_class(2, input_shape=input_shape, **kwargs)
def get_layer_weights(layer):
layer.build(input_shape=input_shape)
return layer.get_weights()
def assert_not_compatible(src, dest, message):
with self.assertRaises(ValueError) as ex:
keras.saving.preprocess_weights_for_loading(
dest,
get_layer_weights(src))
self.assertIn(message, str(ex.exception))
assert_not_compatible(
gru(),
gru(cudnn=True),
'GRU(reset_after=False) is not compatible with CuDNNGRU')
assert_not_compatible(
gru(cudnn=True),
gru(),
'CuDNNGRU is not compatible with GRU(reset_after=False)')
assert_not_compatible(
gru(),
gru(reset_after=True),
'GRU(reset_after=False) is not compatible with '
'GRU(reset_after=True)')
assert_not_compatible(
gru(reset_after=True),
gru(),
'GRU(reset_after=True) is not compatible with '
'GRU(reset_after=False)')
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/cudnn_recurrent_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class LSTMLayerTest(keras_parameterized.TestCase):
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_static_shape_inference_LSTM(self):
# Github issue: 15165
timesteps = 3
embedding_dim = 4
units = 2
model = keras.models.Sequential()
inputs = keras.layers.Dense(embedding_dim,
input_shape=(timesteps, embedding_dim))
model.add(inputs)
layer = keras.layers.LSTM(units, return_sequences=True)
model.add(layer)
outputs = model.layers[-1].output
self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_LSTM(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_with_masking_layer_LSTM(self):
layer_class = keras.layers.LSTM
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_masking_with_stacking_LSTM(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
lstm_cells = [keras.layers.LSTMCell(10), keras.layers.LSTMCell(5)]
model.add(keras.layers.RNN(lstm_cells, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = keras.layers.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer._inbound_nodes[0].input_tensors
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.backend.random_normal_variable(
(num_samples, units), 0, 1)
for _ in range(num_states)]
layer = keras.layers.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
layer = keras.layers.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = keras.layers.LSTM(units)(inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_return_state(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, stateful=True)
outputs = layer(inputs)
state = outputs[1:]
assert len(state) == num_states
model = keras.models.Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
def test_state_reuse(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = keras.layers.LSTM(units)(output, initial_state=state)
model = keras.models.Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
def test_initial_states_as_other_inputs(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
num_states = 2
layer_class = keras.layers.LSTM
# Test with Keras tensor
main_inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
assert initial_state[0] in layer._inbound_nodes[0].input_tensors
model = keras.models.Model(inputs, output)
model.compile(loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly())
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
def test_regularizers_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.LSTM
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse', run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/lstm_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
class BatchNormalizationBase(Layer):
"""Base class of Batch normalization layer (Ioffe and Szegedy, 2014).
Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Arguments:
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
      When the next layer is linear (this also holds for e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
      implementation if possible. If `False`, do not use the fused
implementation.
trainable: Boolean, if `True` the variables will be marked as trainable.
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
- `training=True`: The layer will normalize its inputs using the
mean and variance of the current batch of inputs.
- `training=False`: The layer will normalize its inputs using the
mean and variance of its moving statistics, learned during training.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
{{TRAINABLE_ATTRIBUTE_NOTE}}
"""
# By default, the base class uses V2 behavior. The BatchNormalization V1
# subclass sets this to False to use the V1 behavior.
_USE_V2_BEHAVIOR = True
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
virtual_batch_size=None,
adjustment=None,
name=None,
**kwargs):
super(BatchNormalizationBase, self).__init__(
name=name, **kwargs)
if isinstance(axis, list):
self.axis = axis[:]
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError('axis must be int or list, type given: %s'
% type(axis))
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.moving_mean_initializer = initializers.get(moving_mean_initializer)
self.moving_variance_initializer = initializers.get(
moving_variance_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.renorm = renorm
self.virtual_batch_size = virtual_batch_size
self.adjustment = adjustment
if self._USE_V2_BEHAVIOR:
if fused:
self._raise_if_fused_cannot_be_used()
# We leave fused as None if self._fused_can_be_used()==True, since we
# still may set it to False in self.build() if the input rank is not 4.
elif fused is None and not self._fused_can_be_used():
fused = False
elif fused is None:
fused = True
self.supports_masking = True
self.fused = fused
self._bessels_correction_test_only = True
self._trainable_var = None
self.trainable = trainable
if renorm:
renorm_clipping = renorm_clipping or {}
keys = ['rmax', 'rmin', 'dmax']
if set(renorm_clipping) - set(keys):
raise ValueError('renorm_clipping %s contains keys not in %s' %
(renorm_clipping, keys))
self.renorm_clipping = renorm_clipping
self.renorm_momentum = renorm_momentum
def _raise_if_fused_cannot_be_used(self):
"""Raises a ValueError if fused implementation cannot be used.
    In addition to the checks done in this function, the input tensor's rank must
be 4. The input rank check can only be done once the input shape is known.
"""
# Currently fused batch norm doesn't support renorm. It also only supports a
# channel dimension on axis 1 or 3, when no virtual batch size or adjustment
# is used.
if self.renorm:
raise ValueError('Passing both fused=True and renorm=True is '
'unsupported')
axis = [self.axis] if isinstance(self.axis, int) else self.axis
# Axis -3 is equivalent to 1, and axis -1 is equivalent to 3, because the
# input rank is required to be 4 (which is checked later).
if len(axis) > 1 or axis[0] not in (-3, -1, 1, 3):
raise ValueError('Passing fused=True is only supported when axis is 1 '
'or 3')
if self.virtual_batch_size is not None:
raise ValueError('Passing fused=True is unsupported when '
'virtual_batch_size is specified.')
if self.adjustment is not None:
raise ValueError('Passing fused=True is unsupported when '
'adjustment is specified.')
def _fused_can_be_used(self):
try:
self._raise_if_fused_cannot_be_used()
return True
except ValueError:
return False
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
if self._trainable_var is not None:
self._trainable_var.update_value(value)
def _get_trainable_var(self):
if self._trainable_var is None:
self._trainable_var = K.freezable_variable(
self._trainable, name=self.name + '_trainable')
return self._trainable_var
@property
def _param_dtype(self):
# Raise parameters of fp16 batch norm to fp32
if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
return dtypes.float32
else:
return self.dtype or dtypes.float32
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndims = len(input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: %s' % self.axis)
if self.virtual_batch_size is not None:
if self.virtual_batch_size <= 0:
raise ValueError('virtual_batch_size must be a positive integer that '
'divides the true batch size of the input Tensor')
# If using virtual batches, the first dimension must be the batch
# dimension and cannot be the batch norm axis
if 0 in self.axis:
raise ValueError('When using virtual_batch_size, the batch dimension '
'must be 0 and thus axis cannot include 0')
if self.adjustment is not None:
raise ValueError('When using virtual_batch_size, adjustment cannot '
'be specified')
if self.fused in (None, True):
# TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
# output back to its original shape accordingly.
if self._USE_V2_BEHAVIOR:
if self.fused is None:
self.fused = (ndims == 4)
elif self.fused and ndims != 4:
raise ValueError('Batch normalization layers with fused=True only '
'support 4D input tensors.')
else:
assert self.fused is not None
self.fused = (ndims == 4 and self._fused_can_be_used())
# TODO(chrisying): fused batch norm is currently not supported for
# multi-axis batch norm and by extension virtual batches. In some cases,
# it might be possible to use fused batch norm but would require reshaping
# the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
# particularly tricky. A compromise might be to just support the most
# common use case (turning 5D w/ virtual batch to NCHW)
if self.fused:
if self.axis == [1]:
self._data_format = 'NCHW'
elif self.axis == [3]:
self._data_format = 'NHWC'
else:
raise ValueError('Unsupported axis, fused batch norm only supports '
'axis == [1] or axis == [3]')
axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
for x in axis_to_dim:
if axis_to_dim[x] is None:
raise ValueError('Input has undefined `axis` dimension. Input shape: ',
input_shape)
self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
param_shape = (list(axis_to_dim.values())[0],)
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [axis_to_dim[i] if i in axis_to_dim
else 1 for i in range(ndims)]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
experimental_autocast=False)
else:
self.gamma = None
if self.fused:
self._gamma_const = K.constant(
1.0, dtype=self._param_dtype, shape=param_shape)
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
experimental_autocast=False)
else:
self.beta = None
if self.fused:
self._beta_const = K.constant(
0.0, dtype=self._param_dtype, shape=param_shape)
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
self.moving_mean = self.add_weight(
name='moving_mean',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.moving_mean_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
self.moving_variance = self.add_weight(
name='moving_variance',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.moving_variance_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
if self.renorm:
# Create variables to maintain the moving mean and standard deviation.
# These are used in training and thus are different from the moving
# averages above. The renorm variables are colocated with moving_mean
# and moving_variance.
# NOTE: the `colocate_vars_with` blocks below place the renorm variables
# on the same devices as the corresponding moving statistics.
def _renorm_variable(name,
shape,
initializer=init_ops.zeros_initializer()):
"""Create a renorm variable."""
var = self.add_weight(
name=name,
shape=shape,
dtype=self._param_dtype,
initializer=initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
return var
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_mean):
self.renorm_mean = _renorm_variable('renorm_mean', param_shape,
self.moving_mean_initializer)
self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
# We initialize renorm_stddev to 0, and maintain the (0-initialized)
# renorm_stddev_weight. This allows us to (1) mix the average
# stddev with the minibatch stddev early in training, and (2) compute
# the unbiased average stddev by dividing renorm_stddev by the weight.
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_variance):
self.renorm_stddev = _renorm_variable(
'renorm_stddev', param_shape, self.moving_variance_initializer)
self.renorm_stddev_weight = _renorm_variable('renorm_stddev_weight',
())
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def _assign_moving_average(self, variable, value, momentum, inputs_size):
with K.name_scope('AssignMovingAvg') as scope:
with ops.colocate_with(variable):
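# The update below implements the exponential moving average
#   variable <- variable - (1 - momentum) * (variable - value)
# which is equivalent to
#   variable <- momentum * variable + (1 - momentum) * value.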
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (
variable - math_ops.cast(value, variable.dtype)) * decay
if inputs_size is not None:
update_delta = array_ops.where(inputs_size > 0, update_delta,
K.zeros_like(update_delta))
return state_ops.assign_sub(variable, update_delta, name=scope)
def _fused_batch_norm(self, inputs, training):
"""Returns the output of fused batch norm."""
beta = self.beta if self.center else self._beta_const
gamma = self.gamma if self.scale else self._gamma_const
# TODO(b/129279393): Support zero batch input in non DistributionStrategy
# code as well.
# TODO(b/130185866): Support zero batch input in graph mode.
if ops.executing_eagerly_outside_functions(
) and distribution_strategy_context.has_strategy():
inputs_size = array_ops.size(inputs)
else:
inputs_size = None
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
epsilon=self.epsilon,
data_format=self._data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=self.moving_mean,
variance=self.moving_variance,
epsilon=self.epsilon,
is_training=False,
data_format=self._data_format)
output, mean, variance = tf_utils.smart_cond(
training, _fused_batch_norm_training, _fused_batch_norm_inference)
if not self._bessels_correction_test_only:
# Remove Bessel's correction to be consistent with non-fused batch norm.
# Note that the variance computed by fused batch norm is
# with Bessel's correction.
sample_size = math_ops.cast(
array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
variance *= factor
training_value = tf_utils.constant_value(training)
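# When `training` cannot be resolved to a Python constant, fall back to a
# momentum of 1.0 on the inference branch: decay = 1 - momentum is then 0,
# so the moving-average update below degenerates to a no-op.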
if training_value is None:
momentum = tf_utils.smart_cond(training,
lambda: self.momentum,
lambda: 1.0)
else:
momentum = ops.convert_to_tensor(self.momentum)
if training_value or training_value is None:
def mean_update():
return self._assign_moving_average(self.moving_mean, mean, momentum,
inputs_size)
def variance_update():
return self._assign_moving_average(self.moving_variance, variance,
momentum, inputs_size)
self.add_update(mean_update)
self.add_update(variance_update)
return output
def _renorm_correction_and_moments(self, mean, variance, training,
inputs_size):
"""Returns the correction and update values for renorm."""
stddev = math_ops.sqrt(variance + self.epsilon)
# Compute the average mean and standard deviation, as if they were
# initialized with this batch's moments.
renorm_mean = self.renorm_mean
# Avoid divide by zero early on in training.
renorm_stddev = math_ops.maximum(self.renorm_stddev, self.epsilon)
# Compute the corrections for batch renorm.
r = stddev / renorm_stddev
d = (mean - renorm_mean) / renorm_stddev
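# With r and d defined this way, the renormalized output satisfies
#   ((x - mean) / stddev) * r + d == (x - renorm_mean) / renorm_stddev,
# i.e. during training the layer behaves as if it normalized by the moving
# statistics (the clipping of r and d below limits how far it can drift).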
# Ensure the corrections use pre-update moving averages.
with ops.control_dependencies([r, d]):
mean = array_ops.identity(mean)
stddev = array_ops.identity(stddev)
rmin, rmax, dmax = [self.renorm_clipping.get(key)
for key in ['rmin', 'rmax', 'dmax']]
if rmin is not None:
r = math_ops.maximum(r, rmin)
if rmax is not None:
r = math_ops.minimum(r, rmax)
if dmax is not None:
d = math_ops.maximum(d, -dmax)
d = math_ops.minimum(d, dmax)
# When not training, use r=1, d=0.
r = tf_utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
d = tf_utils.smart_cond(training,
lambda: d,
lambda: array_ops.zeros_like(d))
def _update_renorm_variable(var, weight, value, inputs_size):
"""Updates a moving average and weight, returns the unbiased value."""
value = array_ops.identity(value)
def _do_update():
"""Updates the var and weight, returns their updated ratio."""
# Update the variables without zero debiasing. The debiasing will be
# accomplished by dividing the exponential moving average by the weight.
# For example, after a single update, the moving average would be
# (1 - decay) * value and the weight will be (1 - decay), with their ratio
# giving the value.
# Make sure the weight is not updated until after r and d have been computed.
with ops.control_dependencies([value]):
weight_value = array_ops.constant(1., dtype=weight.dtype)
new_var = self._assign_moving_average(var, value, self.renorm_momentum,
inputs_size)
new_weight = self._assign_moving_average(weight, weight_value,
self.renorm_momentum,
inputs_size)
# TODO(yuefengz): the updates to var and weighted can not be batched
# together if we fetch their updated values here. Consider calculating
# new values and delaying the updates.
return new_var / new_weight
def _fake_update():
return array_ops.identity(var)
return tf_utils.smart_cond(training, _do_update, _fake_update)
# TODO(yuefengz): colocate the operations
update_new_mean = _update_renorm_variable(self.renorm_mean,
self.renorm_mean_weight, mean,
inputs_size)
update_new_stddev = _update_renorm_variable(self.renorm_stddev,
self.renorm_stddev_weight,
stddev, inputs_size)
# Update the inference mode moving averages with the batch value.
with ops.control_dependencies([update_new_mean, update_new_stddev]):
out_mean = array_ops.identity(mean)
out_variance = array_ops.identity(variance)
return (r, d, out_mean, out_variance)
def _moments(self, inputs, reduction_axes, keep_dims):
mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
# TODO(b/129279393): Support zero batch input in non DistributionStrategy
# code as well.
# TODO(b/130185866): Support zero batch input in graph mode.
if (ops.executing_eagerly_outside_functions() and
distribution_strategy_context.has_strategy()):
inputs_size = array_ops.size(inputs)
mean = array_ops.where(inputs_size > 0, mean, K.zeros_like(mean))
variance = array_ops.where(inputs_size > 0, variance,
K.zeros_like(variance))
return mean, variance
def _get_training_value(self, training=None):
if training is None:
training = K.learning_phase()
if self._USE_V2_BEHAVIOR:
if isinstance(training, int):
training = bool(training)
if base_layer_utils.is_in_keras_graph():
training = math_ops.logical_and(training, self._get_trainable_var())
else:
training = math_ops.logical_and(training, self.trainable)
return training
def call(self, inputs, training=None):
training = self._get_training_value(training)
if self.virtual_batch_size is not None:
# Virtual batches (aka ghost batches) can be simulated by reshaping the
# Tensor and reusing the existing batch norm implementation
original_shape = [-1] + inputs.shape.as_list()[1:]
expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
# Will cause errors if virtual_batch_size does not divide the batch size
inputs = array_ops.reshape(inputs, expanded_shape)
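# For example, with virtual_batch_size=2 an input of shape (6, h, w, c) is
# reshaped to (2, 3, h, w, c), so moments are computed over sub-batches of
# size 2 rather than over the full batch of 6.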
def undo_virtual_batching(outputs):
outputs = array_ops.reshape(outputs, original_shape)
return outputs
if self.fused:
outputs = self._fused_batch_norm(inputs, training=training)
if self.virtual_batch_size is not None:
# Currently never reaches here since fused_batch_norm does not support
# virtual batching
outputs = undo_virtual_batching(outputs)
return outputs
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.shape
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
if self.virtual_batch_size is not None:
del reduction_axes[1] # Do not reduce along virtual batch dim
# Broadcasting is only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value
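# For example, with axis=[1] on 4D NCHW input, gamma/beta of shape (C,) are
# reshaped to (1, C, 1, 1) below so they broadcast against the inputs.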
def _broadcast(v):
if (v is not None and len(v.shape) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
def _compose_transforms(scale, offset, then_scale, then_offset):
if then_scale is not None:
scale *= then_scale
offset *= then_scale
if then_offset is not None:
offset += then_offset
return (scale, offset)
# Determine a boolean value for `training`: could be True, False, or None.
training_value = tf_utils.constant_value(training)
if training_value == False: # pylint: disable=singleton-comparison,g-explicit-bool-comparison
mean, variance = self.moving_mean, self.moving_variance
else:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
# Adjust only during training.
adj_scale = tf_utils.smart_cond(training,
lambda: adj_scale,
lambda: array_ops.ones_like(adj_scale))
adj_bias = tf_utils.smart_cond(training,
lambda: adj_bias,
lambda: array_ops.zeros_like(adj_bias))
scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
mean, variance = self._moments(
math_ops.cast(inputs, self._param_dtype),
reduction_axes,
keep_dims=keep_dims)
moving_mean = self.moving_mean
moving_variance = self.moving_variance
mean = tf_utils.smart_cond(training,
lambda: mean,
lambda: ops.convert_to_tensor(moving_mean))
variance = tf_utils.smart_cond(
training,
lambda: variance,
lambda: ops.convert_to_tensor(moving_variance))
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are
# supposed to sequentially update the moving_mean and moving_variance
# with each sub-batch. However, since the moving statistics are only
# used during evaluation, it is more efficient to just update in one
# step and should not make a significant difference in the result.
new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
else:
new_mean, new_variance = mean, variance
if ops.executing_eagerly_outside_functions(
) and distribution_strategy_context.has_strategy():
inputs_size = array_ops.size(inputs)
else:
inputs_size = None
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
new_mean, new_variance, training, inputs_size)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
scale, offset = _compose_transforms(r, d, scale, offset)
def _do_update(var, value):
"""Compute the updates for mean and variance."""
return self._assign_moving_average(var, value, self.momentum,
inputs_size)
def mean_update():
true_branch = lambda: _do_update(self.moving_mean, new_mean)
false_branch = lambda: self.moving_mean
return tf_utils.smart_cond(training, true_branch, false_branch)
def variance_update():
true_branch = lambda: _do_update(self.moving_variance, new_variance)
false_branch = lambda: self.moving_variance
return tf_utils.smart_cond(training, true_branch, false_branch)
self.add_update(mean_update)
self.add_update(variance_update)
mean = math_ops.cast(mean, inputs.dtype)
variance = math_ops.cast(variance, inputs.dtype)
if offset is not None:
offset = math_ops.cast(offset, inputs.dtype)
if scale is not None:
scale = math_ops.cast(scale, inputs.dtype)
# TODO(reedwm): Maybe do math in float32 if given float16 inputs, if doing
# math in float16 hurts validation accuracy of popular models like resnet.
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
offset,
scale,
self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
if self.virtual_batch_size is not None:
outputs = undo_virtual_batching(outputs)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'momentum': self.momentum,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'moving_mean_initializer':
initializers.serialize(self.moving_mean_initializer),
'moving_variance_initializer':
initializers.serialize(self.moving_variance_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
# Only add TensorFlow-specific parameters if they are set, so as to preserve
# model compatibility with external Keras.
if self.renorm:
config['renorm'] = True
config['renorm_clipping'] = self.renorm_clipping
config['renorm_momentum'] = self.renorm_momentum
if self.virtual_batch_size is not None:
config['virtual_batch_size'] = self.virtual_batch_size
# Note: adjustment is not serializable.
if self.adjustment is not None:
logging.warning('The `adjustment` function of this `BatchNormalization` '
'layer cannot be serialized and has been omitted from '
'the layer config. It will not be included when '
're-creating the layer from the saved config.')
base_config = super(BatchNormalizationBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def replace_in_base_docstring(replacements):
string = BatchNormalizationBase.__doc__
for old, new in replacements:
assert old in string
string = string.replace(old, new)
return string
@keras_export(v1=['keras.layers.BatchNormalization']) # pylint: disable=missing-docstring
class BatchNormalization(BatchNormalizationBase):
__doc__ = replace_in_base_docstring(
[('''
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
implementation if possible. If False, do not used the fused
implementation.''',
'''
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.'''),
('{{TRAINABLE_ATTRIBUTE_NOTE}}', '')])
_USE_V2_BEHAVIOR = False
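# --- Editor's sketch (not part of the original module) -----------------------
# A minimal, hedged usage example for the batch normalization layer defined
# above. It assumes the public `tf.keras` API is available (which may resolve
# to the v2 variant of this class); the function name is illustrative only.
def _example_batch_norm_usage():
  import numpy as np  # assumed available
  import tensorflow as tf  # assumed available

  # A tiny model: Conv2D followed by BatchNormalization over the channel axis.
  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(8, 3, padding='same', input_shape=(32, 32, 3)),
      tf.keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=1e-3),
      tf.keras.layers.ReLU(),
  ])
  # In training mode the layer normalizes with batch statistics and updates
  # the moving averages; in inference mode it uses the moving averages.
  x = np.random.rand(4, 32, 32, 3).astype('float32')
  y_train = model(x, training=True)
  y_infer = model(x, training=False)
  return y_train, y_infer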
@keras_export('keras.layers.LayerNormalization')
class LayerNormalization(Layer):
"""Layer normalization layer (Ba et al., 2016).
Normalizes the activations of the previous layer for each example in a
batch independently, rather than across the batch as in Batch Normalization.
That is, it applies a transformation that keeps the mean activation within
each example close to 0 and the activation standard deviation close to 1.
Arguments:
axis: Integer or List/Tuple. The axis that should be normalized
(typically the features axis).
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
trainable: Boolean, if `True` the variables will be marked as trainable.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Layer Normalization](https://arxiv.org/abs/1607.06450)
"""
def __init__(self,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
trainable=True,
name=None,
**kwargs):
super(LayerNormalization, self).__init__(
name=name, trainable=trainable, **kwargs)
if isinstance(axis, (list, tuple)):
self.axis = axis[:]
elif isinstance(axis, int):
self.axis = axis
else:
raise ValueError('Expected an int or a list/tuple of ints for the '
'argument \'axis\', but received instead: %s' % axis)
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
def build(self, input_shape):
ndims = len(input_shape)
if ndims is None:
raise ValueError('Input shape %s has undefined rank.' % input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: {}'.format(tuple(self.axis)))
param_shape = [input_shape[dim] for dim in self.axis]
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
experimental_autocast=False)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
experimental_autocast=False)
else:
self.beta = None
def call(self, inputs):
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.shape
ndims = len(input_shape)
# Calculate the moments on the normalization axes (layer activations).
mean, variance = nn.moments(inputs, self.axis, keep_dims=True)
# Broadcasting is only necessary when the normalization axes are not just
# the last dimension
broadcast_shape = [1] * ndims
for dim in self.axis:
broadcast_shape[dim] = input_shape.dims[dim].value
def _broadcast(v):
if (v is not None and len(v.shape) != ndims and
self.axis != [ndims - 1]):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
# Compute layer normalization using the batch_normalization function.
outputs = nn.batch_normalization(
inputs,
mean,
variance,
offset=offset,
scale=scale,
variance_epsilon=self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
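# --- Editor's sketch (not part of the original module) -----------------------
# Minimal LayerNormalization example, assuming the public `tf.keras` API
# (`tf.keras.layers.LayerNormalization`); purely illustrative.
def _example_layer_norm_usage():
  import numpy as np  # assumed available
  import tensorflow as tf  # assumed available

  layer = tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-3)
  x = np.random.rand(2, 5, 10).astype('float32')
  y = layer(x)
  # Each example (and timestep) is normalized independently along the last
  # axis, so per-row mean is ~0 and per-row variance is ~1 before gamma/beta.
  return y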
| tensorflow-master | tensorflow/python/keras/layers/normalization.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers backed by cuDNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util.tf_export import keras_export
class _CuDNNRNN(RNN):
"""Private base class for CuDNNGRU and CuDNNLSTM layers.
Arguments:
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
time_major: Boolean (default False). If true, the inputs and outputs will be
in shape `(timesteps, batch, ...)`, whereas in the False case, it will
be `(batch, timesteps, ...)`.
"""
def __init__(self,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
**kwargs):
# We invoke the base layer's initializer directly here because we do not
# want to create RNN cell instance.
super(RNN, self).__init__(**kwargs) # pylint: disable=bad-super-call
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.time_major = time_major
self.supports_masking = False
self.input_spec = [InputSpec(ndim=3)]
if hasattr(self.cell.state_size, '__len__'):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
self.constants_spec = None
self._states = None
self._num_constants = 0
self._vector_shape = constant_op.constant([-1])
def call(self, inputs, mask=None, training=None, initial_state=None):
if isinstance(mask, list):
mask = mask[0]
if mask is not None:
raise ValueError('Masking is not supported for CuDNN RNNs.')
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
initial_state = inputs[1:]
inputs = inputs[0]
elif initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
if self.go_backwards:
# Reverse time axis.
inputs = K.reverse(inputs, 1)
output, states = self._process_batch(inputs, initial_state)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates)
if self.return_state:
return [output] + states
else:
return output
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'time_major': self.time_major,
}
base_config = super( # pylint: disable=bad-super-call
RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def trainable_weights(self):
if self.trainable and self.built:
return [self.kernel, self.recurrent_kernel, self.bias]
return []
@property
def non_trainable_weights(self):
if not self.trainable and self.built:
return [self.kernel, self.recurrent_kernel, self.bias]
return []
@property
def losses(self):
return super(RNN, self).losses
def get_losses_for(self, inputs=None):
return super( # pylint: disable=bad-super-call
RNN, self).get_losses_for(inputs=inputs)
@keras_export(v1=['keras.layers.CuDNNGRU'])
class CuDNNGRU(_CuDNNRNN):
"""Fast GRU implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
"""
def __init__(self,
units,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs):
self.units = units
cell_spec = collections.namedtuple('cell', 'state_size')
self._cell = cell_spec(state_size=self.units)
super(CuDNNGRU, self).__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
return self._cell
def build(self, input_shape):
super(CuDNNGRU, self).build(input_shape)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_dim = int(input_shape[-1])
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.bias = self.add_weight(
shape=(self.units * 6,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.built = True
def _process_batch(self, inputs, initial_state):
if not self.time_major:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
input_h = initial_state[0]
input_h = array_ops.expand_dims(input_h, axis=0)
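# The slices below swap the first two of the three per-gate blocks in the
# kernel, the recurrent kernel and each half of the bias, so the gate
# parameters are handed to cuDNN in the gate order it expects; the last
# block keeps its position.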
params = recurrent_v2._canonical_to_params( # pylint: disable=protected-access
weights=[
self.kernel[:, self.units:self.units * 2],
self.kernel[:, :self.units],
self.kernel[:, self.units * 2:],
self.recurrent_kernel[:, self.units:self.units * 2],
self.recurrent_kernel[:, :self.units],
self.recurrent_kernel[:, self.units * 2:],
],
biases=[
self.bias[self.units:self.units * 2],
self.bias[:self.units],
self.bias[self.units * 2:self.units * 3],
self.bias[self.units * 4:self.units * 5],
self.bias[self.units * 3:self.units * 4],
self.bias[self.units * 5:],
],
shape=self._vector_shape)
outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs,
input_h=input_h,
input_c=0,
params=params,
is_training=True,
rnn_mode='gru')
if self.stateful or self.return_state:
h = h[0]
if self.return_sequences:
if self.time_major:
output = outputs
else:
output = array_ops.transpose(outputs, perm=(1, 0, 2))
else:
output = outputs[-1]
return output, [h]
def get_config(self):
config = {
'units': self.units,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(CuDNNGRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export(v1=['keras.layers.CuDNNLSTM'])
class CuDNNLSTM(_CuDNNRNN):
"""Fast LSTM implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the
output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
"""
def __init__(self,
units,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs):
self.units = units
cell_spec = collections.namedtuple('cell', 'state_size')
self._cell = cell_spec(state_size=(self.units, self.units))
super(CuDNNLSTM, self).__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
return self._cell
def build(self, input_shape):
super(CuDNNLSTM, self).build(input_shape)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_dim = int(input_shape[-1])
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.unit_forget_bias:
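# The full bias holds eight blocks of size `units` (an input and a
# recurrent bias for each of the four gates). `unit_forget_bias`
# initializes one of those blocks to ones so the forget gate starts with a
# bias of 1, as recommended in Jozefowicz et al.; all other blocks use
# `bias_initializer`.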
def bias_initializer(_, *args, **kwargs):
return array_ops.concat([
self.bias_initializer((self.units * 5,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
], axis=0)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 8,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.built = True
def _process_batch(self, inputs, initial_state):
if not self.time_major:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
input_h = initial_state[0]
input_c = initial_state[1]
input_h = array_ops.expand_dims(input_h, axis=0)
input_c = array_ops.expand_dims(input_c, axis=0)
params = recurrent_v2._canonical_to_params( # pylint: disable=protected-access
weights=[
self.kernel[:, :self.units],
self.kernel[:, self.units:self.units * 2],
self.kernel[:, self.units * 2:self.units * 3],
self.kernel[:, self.units * 3:],
self.recurrent_kernel[:, :self.units],
self.recurrent_kernel[:, self.units:self.units * 2],
self.recurrent_kernel[:, self.units * 2:self.units * 3],
self.recurrent_kernel[:, self.units * 3:],
],
biases=[
self.bias[:self.units],
self.bias[self.units:self.units * 2],
self.bias[self.units * 2:self.units * 3],
self.bias[self.units * 3:self.units * 4],
self.bias[self.units * 4:self.units * 5],
self.bias[self.units * 5:self.units * 6],
self.bias[self.units * 6:self.units * 7],
self.bias[self.units * 7:],
],
shape=self._vector_shape)
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs,
input_h=input_h,
input_c=input_c,
params=params,
is_training=True)
if self.stateful or self.return_state:
h = h[0]
c = c[0]
if self.return_sequences:
if self.time_major:
output = outputs
else:
output = array_ops.transpose(outputs, perm=(1, 0, 2))
else:
output = outputs[-1]
return output, [h, c]
def get_config(self):
config = {
'units': self.units,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(CuDNNLSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
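# --- Editor's sketch (not part of the original module) -----------------------
# Hedged usage example for the cuDNN-backed layers above. It assumes a CUDA
# GPU is available (these layers can only run on GPU) and uses the v1 export
# path declared above (`tf.compat.v1.keras.layers.CuDNNLSTM` in TF 2.x);
# names and shapes are illustrative only.
def _example_cudnn_lstm_usage():
  import numpy as np  # assumed available
  import tensorflow as tf  # assumed available

  model = tf.keras.Sequential([
      tf.compat.v1.keras.layers.CuDNNLSTM(
          16, return_sequences=False, input_shape=(10, 8)),
      tf.keras.layers.Dense(1),
  ])
  model.compile(optimizer='adam', loss='mse')
  x = np.random.rand(32, 10, 8).astype('float32')
  y = np.random.rand(32, 1).astype('float32')
  model.fit(x, y, epochs=1, batch_size=8)  # requires a GPU
  return model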
| tensorflow-master | tensorflow/python/keras/layers/cudnn_recurrent.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class Conv1DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
length = 7
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv1D,
kwargs=kwargs,
input_shape=(num_samples, length, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
('padding_same_dilation_3', {'padding': 'same', 'dilation_rate': 3}),
('padding_causal', {'padding': 'causal'}),
('strides', {'strides': 2}),
('dilation_rate', {'dilation_rate': 2}),
)
def test_conv1d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
self._run_test(kwargs)
def test_conv1d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv1d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
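# --- Editor's sketch (not part of the original test file) --------------------
# Roughly what one of the parameterized cases above exercises, assuming the
# public Keras API; written as a plain function for illustration only.
def _example_conv1d_padding_same_case():
  import numpy as np  # assumed available
  import tensorflow as tf  # assumed available

  layer = tf.keras.layers.Conv1D(filters=2, kernel_size=3, padding='same')
  x = np.random.rand(2, 7, 3).astype('float32')  # (num_samples, length, stack_size)
  y = layer(x)
  assert y.shape == (2, 7, 2)  # 'same' padding preserves the length dimension
  return y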
@keras_parameterized.run_all_keras_modes
class Conv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
('strides', {'strides': (2, 2)}),
('dilation_rate', {'dilation_rate': (2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
)
def test_conv2d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_conv2d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv2d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@keras_parameterized.run_all_keras_modes
class Conv3DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
depth = 5
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs=kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2, 2)}),
('dilation_rate', {'dilation_rate': (2, 2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
)
def test_conv3d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_conv3d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv3d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv3d_dynamic_shape(self):
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
with self.cached_session(use_gpu=True):
# Won't raise error here.
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs={
'data_format': 'channels_last',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, None, None, None, 3),
input_data=input_data)
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs={
'data_format': 'channels_first',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, 3, None, None, None),
input_data=input_data)
@keras_parameterized.run_all_keras_modes
class ZeroPaddingTest(keras_parameterized.TestCase):
def test_zero_padding_1d(self):
num_samples = 2
input_dim = 2
num_steps = 5
shape = (num_samples, num_steps, input_dim)
inputs = np.ones(shape)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': 2},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': (1, 2)},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.ZeroPadding1D(padding=2)
layer.build(shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)
layer = keras.layers.ZeroPadding1D(padding=(1, 2))
layer.build(shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for left_offset in [0]:
np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
for right_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
layer.get_config()
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=None)
def test_zero_padding_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 4
input_num_col = 5
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
  inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))
else:
  inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={'padding': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={'padding': ((1, 2), (3, 4)),
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.ZeroPadding2D(
padding=(2, 2), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
elif data_format == 'channels_first':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)
layer = keras.layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
elif data_format == 'channels_first':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=None)
def test_zero_padding_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 4
input_len_dim2 = 5
input_len_dim3 = 3
inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size))
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.ZeroPadding3D,
kwargs={'padding': (2, 2, 2)},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.ZeroPadding3D(padding=(2, 2, 2))
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding3D(padding=(1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding3D(padding=None)
@test_util.for_all_test_methods(test_util.disable_xla,
'align_corners=False not supported by XLA')
@keras_parameterized.run_all_keras_modes
class UpSamplingTest(keras_parameterized.TestCase):
def test_upsampling_1d(self):
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
def test_upsampling_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_row * input_num_row
assert np_output.shape[3] == length_col * input_num_col
else: # tf
assert np_output.shape[1] == length_row * input_num_row
assert np_output.shape[2] == length_col * input_num_col
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_row, axis=2)
expected_out = np.repeat(expected_out, length_col, axis=3)
else:  # channels_last
expected_out = np.repeat(inputs, length_row, axis=1)
expected_out = np.repeat(expected_out, length_col, axis=2)
np.testing.assert_allclose(np_output, expected_out)
def test_upsampling_2d_bilinear(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
testing_utils.layer_test(keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format,
'interpolation': 'bilinear'},
input_shape=inputs.shape)
if not context.executing_eagerly():
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col),
data_format=data_format)
layer.build(inputs.shape)
outputs = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(outputs)
if data_format == 'channels_first':
self.assertEqual(np_output.shape[2], length_row * input_num_row)
self.assertEqual(np_output.shape[3], length_col * input_num_col)
else:
self.assertEqual(np_output.shape[1], length_row * input_num_row)
self.assertEqual(np_output.shape[2], length_col * input_num_col)
def test_upsampling_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling3D,
kwargs={'size': (2, 2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_dim1 in [2, 3]:
for length_dim2 in [2]:
for length_dim3 in [3]:
layer = keras.layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else:  # channels_last
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else:  # channels_last
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
np.testing.assert_allclose(np_output, expected_out)
@keras_parameterized.run_all_keras_modes
class CroppingTest(keras_parameterized.TestCase):
def test_cropping_1d(self):
num_samples = 2
time_length = 4
input_len_dim1 = 2
inputs = np.random.rand(num_samples, time_length, input_len_dim1)
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping1D,
kwargs={'cropping': (2, 2)},
input_shape=inputs.shape)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=None)
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 9
input_len_dim2 = 9
cropping = ((2, 2), (3, 3))
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.Cropping2D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1]]
else:
expected_out = inputs[:, cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1], :]
np.testing.assert_allclose(np_output, expected_out)
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# another correctness test (no cropping)
with self.cached_session(use_gpu=True):
cropping = ((0, 0), (0, 0))
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with input
np.testing.assert_allclose(np_output, inputs)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=None)
def test_cropping_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
input_len_dim3 = 8
croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)]
for cropping in croppings:
for data_format in ['channels_last', 'channels_first']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping3D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
if isinstance(cropping, tuple) and isinstance(cropping[0], tuple):
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.Cropping3D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1]]
else:
expected_out = inputs[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1], :]
np.testing.assert_allclose(np_output, expected_out)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=(1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=None)
@keras_parameterized.run_all_keras_modes
class DepthwiseConv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.DepthwiseConv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2)}),
# Only runs on GPU with CUDA; channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('depth_multiplier_1', {'depth_multiplier': 1}),
('depth_multiplier_2', {'depth_multiplier': 2}),
)
def test_depthwise_conv2d(self, kwargs):
kwargs['kernel_size'] = (3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_depthwise_conv2d_full(self):
kwargs = {
'kernel_size': 3,
'padding': 'valid',
'data_format': 'channels_last',
'activation': None,
'depthwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'depthwise_constraint': 'unit_norm',
'use_bias': True,
'strides': (2, 2),
'depth_multiplier': 1,
}
self._run_test(kwargs)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/convolutional_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer serialization utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras.layers import normalization as batchnorm_v1
from tensorflow.python.keras.layers import normalization_v2 as batchnorm_v2
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.platform import test
@tf_test_util.run_all_in_graph_and_eager_modes
class LayerSerializationTest(parameterized.TestCase, test.TestCase):
def test_serialize_deserialize(self):
layer = keras.layers.Dense(
3, activation='relu', kernel_initializer='ones', bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
if tf2.enabled():
self.assertEqual(new_layer.kernel_initializer.__class__,
keras.initializers.OnesV2)
else:
self.assertEqual(new_layer.kernel_initializer.__class__,
keras.initializers.Ones)
self.assertEqual(new_layer.units, 3)
@parameterized.parameters(
[batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization])
def test_serialize_deserialize_batchnorm(self, batchnorm_layer):
layer = batchnorm_layer(
momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
config = keras.layers.serialize(layer)
self.assertEqual(config['class_name'], 'BatchNormalization')
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.momentum, 0.9)
if tf2.enabled():
self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.ZerosV2)
else:
self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.Zeros)
self.assertEqual(new_layer.gamma_regularizer.__class__,
keras.regularizers.L1L2)
@parameterized.parameters(
[batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization])
def test_deserialize_batchnorm_backwards_compatibility(self, batchnorm_layer):
layer = batchnorm_layer(
momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2')
config = keras.layers.serialize(layer)
# Simulate a saved model in which BatchNormalizationV1 or
# BatchNormalizationV2 appears as the serialized class name.
if batchnorm_layer is batchnorm_v1.BatchNormalization:
config['class_name'] = 'BatchNormalizationV1'
else:
config['class_name'] = 'BatchNormalizationV2'
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.momentum, 0.9)
if tf2.enabled():
self.assertIsInstance(new_layer, batchnorm_v2.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.ZerosV2)
else:
self.assertIsInstance(new_layer, batchnorm_v1.BatchNormalization)
self.assertEqual(new_layer.beta_initializer.__class__,
keras.initializers.Zeros)
self.assertEqual(new_layer.gamma_regularizer.__class__,
keras.regularizers.L1L2)
@parameterized.parameters([rnn_v1.LSTM, rnn_v2.LSTM])
def test_serialize_deserialize_lstm(self, layer):
lstm = layer(5, return_sequences=True)
config = keras.layers.serialize(lstm)
self.assertEqual(config['class_name'], 'LSTM')
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.units, 5)
self.assertEqual(new_layer.return_sequences, True)
if tf2.enabled():
self.assertIsInstance(new_layer, rnn_v2.LSTM)
else:
self.assertIsInstance(new_layer, rnn_v1.LSTM)
self.assertNotIsInstance(new_layer, rnn_v2.LSTM)
@parameterized.parameters([rnn_v1.GRU, rnn_v2.GRU])
def test_serialize_deserialize_gru(self, layer):
gru = layer(5, return_sequences=True)
config = keras.layers.serialize(gru)
self.assertEqual(config['class_name'], 'GRU')
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.units, 5)
self.assertEqual(new_layer.return_sequences, True)
if tf2.enabled():
self.assertIsInstance(new_layer, rnn_v2.GRU)
else:
self.assertIsInstance(new_layer, rnn_v1.GRU)
self.assertNotIsInstance(new_layer, rnn_v2.GRU)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/serialization_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer serialization/deserialization functions.
"""
# pylint: disable=wildcard-import
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.keras.engine.base_layer import AddLoss
from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.layers.advanced_activations import *
from tensorflow.python.keras.layers.convolutional import *
from tensorflow.python.keras.layers.convolutional_recurrent import *
from tensorflow.python.keras.layers.core import *
from tensorflow.python.keras.layers.cudnn_recurrent import *
from tensorflow.python.keras.layers.embeddings import *
from tensorflow.python.keras.layers.local import *
from tensorflow.python.keras.layers.merge import *
from tensorflow.python.keras.layers.noise import *
from tensorflow.python.keras.layers.normalization import *
from tensorflow.python.keras.layers.pooling import *
from tensorflow.python.keras.layers.recurrent import *
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import *
from tensorflow.python.keras.layers.wrappers import *
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.util.tf_export import keras_export
if tf2.enabled():
from tensorflow.python.keras.layers.normalization_v2 import * # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.layers.recurrent_v2 import * # pylint: disable=g-import-not-at-top
# This deserialization table is added for backward compatibility: in TF 1.13,
# 'BatchNormalizationV1' and 'BatchNormalizationV2' were used as the class
# names for the v1 and v2 versions of BatchNormalization. Here we explicitly
# map them to the canonical name when deserializing a config.
_DESERIALIZATION_TABLE = {
'BatchNormalizationV1': 'BatchNormalization',
'BatchNormalizationV2': 'BatchNormalization',
}
@keras_export('keras.layers.serialize')
def serialize(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
@keras_export('keras.layers.deserialize')
def deserialize(config, custom_objects=None):
"""Instantiates a layer from a config dictionary.
Arguments:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names)
of custom (non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Network, Layer...)
"""
# Prevent circular dependencies.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
from tensorflow.python.feature_column import feature_column_v2 # pylint: disable=g-import-not-at-top
globs = globals() # All layers.
globs['Network'] = models.Network
globs['Model'] = models.Model
globs['Sequential'] = models.Sequential
# Prevent circular dependencies with FeatureColumn serialization.
globs['DenseFeatures'] = feature_column_v2.DenseFeatures
layer_class_name = config['class_name']
if layer_class_name in _DESERIALIZATION_TABLE:
config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name]
return deserialize_keras_object(
config,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='layer')
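# A minimal round-trip sketch (illustrative only, not executed by this module;
# `Dense` is available here through the wildcard import of keras.layers.core):
#
#   layer = Dense(4, activation='relu')
#   config = serialize(layer)     # {'class_name': 'Dense', 'config': {...}}
#   clone = deserialize(config)   # a fresh, unbuilt Dense layer
#   clone.get_config() == layer.get_config()   # -> True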
| tensorflow-master | tensorflow/python/keras/layers/serialization.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests dense attention layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import dense_attention
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseDenseAttentionTest(test.TestCase):
def test_one_dim_with_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 1]
scores_mask = np.array([[[True]]], dtype=np.bool_)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_no_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 3]
scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
# attention_distribution001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 * 0.8
# = 1.35795272077
expected = np.array([[[1.35795272077]]], dtype=np.float32)
self.assertAllClose(expected, actual)
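# The hand-computed constants above can be reproduced with numpy (a sketch
# only, not executed by this test; the masked position is dropped before the
# softmax):
#
#   w = np.exp([1., 0.]) / np.sum(np.exp([1., 0.]))   # [0.7310586, 0.2689414]
#   np.dot(w, [1.6, 0.7])                             # ~1.3579527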
def test_multi_dim_no_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected attention distribution = softmax(scores).
# => attention_distribution000 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
# attention_distribution001 = exp(0)/(exp(1) + exp(0) + exp(1))
# = 0.15536240349
# attention_distribution002 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7
# - 0.42231879825 * 0.8
# = 0.44660872104
expected = np.array([[[0.44660872104]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_batch_size_two(self):
# Scores tensor of shape [2, 1, 1]
scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Value tensor of shape [2, 1, 1]
v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
# Scores mask tensor of shape [2, 1, 1]
scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
actual = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected tensor of shape [2, 1, 1].
# expected000 = softmax(scores)[0, 0] * 1.6 = 1.6
# expected100 = softmax(scores)[1, 0] * 2.6 = 2.6
expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
@test_util.run_all_in_graph_and_eager_modes
class AttentionTest(test.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 1.1*1.6 = 1.76
expected = np.array([[[1.76]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 2, 3].
# expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64
# expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24
# expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84
# expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24
# expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84
# expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44
expected = np.array(
[[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 1.1*1.6 = 1.76
# expected100 = 2.1*2.6 = 5.46
expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_with_scale(self):
"""Tests that scores are multiplied by scale."""
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
attention_layer.scale = -2.
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = -2*1.1*1.6 = -3.52
expected = np.array([[[-3.52]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
# = 1.3561791301
expected = np.array([[[1.3561791301]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3
# = 0.58127362329
expected = np.array([[[0.58127362329]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_query_mask(self):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# Expected scores of shape [1, 2, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]
# = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
# => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35))
# = 0.38936076605
# attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35))
# = 0.61063923394
# attention_distribution012 = 0
#
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
# = 1.3561791301
# expected010 = 0
expected = np.array([[[1.3561791301], [0.]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_scale_None(self):
"""Tests that scale is None by default."""
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
self.assertIsNone(attention_layer.scale)
def test_scale_init_eager(self):
"""Tests that scale initializes to 1 when use_scale=True."""
with context.eager_mode():
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
self.assertAllClose(1., attention_layer.scale.value())
@test_util.deprecated_graph_mode_only
def test_scale_init_graph(self):
"""Tests that scale initializes to 1 when use_scale=True."""
with self.cached_session() as sess:
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
sess.run(attention_layer.scale.initializer)
self.assertAllClose(1., attention_layer.scale.value())
def test_self_attention_causal(self):
# Query-value tensor of shape [1, 3, 1]
q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
attention_layer = dense_attention.Attention(causal=True)
actual = attention_layer([q, q])
# Expected scores of shape [1, 3, 3]
# scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]
# Expected attention distribution = softmax(scores) lower triangular
# => attention_distribution00 = [1., 0., 0.]
# attention_distribution01
# = [exp(0.4), exp(0.64), 0.] / (exp(0.4) + exp(0.64))
# = [0.44028635073, 0.55971364926, 0.]
# attention_distribution02
# = [exp(-0.15), exp(-0.24), exp(0.09)]
# / (exp(-0.15) + exp(-0.24) + exp(0.09))
# = [0.31395396638, 0.28693232061, 0.399113713]
#
# Expected tensor of shape [1, 3, 1].
# expected000 = 0.5
# expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8
# = 0.66791409477
# expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3
# = 0.26678872577
expected = np.array(
[[[0.5], [0.66791409477], [0.26678872577]]], dtype=np.float32)
self.assertAllClose(expected, actual)
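# With causal=True, query position i only attends to key positions j <= i.
# Row 1 of the expectation above can be reproduced with numpy (a sketch only,
# not executed by this test):
#
#   w = np.exp([0.4, 0.64]) / np.sum(np.exp([0.4, 0.64]))  # [0.4402864, 0.5597136]
#   np.dot(w, [0.5, 0.8])                                   # ~0.6679141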
def test_inputs_not_list(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegexp(
ValueError, 'Attention layer must be called on a list of inputs'):
attention_layer(q)
def test_inputs_too_short(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
'Attention layer accepts inputs list of length 2 or 3'):
attention_layer([q])
def test_inputs_too_long(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
'Attention layer accepts inputs list of length 2 or 3'):
attention_layer([q, q, q, q])
def test_mask_not_list(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegexp(
ValueError, 'Attention layer mask must be a list'):
attention_layer([q, q], mask=mask)
def test_mask_too_short(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegexp(
ValueError, 'Attention layer mask must be a list of length 2'):
attention_layer([q, q], mask=[mask])
def test_mask_too_long(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegexp(
ValueError, 'Attention layer mask must be a list of length 2'):
attention_layer([q, q], mask=[mask, mask, mask])
def test_override_mask(self):
attention_layer = dense_attention.Attention()
q = core.Masking()(np.array([[[1.1]]], dtype=np.float32))
mask = np.array([[False]], dtype=np.bool_)
actual = attention_layer([q, q], mask=[mask, mask])
self.assertAllClose([[[0]]], actual)
def test_implicit_mask(self):
attention_layer = dense_attention.Attention()
q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32))
v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32))
actual = attention_layer([q, v])
self.assertAllClose([[[0], [1]]], actual)
@test_util.run_all_in_graph_and_eager_modes
class AdditiveAttentionTest(test.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
expected = np.array([[[0.49550372683]]], dtype=np.float32)
self.assertAllClose(expected, actual)
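# The constant above is simply 0.5 * np.tanh(1.1 + 1.6) ~= 0.49550373 (numpy
# sketch only, not executed by this test).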
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
# Scale tensor of shape [4]
attention_layer.scale = np.array([[[0.5, 0.6, 0.7, 0.8]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# pylint:disable=line-too-long
# expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
# expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
# expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
# expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
# expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
# expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
# pylint:enable=line-too-long
expected = np.array(
[[[2.58044532581, 2.59734317449, 2.59964024652],
[2.59734317449, 2.59964024652, 2.59995130916]]],
dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
# expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277
expected = np.array(
[[[0.49550372683]], [[0.49991728277]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_no_scale(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention(use_scale=False)
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[None, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
# pylint:enable=line-too-long
expected = np.array([[[1.15497245968]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v, k], mask=[None, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3
# = 0.64834251342
# pylint:enable=line-too-long
expected = np.array([[[0.64834251342]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_query_mask(self):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 2, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)],
# [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622],
# [0.40024951088, 0.09868766011, -0.43086157965]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
# => attention_distribution010
# = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.57482427975
# attention_distribution011
# = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.42517572025
# attention_distribution012 = 0
#
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
# expected010 = 0
# pylint:enable=line-too-long
expected = np.array([[[1.15497245968], [0.]]], dtype=np.float32)
self.assertAllClose(expected, actual)
@test_util.run_all_in_graph_and_eager_modes
class LowerTriangularMaskTest(test.TestCase):
def test_square_shape(self):
actual = dense_attention._lower_triangular_mask([3, 3])
expected = np.array(
[[True, False, False], [True, True, False], [True, True, True]],
dtype=np.bool_)
self.assertAllEqual(expected, actual)
def test_orthogonal_shape(self):
actual = dense_attention._lower_triangular_mask([3, 2])
expected = np.array(
[[True, False], [True, True], [True, True]], dtype=np.bool_)
self.assertAllEqual(expected, actual)
def test_three_dim(self):
actual = dense_attention._lower_triangular_mask([1, 3, 3])
expected = np.array(
[[[True, False, False], [True, True, False], [True, True, True]]],
dtype=np.bool_)
self.assertAllEqual(expected, actual)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/dense_attention_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.recurrent import _standardize_args
from tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import keras_export
class ConvRNN2D(RNN):
"""Base class for convolutional-recurrent layers.
Arguments:
cell: An RNN cell instance. An RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the number of channels of the recurrent state
(which should be the same as the number of channels of the cell
output). This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
input_shape: Use this argument to specify the shape of the
input when this layer is the first one in a model.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
5D tensor with shape:
`(samples, timesteps, channels, rows, cols)`
if data_format='channels_first' or 5D tensor with shape:
`(samples, timesteps, rows, cols, channels)`
if data_format='channels_last'.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
- If `return_sequences`: 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)`
if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)`
if data_format='channels_last'.
- Else, 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
Masking:
This layer supports masking for input data with a variable number
of timesteps.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
- If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
- If functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers,
e.g. `(32, 10, 100, 100, 32)`.
Note that the number of rows and columns should be specified
too.
- Specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of the `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if unroll:
raise TypeError('Unrolling isn\'t possible with '
'convolutional RNNs.')
if isinstance(cell, (list, tuple)):
# The StackedConvRNN2DCells isn't implemented yet.
raise TypeError('It is not possible at the moment to '
'stack convolutional cells.')
super(ConvRNN2D, self).__init__(cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs)
self.input_spec = [InputSpec(ndim=5)]
self.states = None
self._num_constants = None
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif cell.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(rows,
cell.kernel_size[0],
padding=cell.padding,
stride=cell.strides[0],
dilation=cell.dilation_rate[0])
cols = conv_utils.conv_output_length(cols,
cell.kernel_size[1],
padding=cell.padding,
stride=cell.strides[1],
dilation=cell.dilation_rate[1])
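# conv_utils.conv_output_length applies the usual convolution arithmetic:
# with 'valid' padding the spatial size becomes
# ceil((size - dilated_kernel_size + 1) / stride), and with 'same' padding
# ceil(size / stride).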
if cell.data_format == 'channels_first':
output_shape = input_shape[:2] + (cell.filters, rows, cols)
elif cell.data_format == 'channels_last':
output_shape = input_shape[:2] + (rows, cols, cell.filters)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == 'channels_first':
output_shape += [(input_shape[0], cell.filters, rows, cols)
for _ in range(2)]
elif cell.data_format == 'channels_last':
output_shape += [(input_shape[0], rows, cols, cell.filters)
for _ in range(2)]
return output_shape
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=E1130
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == 'channels_first':
ch_dim = 1
elif self.cell.data_format == 'channels_last':
ch_dim = 3
if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
raise ValueError(
'An initial_state was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'However `cell.state_size` is '
'{}'.format([spec.shape for spec in self.state_spec],
self.cell.state_size))
else:
if self.cell.data_format == 'channels_first':
self.state_spec = [InputSpec(shape=(None, dim, None, None))
for dim in state_size]
elif self.cell.data_format == 'channels_last':
self.state_spec = [InputSpec(shape=(None, None, None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
initial_state = self.cell.input_conv(initial_state,
array_ops.zeros(tuple(shape)),
padding=self.cell.padding)
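# Convolving an all-zeros input with an all-zeros kernel keeps the state at
# zero while giving it the spatial shape and channel count of the cell output.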
if hasattr(self.cell.state_size, '__len__'):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(
inputs, initial_state, constants, self._num_constants)
if initial_state is None and constants is None:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
self.state_spec = []
for state in initial_state:
shape = K.int_shape(state)
self.state_spec.append(InputSpec(shape=shape))
additional_specs += self.state_spec
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
self.constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]):
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors')
if K.is_keras_tensor(additional_inputs[0]):
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
full_input_spec = self.input_spec + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(ConvRNN2D, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' +
str(len(initial_state)) +
' initial states.')
timesteps = K.int_shape(inputs)[1]
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
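      # `K.rnn` passes the constants appended to the end of the state list,
      # so the step function splits them off again before calling the cell.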
def step(inputs, states):
constants = states[-self._num_constants:]
states = states[:-self._num_constants]
return self.cell.call(inputs, states, constants=constants,
**kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
input_length=timesteps)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(K.update(self.states[i], states[i]))
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
state_shape = self.compute_output_shape(input_shape)
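    # Undo the effects of `return_state` and `return_sequences` below so that
    # `state_shape` describes a single per-sample state tensor.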
if self.return_state:
state_shape = state_shape[0]
if self.return_sequences:
state_shape = state_shape[:1].concatenate(state_shape[2:])
if None in state_shape:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
                       'the batch size by passing a '
'`batch_shape` argument to your Input layer.\n'
'The same thing goes for the number of rows and '
'columns.')
# helper function
def get_tuple_shape(nb_channels):
result = list(state_shape)
if self.cell.data_format == 'channels_first':
result[1] = nb_channels
elif self.cell.data_format == 'channels_last':
result[3] = nb_channels
else:
        raise KeyError('Cell data format must be "channels_first" or '
                       '"channels_last". Got: ' + str(self.cell.data_format))
return tuple(result)
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros(get_tuple_shape(dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros(get_tuple_shape(self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros(get_tuple_shape(dim)))
else:
K.set_value(self.states[0],
np.zeros(get_tuple_shape(self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, ' +
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != get_tuple_shape(dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str(get_tuple_shape(dim)) +
', found shape=' + str(value.shape))
# TODO(anjalisridhar): consider batch calls to `set_value`.
K.set_value(state, value)
class ConvLSTM2DCell(DropoutRNNCellMixin, Layer):
"""Cell class for the ConvLSTM2D layer.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
      (i.e. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.]
(http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 4D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
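
  Example (an illustrative sketch; `ConvLSTM2DCell` is referenced here as
  defined in this module, and the hyperparameters are arbitrary):

  ```
  cell = ConvLSTM2DCell(filters=8, kernel_size=(3, 3), padding='same')
  # The cell keeps two per-sample state tensors (hidden state h and cell
  # state c), each with `filters` channels.
  print(cell.state_size)  # (8, 8)
  ```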
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2DCell, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = (self.filters, self.filters)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
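    # Both the input and the recurrent kernel stack the four gate kernels
    # (input, forget, cell, output) along the last axis; `call` splits them
    # back apart.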
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
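        # Concatenate the per-gate bias segments so that the forget-gate bias
        # starts at one while the remaining gates use `bias_initializer`.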
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.filters,), *args, **kwargs),
initializers.Ones()((self.filters,), *args, **kwargs),
self.bias_initializer((self.filters * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.filters * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
# dropout matrices for input units
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
# dropout matrices for recurrent units
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
(kernel_i, kernel_f,
kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3)
(recurrent_kernel_i,
recurrent_kernel_f,
recurrent_kernel_c,
recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3)
if self.use_bias:
bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4)
else:
bias_i, bias_f, bias_c, bias_o = None, None, None, None
x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)
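    # Standard LSTM gate math, with the dense products replaced by the
    # convolutions computed above (input/forget/output gates, cell state c,
    # hidden state h).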
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(x, w, strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b,
data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(x, w, strides=(1, 1),
padding='same',
data_format=self.data_format)
return conv_out
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2DCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ConvLSTM2D')
class ConvLSTM2D(ConvRNN2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
      By default, the hyperbolic tangent activation function is applied
      (`tanh(x)`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.]
(http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or `recurrent_dropout`
are set.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Input shape:
- If data_format='channels_first'
5D tensor with shape:
`(samples, time, channels, rows, cols)`
- If data_format='channels_last'
5D tensor with shape:
`(samples, time, rows, cols, channels)`
Output shape:
- If `return_sequences`
- If data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- If data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- Else
      - If data_format='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- If data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
    where `output_row` and `output_col` depend on the shape of the filter and
    the padding.
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the
      cell's output.
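
  Example (an illustrative sketch; the shapes and hyperparameters below are
  arbitrary and assume the default `channels_last` data format):

  ```
  import numpy as np
  import tensorflow as tf

  # 40 samples of 10-frame video, each frame 32x32 pixels with 3 channels.
  x = np.random.random((40, 10, 32, 32, 3)).astype('float32')
  y = np.random.random((40, 1)).astype('float32')

  model = tf.keras.Sequential([
      tf.keras.layers.ConvLSTM2D(filters=16, kernel_size=(3, 3),
                                 padding='same', return_sequences=False,
                                 input_shape=(10, 32, 32, 3)),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(1),
  ])
  model.compile(optimizer='adam', loss='mse')
  model.fit(x, y, batch_size=8, epochs=1)
  ```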
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
cell = ConvLSTM2DCell(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(ConvLSTM2D, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(ConvLSTM2D, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(
self.activity_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2D, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| tensorflow-master | tensorflow/python/keras/layers/convolutional_recurrent.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for noise layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class NoiseLayersTest(keras_parameterized.TestCase):
def test_GaussianNoise(self):
testing_utils.layer_test(
keras.layers.GaussianNoise,
kwargs={'stddev': 1.},
input_shape=(3, 2, 3))
def test_GaussianDropout(self):
testing_utils.layer_test(
keras.layers.GaussianDropout,
kwargs={'rate': 0.5},
input_shape=(3, 2, 3))
def test_AlphaDropout(self):
testing_utils.layer_test(
keras.layers.AlphaDropout,
kwargs={'rate': 0.2},
input_shape=(3, 2, 3))
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/keras/layers/noise_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import keras_export
# Keras has undeclared dependency on tensorflow/estimator:estimator_py.
# As long as you depend on the //third_party/py/tensorflow:tensorflow target,
# everything will work as normal.
# LINT.IfChange
@keras_export(v1=['keras.estimator.model_to_estimator'])
def model_to_estimator(
keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None,
    checkpoint_format='saver'):
  """Constructs an `Estimator` instance from a given Keras model.
For usage example, please see:
[Creating estimators from Keras
Models](https://tensorflow.org/guide/estimators#model_to_estimator).
__Sample Weights__
Estimators returned by `model_to_estimator` are configured to handle sample
weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample
weights when training or evaluating the Estimator, the first item returned by
the input function should be a dictionary with keys `features` and
`sample_weights`. Example below:
```
keras_model = tf.keras.Model(...)
keras_model.compile(...)
estimator = tf.keras.estimator.model_to_estimator(keras_model)
def input_fn():
return dataset_ops.Dataset.from_tensors(
({'features': features, 'sample_weights': sample_weights},
targets))
estimator.train(input_fn, steps=1)
```
Args:
keras_model: A compiled Keras model object. This argument is mutually
exclusive with `keras_model_path`.
keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
format, which can be generated with the `save()` method of a Keras model.
This argument is mutually exclusive with `keras_model`.
custom_objects: Dictionary for custom objects.
model_dir: Directory to save `Estimator` model parameters, graph, summary
files for TensorBoard, etc.
config: `RunConfig` to config `Estimator`.
checkpoint_format: Sets the format of the checkpoint saved by the estimator
when training. May be `saver` or `checkpoint`, depending on whether to
save checkpoints from `tf.train.Saver` or `tf.train.Checkpoint`. This
argument currently defaults to `saver`. When 2.0 is released, the default
will be `checkpoint`. Estimators use name-based `tf.train.Saver`
checkpoints, while Keras models use object-based checkpoints from
`tf.train.Checkpoint`. Currently, saving object-based checkpoints from
`model_to_estimator` is only supported by Functional and Sequential
models.
Returns:
    An Estimator from the given Keras model.
Raises:
ValueError: if neither keras_model nor keras_model_path was given.
    ValueError: if both keras_model and keras_model_path were given.
ValueError: if the keras_model_path is a GCS URI.
ValueError: if keras_model has not been compiled.
ValueError: if an invalid checkpoint_format was given.
"""
try:
from tensorflow_estimator.python.estimator import keras as keras_lib # pylint: disable=g-import-not-at-top
except ImportError:
raise NotImplementedError(
'tf.keras.estimator.model_to_estimator function not available in your '
'installation.')
return keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg
keras_model=keras_model,
keras_model_path=keras_model_path,
custom_objects=custom_objects,
model_dir=model_dir,
config=config,
checkpoint_format=checkpoint_format)
@keras_export('keras.estimator.model_to_estimator', v1=[])
def model_to_estimator_v2(
keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None,
    checkpoint_format='checkpoint'):
  """Constructs an `Estimator` instance from a given Keras model.
For usage example, please see:
[Creating estimators from Keras
Models](https://tensorflow.org/guide/estimators#model_to_estimator).
Args:
keras_model: A compiled Keras model object. This argument is mutually
exclusive with `keras_model_path`.
keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
format, which can be generated with the `save()` method of a Keras model.
This argument is mutually exclusive with `keras_model`.
custom_objects: Dictionary for custom objects.
model_dir: Directory to save `Estimator` model parameters, graph, summary
files for TensorBoard, etc.
config: `RunConfig` to config `Estimator`.
checkpoint_format: Sets the format of the checkpoint saved by the estimator
when training. May be `saver` or `checkpoint`, depending on whether to
save checkpoints from `tf.compat.v1.train.Saver` or `tf.train.Checkpoint`.
The default is `checkpoint`. Estimators use name-based `tf.train.Saver`
checkpoints, while Keras models use object-based checkpoints from
`tf.train.Checkpoint`. Currently, saving object-based checkpoints from
`model_to_estimator` is only supported by Functional and Sequential
models.
Returns:
    An Estimator from the given Keras model.
Raises:
ValueError: if neither keras_model nor keras_model_path was given.
    ValueError: if both keras_model and keras_model_path were given.
ValueError: if the keras_model_path is a GCS URI.
ValueError: if keras_model has not been compiled.
ValueError: if an invalid checkpoint_format was given.
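
  Usage example (a minimal sketch; the toy model below is hypothetical and
  only illustrates the call):

  ```
  import tensorflow as tf

  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
  model.compile(optimizer='adam', loss='mse')

  # Object-based (`tf.train.Checkpoint`) checkpoints are the default here,
  # so `checkpoint_format` can normally be omitted.
  estimator = tf.keras.estimator.model_to_estimator(keras_model=model)
  ```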
"""
try:
from tensorflow_estimator.python.estimator import keras as keras_lib # pylint: disable=g-import-not-at-top
except ImportError:
raise NotImplementedError(
'tf.keras.estimator.model_to_estimator function not available in your '
'installation.')
return keras_lib.model_to_estimator( # pylint:disable=unexpected-keyword-arg
keras_model=keras_model,
keras_model_path=keras_model_path,
custom_objects=custom_objects,
model_dir=model_dir,
config=config,
checkpoint_format=checkpoint_format)
# LINT.ThenChange(//tensorflow_estimator/python/estimator/keras.py)
| tensorflow-master | tensorflow/python/keras/estimator/__init__.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR100 small images classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.datasets.cifar import load_batch
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.datasets.cifar100.load_data')
def load_data(label_mode='fine'):
"""Loads CIFAR100 dataset.
Arguments:
label_mode: one of "fine", "coarse".
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
Raises:
ValueError: in case of invalid `label_mode`.
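
  Example (illustrative usage; the dataset is downloaded on first call, and
  the shapes shown assume the default `channels_last` image data format):

  ```
  import tensorflow as tf

  (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(
      label_mode='fine')
  # 50,000 training and 10,000 test 32x32 color images; labels have shape
  # (num_samples, 1).
  print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)
  ```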
"""
if label_mode not in ['fine', 'coarse']:
raise ValueError('`label_mode` must be one of `"fine"`, `"coarse"`.')
dirname = 'cifar-100-python'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
fpath = os.path.join(path, 'train')
x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')
fpath = os.path.join(path, 'test')
x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
| tensorflow-master | tensorflow/python/keras/datasets/cifar100.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.datasets.fashion_mnist.load_data')
def load_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
License:
The copyright for Fashion-MNIST is held by Zalando SE.
Fashion-MNIST is licensed under the [MIT license](
https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
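
  Example (illustrative usage; the dataset is downloaded on first call):

  ```
  import tensorflow as tf

  (x_train, y_train), (x_test, y_test) = (
      tf.keras.datasets.fashion_mnist.load_data())
  # 60,000 training and 10,000 test 28x28 grayscale images, with integer
  # labels in [0, 9].
  print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
  ```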
"""
dirname = os.path.join('datasets', 'fashion-mnist')
base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
| tensorflow-master | tensorflow/python/keras/datasets/fashion_mnist.py |