# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import attention_wrapper
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState",
("cell_state", "log_probs", "finished", "lengths",
"accumulated_attention_probs"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
`output_time_major` is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = tf.convert_to_tensor(t, name="t")
shape_t = tf.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = tf.tile(tf.expand_dims(t, 1), tiling)
tiled = tf.reshape(
tiled, tf.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tf.TensorShape([tiled_static_batch_size]).concatenate(
t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = tf.contrib.framework.nest.flatten(t)
with tf.name_scope(name, "tile_batch", flat_t + [multiplier]):
return tf.contrib.framework.nest.map_structure(
lambda t_: _tile_batch(t_, multiplier), t)
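# Illustrative usage of tile_batch (not part of the original file; values are
# hypothetical):
#
#   t = tf.constant([[1, 2], [3, 4]])      # shape [2, 2]
#   tiled = tile_batch(t, multiplier=3)    # shape [6, 2]
#   # rows: [1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]
#
# Note the per-entry repetition; tf.tile(t, [3, 1]) would instead interleave
# whole copies of the batch: [1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4].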
def gather_tree_from_array(t, parent_ids, sequence_length):
"""Calculates the full beams for `TensorArray`s.
Args:
t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
where `s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `Tensor` which is a stacked `TensorArray` of the same size and type as
`t` and where beams are sorted in each `Tensor` according to `parent_ids`.
"""
max_time = parent_ids.shape[0].value or tf.shape(parent_ids)[0]
batch_size = parent_ids.shape[1].value or tf.shape(parent_ids)[1]
beam_width = parent_ids.shape[2].value or tf.shape(parent_ids)[2]
# Generate beam ids that will be reordered by gather_tree.
beam_ids = tf.expand_dims(
tf.expand_dims(tf.range(beam_width), 0), 0)
beam_ids = tf.tile(beam_ids, [max_time, batch_size, 1])
max_sequence_lengths = tf.to_int32(tf.reduce_max(sequence_length, axis=1))
sorted_beam_ids = tf.contrib.seq2seq.gather_tree(
step_ids=beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
in_bound_steps = tf.transpose(
tf.sequence_mask(sequence_length, maxlen=max_time),
perm=[2, 0, 1])
sorted_beam_ids = tf.where(
in_bound_steps, x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = tf.tile(tf.reshape(
tf.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
batch_ind = tf.tile(tf.reshape(
tf.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
batch_ind = tf.transpose(batch_ind, perm=[1, 0, 2])
indices = tf.stack([time_ind, batch_ind, sorted_beam_ids], -1)
# Gather from a tensor with collapsed additional dimensions.
gather_from = t
final_shape = tf.shape(gather_from)
gather_from = tf.reshape(
gather_from, [max_time, batch_size, beam_width, -1])
ordered = tf.gather_nd(gather_from, indices)
ordered = tf.reshape(ordered, final_shape)
return ordered
def _check_maybe(t):
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
"""Raises an exception if dimensions are known statically and can not be
reshaped to [batch_size, beam_size, -1].
"""
reshaped_shape = tf.TensorShape([batch_size, beam_width, None])
if (batch_size is not None and shape[0].value is not None
and (shape[0] != batch_size * beam_width
or (shape.ndims >= 2 and shape[1].value is not None
and (shape[0] != batch_size or shape[1] != beam_width)))):
tf.logging.warn("TensorArray reordering expects elements to be "
"reshapable to %s which is incompatible with the "
"current shape %s. Consider setting "
"reorder_tensor_arrays to False to disable TensorArray "
"reordering during the beam search."
% (reshaped_shape, shape))
return False
return True
def _check_batch_beam(t, batch_size, beam_width):
"""Returns an Assert operation checking that the elements of the stacked
TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
the TensorArray elements have a known rank of at least 1.
"""
error_message = ("TensorArray reordering expects elements to be "
"reshapable to [batch_size, beam_size, -1] which is "
"incompatible with the dynamic shape of %s elements. "
"Consider setting reorder_tensor_arrays to False to disable "
"TensorArray reordering during the beam search."
% (t.name))
rank = t.shape.ndims
shape = tf.shape(t)
if rank == 2:
condition = tf.equal(shape[1], batch_size * beam_width)
else:
condition = tf.logical_or(
tf.equal(shape[1], batch_size * beam_width),
tf.logical_and(
tf.equal(shape[1], batch_size),
tf.equal(shape[2], beam_width)))
return tf.Assert(condition, [error_message])
class BeamSearchDecoder(tf.contrib.seq2seq.Decoder):
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
When using `AttentionWrapper`, a coverage penalty is also recommended when
computing scores (https://arxiv.org/pdf/1609.08144.pdf); it encourages the
translation to cover all inputs.
"""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
coverage_penalty_weight=0.0,
reorder_tensor_arrays=True):
"""Initialize the BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
if (output_layer is not None and
not isinstance(output_layer, tf.layers.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
self._reorder_tensor_arrays = reorder_tensor_arrays
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: tf.nn.embedding_lookup(embedding, ids))
self._start_tokens = tf.convert_to_tensor(
start_tokens, dtype=tf.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = tf.convert_to_tensor(
end_token, dtype=tf.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = tf.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._coverage_penalty_weight = coverage_penalty_weight
self._initial_cell_state = tf.contrib.framework.nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = tf.tile(
tf.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = tf.one_hot(
tf.zeros([self._batch_size], dtype=tf.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=tf.bool)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = tf.contrib.framework.nest.map_structure(
lambda s: tf.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return tf.contrib.framework.nest.map_structure(
lambda s: s[1:], layer_output_shape)
@property
def tracks_own_finished(self):
"""The BeamSearchDecoder shuffles its beams and their finished state.
For this reason, it conflicts with the `dynamic_decode` function's
tracking of finished states. Setting this property to true avoids
early stopping of decoding due to mismanagement of the finished state
in `dynamic_decode`.
Returns:
`True`.
"""
return True
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tf.TensorShape([self._beam_width]),
predicted_ids=tf.TensorShape([self._beam_width]),
parent_ids=tf.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = tf.contrib.framework.nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=tf.contrib.framework.nest.map_structure(
lambda _: dtype, self._rnn_output_size()),
predicted_ids=tf.int32,
parent_ids=tf.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
dtype = tf.contrib.framework.nest.flatten(self._initial_cell_state)[0].dtype
log_probs = tf.one_hot( # shape(batch_sz, beam_sz)
tf.zeros([self._batch_size], dtype=tf.int32),
depth=self._beam_width,
on_value=tf.convert_to_tensor(0.0, dtype=dtype),
off_value=tf.convert_to_tensor(-np.Inf, dtype=dtype),
dtype=dtype)
init_attention_probs = get_attention_probs(
self._initial_cell_state, self._coverage_penalty_weight)
if init_attention_probs is None:
init_attention_probs = ()
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=tf.zeros(
[self._batch_size, self._beam_width], dtype=tf.int64),
accumulated_attention_probs=init_attention_probs)
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
**NOTE** These are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = tf.to_int32(
tf.reduce_max(final_state.lengths, axis=1))
predicted_ids = tf.contrib.seq2seq.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
if self._reorder_tensor_arrays:
final_state = final_state._replace(
cell_state=tf.contrib.framework.nest.map_structure(
lambda t: self._maybe_sort_array_beams(
t, outputs.parent_ids, final_state.lengths),
final_state.cell_state))
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, tf.Tensor):
  s = tf.TensorShape(tf.contrib.util.constant_value(s))
else:
  s = tf.TensorShape(s)
t_shape = tf.shape(t)
static_batch_size = tf.contrib.util.constant_value(self._batch_size)
batch_size_beam_width = (
None
if static_batch_size is None else static_batch_size * self._beam_width)
reshaped_t = tf.reshape(
t,
tf.concat(([self._batch_size * self._beam_width], t_shape[2:]), 0))
reshaped_t.set_shape(
(tf.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, tf.Tensor):
s = tf.TensorShape(tf.contrib.util.constant_value(s))
else:
s = tf.TensorShape(s)
t_shape = tf.shape(t)
reshaped_t = tf.reshape(
t,
tf.concat(([self._batch_size, self._beam_width], t_shape[1:]), 0))
static_batch_size = tf.contrib.util.constant_value(self._batch_size)
expected_reshaped_shape = tf.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?" %
(reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
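# Shape sketch for the merge/split pair above (illustrative, not from the
# original source): with batch_size=2 and beam_width=3, _split_batch_beams
# reshapes [6, depth] -> [2, 3, depth] and _merge_batch_beams performs the
# inverse [2, 3, depth] -> [6, depth]. Both are pure reshapes, so the entry
# for (batch b, beam k) always maps to flat row b * beam_width + k.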
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
If `t` is a matrix or higher order tensor, then the return value is
`t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
returned unchanged.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tf.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
then we reshape it to `[batch_size, beam_width] + s`.
Args:
t: `Tensor` of dimension `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
A reshaped version of t with shape `[batch_size, beam_width] + s`.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tf.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
"""Maybe sorts beams within a `TensorArray`.
Args:
t: A `TensorArray` of size `max_time` that contains `Tensor`s of shape
`[batch_size, beam_width, s]` or `[batch_size * beam_width, s]` where
`s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `TensorArray` where beams are sorted in each `Tensor` or `t` itself if
it is not a `TensorArray` or does not meet shape requirements.
"""
if not isinstance(t, tf.TensorArray):
return t
# pylint: disable=protected-access
if (not t._infer_shape or not t._element_shape
or t._element_shape[0].ndims is None
or t._element_shape[0].ndims < 1):
shape = (
t._element_shape[0] if t._infer_shape and t._element_shape
else tf.TensorShape(None))
tf.logging.warn("The TensorArray %s in the cell state is not amenable to "
"sorting based on the beam search result. For a "
"TensorArray to be sorted, its elements shape must be "
"defined and have at least a rank of 1, but saw shape: %s"
% (t.handle.name, shape))
return t
shape = t._element_shape[0]
# pylint: enable=protected-access
if not _check_static_batch_beam_maybe(
shape, tf.contrib.util.constant_value(self._batch_size),
self._beam_width):
return t
t = t.stack()
with tf.control_dependencies(
[_check_batch_beam(t, self._batch_size, self._beam_width)]):
return gather_tree_from_array(t, parent_ids, sequence_length)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
coverage_penalty_weight = self._coverage_penalty_weight
with tf.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = tf.contrib.framework.nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = tf.contrib.framework.nest.map_structure(
self._maybe_merge_batch_beams, cell_state, self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = tf.contrib.framework.nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = tf.contrib.framework.nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = tf.cond(
tf.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight,
coverage_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
A tuple `(output, next_state)`, where `output` is a `BeamSearchDecoderOutput`
for this step and `next_state` is the new `BeamSearchDecoderState`.
"""
static_batch_size = tf.contrib.util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
not_finished = tf.logical_not(previously_finished)
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = tf.nn.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = tf.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value or tf.shape(logits)[-1]
lengths_to_add = tf.one_hot(
indices=tf.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=tf.int64)
add_mask = tf.to_int64(not_finished)
lengths_to_add *= tf.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + tf.expand_dims(prediction_lengths, 2))
# Calculate the accumulated attention probabilities if coverage penalty is
# enabled.
accumulated_attention_probs = None
attention_probs = get_attention_probs(
next_cell_state, coverage_penalty_weight)
if attention_probs is not None:
attention_probs *= tf.expand_dims(tf.to_float(not_finished), 2)
accumulated_attention_probs = (
beam_state.accumulated_attention_probs + attention_probs)
batch_finished = tf.reduce_all(
previously_finished, axis=1, keepdims=True)
any_batch_finished = tf.reduce_any(batch_finished)
batch_finished = tf.tile(tf.expand_dims(batch_finished, 2),
[1, beam_width, vocab_size])
def _normalized_scores():
return _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
finished=batch_finished,
accumulated_attention_probs=accumulated_attention_probs)
# Normalize the scores of finished batches.
scores = tf.cond(any_batch_finished, _normalized_scores, lambda: total_probs)
time = tf.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_flat = tf.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = tf.convert_to_tensor(
beam_width, dtype=tf.int32, name="beam_width")
next_beam_scores, word_indices = tf.nn.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# tf.to_int32(word_indices % vocab_size,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = tf.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = tf.to_int32(raw_next_word_ids)
next_beam_ids = tf.to_int32(
word_indices / vocab_size, name="next_beam_parent_ids")
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = tf.logical_or(
previously_finished,
tf.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = tf.to_int64(tf.logical_not(previously_finished))
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
next_accumulated_attention_probs = ()
if accumulated_attention_probs is not None:
next_accumulated_attention_probs = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=accumulated_attention_probs,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1],
name="next_accumulated_attention_probs")
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = tf.contrib.framework.nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished,
accumulated_attention_probs=next_accumulated_attention_probs)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
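# Index arithmetic used in _beam_search_step above (illustrative): top_k runs
# over the flattened [beam_width * vocab_size] axis, so each flat word_index
# decomposes as
#
#   next_word_id = word_index % vocab_size
#   parent_beam  = word_index // vocab_size
#
# e.g. with vocab_size=10000 and word_index=23042, the chosen token id is 3042
# and it extends parent beam 2.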
def get_attention_probs(next_cell_state, coverage_penalty_weight):
"""Get attention probabilities from the cell state.
Args:
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
The attention probabilities with shape `[batch_size, beam_width, max_time]`
if coverage penalty is enabled. Otherwise, returns None.
Raises:
ValueError: If no cell is attentional but coverage penalty is enabled.
"""
if coverage_penalty_weight == 0.0:
return None
# Attention probabilities of each attention layer. Each with shape
# `[batch_size, beam_width, max_time]`.
probs_per_attn_layer = []
if isinstance(next_cell_state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer = [attention_probs_from_attn_state(next_cell_state)]
elif isinstance(next_cell_state, tuple):
for state in next_cell_state:
if isinstance(state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer.append(attention_probs_from_attn_state(state))
if not probs_per_attn_layer:
raise ValueError(
"coverage_penalty_weight must be 0.0 if no cell is attentional.")
if len(probs_per_attn_layer) == 1:
attention_probs = probs_per_attn_layer[0]
else:
# Calculate the average attention probabilities from all attention layers.
attention_probs = [
tf.expand_dims(prob, -1) for prob in probs_per_attn_layer]
attention_probs = tf.concat(attention_probs, -1)
attention_probs = tf.reduce_mean(attention_probs, -1)
return attention_probs
def _get_scores(log_probs, sequence_lengths, length_penalty_weight,
coverage_penalty_weight, finished, accumulated_attention_probs):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The candidate sequence lengths, shaped
`[batch_size, beam_width, vocab_size]`.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
finished: A boolean tensor of shape `[batch_size, beam_width, vocab_size]`
that specifies which elements in the beam are finished already.
accumulated_attention_probs: Accumulated attention probabilities up to the
current time step, with shape `[batch_size, beam_width, max_time]` if
coverage_penalty_weight is not 0.0.
Returns:
The scores normalized by the length_penalty and coverage_penalty.
Raises:
ValueError: accumulated_attention_probs is None when coverage penalty is
enabled.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
coverage_penalty_weight = tf.convert_to_tensor(
coverage_penalty_weight, name="coverage_penalty_weight")
if coverage_penalty_weight.shape.ndims != 0:
raise ValueError("coverage_penalty_weight should be a scalar, "
"but saw shape: %s" % coverage_penalty_weight.shape)
if accumulated_attention_probs is None:
raise ValueError(
"accumulated_attention_probs can be None only if coverage penalty is "
"disabled.")
# Add source sequence length mask before computing coverage penalty.
accumulated_attention_probs = tf.where(
tf.equal(accumulated_attention_probs, 0.0),
tf.ones_like(accumulated_attention_probs),
accumulated_attention_probs)
# coverage penalty =
# sum over `max_time` {log(min(accumulated_attention_probs, 1.0))}
coverage_penalty = tf.reduce_sum(
tf.log(tf.minimum(accumulated_attention_probs, 1.0)), 2)
# Apply coverage penalty to finished predictions.
weighted_coverage_penalty = coverage_penalty * coverage_penalty_weight
# Reshape from [batch_size, beam_width] to [batch_size, beam_width, 1]
weighted_coverage_penalty = tf.expand_dims(
weighted_coverage_penalty, 2)
# Normalize the scores of finished predictions.
return tf.where(
finished, log_probs / length_penalty_ + weighted_coverage_penalty,
log_probs)
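# Worked example of the coverage penalty above (hypothetical numbers),
# following the GNMT formulation (https://arxiv.org/abs/1609.08144):
#
#   accumulated_attention_probs = [0.9, 1.3, 0.4]   # one value per source step
#   cp = log(min(0.9, 1.0)) + log(min(1.3, 1.0)) + log(min(0.4, 1.0))
#      = log(0.9) + log(1.0) + log(0.4) ~= -1.02
#
# A hypothesis that attended to every source position (all values near 1)
# incurs cp ~= 0; one that ignored a source position is penalized. Zeros are
# replaced with ones beforehand, so masked source padding contributes log(1)=0.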
def attention_probs_from_attn_state(attention_state):
"""Calculates the average attention probabilities.
Args:
attention_state: An instance of `AttentionWrapperState`.
Returns:
The attention probabilities in the given AttentionWrapperState.
If there are multiple attention mechanisms, returns the average value from
all of them.
"""
# Attention probabilities over time steps, with shape
# `[batch_size, beam_width, max_time]`.
attention_probs = attention_state.alignments
if isinstance(attention_probs, tuple):
attention_probs = [
tf.expand_dims(prob, -1) for prob in attention_probs]
attention_probs = tf.concat(attention_probs, -1)
attention_probs = tf.reduce_mean(attention_probs, -1)
return attention_probs
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
sequence_lengths: `Tensor`, the sequence length of each hypothesis.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = tf.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tf.contrib.util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return tf.div((5. + tf.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
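# Numeric sketch of the length penalty (hypothetical values): with
# penalty_factor=0.6,
#
#   length  5: ((5 + 5) / 6) ** 0.6  ~= 1.359
#   length 20: ((5 + 20) / 6) ** 0.6 ~= 2.354
#
# Scores are log probabilities (negative), so dividing by a larger penalty
# shrinks their magnitude and makes longer hypotheses comparatively cheaper.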
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = tf.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = tf.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=tf.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = tf.tile(
tf.reshape(finished_row, [1, 1, -1]),
tf.concat([tf.shape(finished), [1]], 0))
finished_mask = tf.tile(
tf.expand_dims(finished, 2), [1, 1, vocab_size])
return tf.where(finished_mask, finished_probs, probs)
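# Behavior sketch for _mask_probs (illustrative): for a finished beam, the
# returned row is dtype.min everywhere except a 0.0 log-prob at eos_token,
# e.g. with vocab_size=4 and eos_token=2:
#
#   finished_row = [min, min, 0.0, min]
#
# so a finished beam can only "continue" with EOS and its total log
# probability stops changing.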
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when the rank of gather_from is at least
as large as the length of gather_shape. This is used in conjunction with nest
so that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if isinstance(gather_from, tf.TensorArray):
return gather_from
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
name: The tensor name for set of operations. By default this is
'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with tf.name_scope(name, "tensor_gather_helper"):
range_ = tf.expand_dims(tf.range(batch_size) * range_size, 1)
gather_indices = tf.reshape(gather_indices + range_, [-1])
output = tf.gather(
tf.reshape(gather_from, gather_shape), gather_indices)
final_shape = tf.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tf.contrib.util.constant_value(batch_size)
final_static_shape = (
tf.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = tf.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
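# Offset arithmetic in _tensor_gather_helper (illustrative, hypothetical
# values): with batch_size=2, range_size=3 (the beam width) and
# gather_indices = [[1, 0, 2], [2, 2, 0]],
#
#   range_                = [[0], [3]]
#   gather_indices (flat) = [1, 0, 2, 5, 5, 3]
#
# so the indices for batch entry 1 address rows 3..5 of the flattened
# gather_from tensor, preserving batch order.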
# ==== End of file: TensorFlow/Translation/GNMT/beam_search_decoder.py (repo: DeepLearningExamples-master) ====
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for VariableMgr."""
from __future__ import print_function
import collections as pycoll
import operator
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradients_impl
PS_SHADOW_VAR_PREFIX = 'ps_var'
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
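# Minimal usage sketch (the variable setup below is assumed, not from the
# original file):
#
#   loss_scale = tf.Variable(2.0 ** 15, trainable=False, dtype=tf.float32)
#   loss_scale_normal_steps = tf.Variable(0, trainable=False, dtype=tf.int32)
#   update_op = get_loss_scale_update_op(
#       loss_scale, loss_scale_normal_steps, inc_loss_scale_every_n=2000)
#   # Run update_op only on steps where the gradients were all finite; on
#   # overflow, halve loss_scale and reset the counter instead (see
#   # append_gradients_with_loss_scale below).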
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
# loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
# TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
name='cond_if_grad_has_inf_nan'
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
Note that variable creation only happens when building the model graph on the
first device (see how it sets the 'reuse' parameter in
VariableMgr.*.create_outer_variable_scope()). That means, for all other
devices, the variable scope will reuse the variables created before, which
requires that we set the caching_device correctly, as otherwise it may not be
able to find the previously created variable and will create a new one. This
requires that, when building the model graph on different devices, variables
with the same name have the same size.
TODO(laigd): consider adding tests or verification logic to enforce this, or
refactor it.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
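# Usage sketch for OverrideCachingDevice (device names are hypothetical;
# custom_getter is standard TF 1.x variable_scope machinery):
#
#   getter = OverrideCachingDevice(
#       devices=['/gpu:0', '/gpu:1'],
#       device_for_small_variables='/cpu:0',
#       small_variable_size_threshold=1024)
#   with tf.variable_scope('model', custom_getter=getter):
#     small = tf.get_variable('b', shape=[256])          # cached on /cpu:0
#     large = tf.get_variable('w', shape=[4096, 4096])   # least loaded GPU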
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in the LOCAL_VARIABLES and not the GLOBAL_VARIABLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
def __call__(self, getter, name, *args, **kwargs):
if name.startswith(PS_SHADOW_VAR_PREFIX):
return getter(*args, **kwargs)
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = [tf.GraphKeys.GLOBAL_VARIABLES]
else:
collections = collections[:]
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.append(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
return getter(name, *args, **kwargs)
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""Initializer for ParamServerDevicSetter.
Args:
worker_device: the device to use for computer ops.
ps_devices: a list of device to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
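# Usage sketch for ParamServerDeviceSetter (device names are hypothetical).
# tf.device accepts a function in TF 1.x and calls it once per created op:
#
#   setter = ParamServerDeviceSetter(
#       worker_device='/gpu:0', ps_devices=['/cpu:0', '/cpu:1'])
#   with tf.device(setter):
#     w = tf.get_variable('w', shape=[1024, 1024])  # placed on a ps device
#     x = tf.ones([8, 1024])                        # compute op -> /gpu:0
#     y = tf.matmul(x, w)                           # compute op -> /gpu:0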
class StagedModelVariable(object):
"""Staging variable wrapper that decouples reads and updates.
This class represents a variable through a staging buffer. Reads from this
variable come directly from the staging buffer. Updates are pushed into
another staging buffer and are processed later.
"""
def __init__(self, real_var, var_stage_get, variable_mgr):
"""Initializer for the model variables through a staging buffer.
Args:
real_var: the underlying real variable.
var_stage_get: the read op from the staging buffer.
variable_mgr: the parent variable-manager.
"""
self.real_var = real_var
self.var_stage_get = var_stage_get
self.variable_mgr = variable_mgr
def _value(self):
"""The read access of this variable. The content from the staging buffer."""
return self.var_stage_get
def _ref(self):
"""Return the underlying variable ref, required by tf.colocate_with."""
return self.real_var._ref() # pylint: disable=protected-access
def read_value(self):
"""Mimics tf.Variable.read_value()."""
return tf.identity(self.var_stage_get, name='read')
@property
def dtype(self):
"""Return the non-reference dtype."""
return self.var_stage_get.dtype
def assign_sub(self, delta, name=None):
"""Mimic the updates to the variable.
Args:
delta: is pushed into a staging buffer and will be pumped later.
name: currently ignored; names of ops and the StagingArea are
computed without using this passed name.
Returns:
The actual updates. The colocation constraint will be reapplied.
"""
# This parameter is ignored: the StagingArea only supports setting
# the shared name, not the names of individual ops it uses.
del name
# colocate_with(None, True) clears the colocation constraints.
# Push the delta into a staging buffer.
with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
delta_staging_area = tf.contrib.staging.StagingArea(
[self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
delta_put_op = delta_staging_area.put([delta])
self.variable_mgr.staging_delta_ops.append(delta_put_op)
delta_get_op = delta_staging_area.get()[0]
# Return the actual updates. The colocation constraint will be reapplied.
return self.real_var.assign_sub(delta_get_op)
@staticmethod
# pylint: disable=bad-staticmethod-argument,invalid-name
def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
"""Utility function for converting a StagedModelVariable to a Tensor."""
del dtype, name # unused: this function returns the cached ref or value.
if as_ref:
return self._ref()
else:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
Instead of a caching device, this getter tracks where the variable is used.
And on each device, it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
# This helps copying the weights from the parameter to this server only
# once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = tf.contrib.staging.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
# For trainable variables, they are managed separately through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether the returned variables are writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf value indicates whether any gradient
contains a NaN or an Inf.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf value indicates whether any gradient
contains a NaN or an Inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf value indicates whether any gradient
contains a NaN or an Inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. has_nan_or_inf indicates whether any gradient contains a
    NaN or an Inf.
"""
grads = [g for g, _ in grad_and_vars]
if any(isinstance(g, tf.IndexedSlices) for g in grads):
# TODO(reedwm): All-reduce IndexedSlices more effectively.
grad = gradients_impl._AggregateIndexedSlicesGradients(grads) # pylint: disable=protected-access
else:
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.scalar_mul(1.0 / len(grads), grad)
v = grad_and_vars[0][1]
if check_inf_nan:
with tf.name_scope('check_for_inf_and_nan'):
      # `grads` is a list of same-shaped per-tower gradients, which
      # tf.is_finite implicitly stacks into a single tensor before the check.
      has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
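def _example_aggregate_gradients():
  """Minimal usage sketch (hypothetical tensors and variable) of the
  `tower_grads` layout the aggregation helpers above expect: one inner list
  per tower, with matching variables across towers."""
  var = tf.Variable(tf.zeros([2]))
  tower_grads = [
      [(tf.constant([1.0, 2.0]), var)],  # gradients computed on tower 0
      [(tf.constant([3.0, 4.0]), var)],  # gradients computed on tower 1
  ]
  agg_grads, _ = aggregate_gradients_using_copy(
      tower_grads, use_mean=True, check_inf_nan=False)
  return agg_grads  # [(averaged gradient [2.0, 3.0], var)]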
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/variable_mgr/variable_mgr_util.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes and functions for doing a single-machine batch all-reduce.
An all-reduce takes the reduction (typically a sum) of a list of tensors,
each on a different device. The result must end up back on each device, which
is where the word "all" comes from. In summary, each device starts with a
single tensor, and ends up with the reduction of all tensors.
A batch all-reduce performs several independent all-reduces. When doing a batch
all-reduce, care is taken to evenly distribute the reduction computations
across devices and inter-device tensor transfers across device links.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(reedwm): Support distributed all-reduces in this file.
# TODO(reedwm): Merge this code with allreduce.py, which contains some batch
# all-reduce code that this file calls. allreduce.py also supports distributed
# batch-reduce while this file only supports single-machine all-reduce.
import abc
from collections import namedtuple
import six
import tensorflow as tf
from tensorflow.python.ops import gradients_impl
from variable_mgr import allreduce
from variable_mgr import constants
def _all_reduce_using_copy(tensors_across_devices, use_mean):
"""Does an all-reduce of a list of tensors by copying to the current device.
The tensors are copied to the current device and then reduced.
Args:
tensors_across_devices: A list of tensors, each on a different device.
    use_mean: Whether to take the mean of the tensors instead of a sum.
Returns:
A reduced tensor on the current device.
"""
assert tensors_across_devices
if isinstance(tensors_across_devices[0], tf.IndexedSlices):
reduced_tensor = gradients_impl._AggregateIndexedSlicesGradients(
tensors_across_devices)
if use_mean:
val = tf.multiply(reduced_tensor.values,
float(1. / len(tensors_across_devices)))
reduced_tensor = tf.IndexedSlices(val, reduced_tensor.indices,
reduced_tensor.dense_shape)
else:
reduced_tensor = tf.add_n(tensors_across_devices)
if use_mean:
reduced_tensor *= 1. / len(tensors_across_devices)
return reduced_tensor
@six.add_metaclass(abc.ABCMeta)
class BatchAllReduceAlgorithm(object):
"""Represents an algorithm for performing a batch all-reduce operation."""
def batch_all_reduce(self, all_device_tensors, num_splits, compact_tensors,
defer_tensors):
"""Performs a batch all-reduce.
The reduction done is a sum.
`all_device_tensors` is a list of list of tensors that will be batch
all-reduced. All tensors within a single inner list must be on the same
device. The nth element in each list, for any n, will be reduced together.
The return value is in the same form as `all_device_tensors`, except that
each tensor is reduced.
For example, if `all_device_tensors` is:
[[ A, B ], # A and B are on GPU 0
[ C, D ]] # C and D are on GPU 1
Then the return value will be:
[[ A+C, B+D ], # These two tensors are on GPU 0
[ A+C, B+D ]] # These two tensors are on GPU 1
Arguments:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
num_splits: If not None, tensors will be concatenated and split into this
many pieces during the all-reduce, then split back into their original
shapes afterwards. Has no impact on correctness and can improve
performance. Requires all tensors to be the same type.
      compact_tensors: If True, tensors are cast to fp16 before being
        all-reduced. This improves performance, but hurts numerical stability.
defer_tensors: If True, every time the return value
`reduced_all_device_tensors` is evaluated, the result will be the
        reduced values of `all_device_tensors` from the previous session
run instead of the current session run, or zero on the first session
run. This can improve performance. When training neural networks,
deferring gradients often does not harm training, so this can be used to
improve performance.
Returns:
reduced_all_device_tensors: A list in the same form as
`all_device_tensors`, except each tensor has been reduced.
warmup_ops: A list of ops needed to be run once before the all-reduce can
occur.
"""
    # Before all-reducing the tensors, we apply several preprocessing steps
    # that can speed up the all-reduce. We undo these steps after all-reducing
    # the tensors.
warmup_ops = []
if num_splits:
packer = _TensorPacker(num_splits)
all_device_tensors = packer.concat_all_device_tensors(all_device_tensors)
# If enabled, we compact and defer tensors in between concatenating them
# and splitting them, because it is faster to do operations on a single
# concatenated tensor than on multiple smaller tensors.
if compact_tensors:
all_device_tensors_before_compact = all_device_tensors
all_device_tensors = _compact_all_device_tensors(all_device_tensors)
if defer_tensors:
all_device_tensors, put_ops, warmup_ops = _defer_all_device_tensors(
all_device_tensors)
if num_splits:
all_device_tensors = packer.split_all_device_tensors(all_device_tensors)
all_device_tensors = self._do_batch_all_reduce(all_device_tensors)
    # Undo the preprocessing operations in the opposite order from which we
    # applied them.
if num_splits:
all_device_tensors = packer.undo_split_all_device_tensors(
all_device_tensors)
# Note: There is no undo operation for deferring tensors. But we do need to
# call _add_put_op_control_deps at the end if we deferred the tensors.
if compact_tensors:
all_device_tensors = _undo_compact_all_device_tensors(
all_device_tensors, all_device_tensors_before_compact)
if num_splits:
all_device_tensors = packer.undo_concat_all_device_tensors(
all_device_tensors)
if defer_tensors:
all_device_tensors = _add_put_op_control_deps(all_device_tensors,
num_splits, put_ops)
return all_device_tensors, warmup_ops
@abc.abstractmethod
def _do_batch_all_reduce(self, all_device_tensors):
"""Performs a batch all-reduce.
Unlike `self.batch_all_reduce`, this does not do any preprocessing of the
tensors.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
Returns:
reduced_all_device_tensors: A list in the same form as
`all_device_tensors`, except each tensor has been reduced.
"""
pass
class CopyToDeviceAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that copies tensors to be reduced to a specific device."""
def __init__(self, devices_to_reduce_on, use_mean=False):
self._devices = devices_to_reduce_on
self._use_mean = use_mean
def _do_batch_all_reduce(self, all_device_tensors):
reduced_tensors = []
for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
with tf.device(self._devices[i % len(self._devices)]):
reduced_tensor = _all_reduce_using_copy(tensors_across_devices,
self._use_mean)
reduced_tensors.append(reduced_tensor)
# The tensors will be brought back to each device once they are used.
return [reduced_tensors] * len(all_device_tensors)
class HierarchicalCopyAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that uses hierarchical copies. This is only optimized for
eight devices connected in NetworkTopology.DGX1 or NetworkTopology.GCP_V100
topology.
"""
def __init__(self, network_topology):
"""Initializer for HierarchicalCopyAlgorithm.
Args:
network_topology: An instance of Enum class constants.NetworkTopology.
"""
self._network_topology = network_topology
def _do_batch_all_reduce(self, all_device_tensors):
avail_devices = [device_tensors[0].device
for device_tensors in all_device_tensors]
reduced_tensors = []
num_devices = len(avail_devices)
group_size = num_devices // 2
for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
group_0_main_device, group_1_main_device = self.__get_main_devices(
i, num_devices)
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Reduce the first group.
group_0_tensors = tensors_across_devices[group_0_begin:
group_0_begin + group_size]
with tf.device(avail_devices[group_0_main_device]):
group_0_reduced_tensor = _all_reduce_using_copy(group_0_tensors, False)
# Reduce the second group.
group_1_tensors = tensors_across_devices[group_1_begin:
group_1_begin + group_size]
with tf.device(avail_devices[group_1_main_device]):
group_1_reduced_tensor = _all_reduce_using_copy(group_1_tensors, False)
# Reduce between the groups.
with tf.device(avail_devices[group_0_main_device]):
total_reduced_tensor = _all_reduce_using_copy(
[group_0_reduced_tensor, group_1_reduced_tensor], False)
# Broadcast the result back into the root of each group.
with tf.device(avail_devices[group_0_main_device]):
group_0_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
with tf.device(avail_devices[group_1_main_device]):
group_1_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
reduced_tensors_bcast = []
for j in range(len(tensors_across_devices)):
with tf.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_tensor = group_0_reduced_tensor_bcast
else:
src_device_tensor = group_1_reduced_tensor_bcast
reduced_tensors_bcast.append(tf.identity(src_device_tensor))
reduced_tensors.append(reduced_tensors_bcast)
reduced_tensors = list(zip(*reduced_tensors))
return reduced_tensors
def __get_main_devices(self, tensor_index, num_devices):
"""Returns the pair of main devices to use for initial reduction.
Args:
tensor_index: Index of the current tensor in the list of tensors to copy.
num_devices: Total number of devices.
Returns:
      A tuple containing the pair of main device indices for the initial
      reduction. The first element of the tuple is also used for the final
      reduction.
Raises:
ValueError: Invalid input arguments.
"""
if self._network_topology == constants.NetworkTopology.DGX1:
return tensor_index % num_devices, (tensor_index +
(num_devices // 2)) % num_devices
elif self._network_topology == constants.NetworkTopology.GCP_V100:
if num_devices != 8:
raise ValueError('HierarchicalCopy only supports eight devices in %s.' %
self._network_topology)
# TODO(hinsu): Generalize main device indices to handle any other
# isomorphic connection graph that connects two cliques using connections
# other than 0-5 and 2-7.
main_device_pairs = [(0, 5), (2, 7), (5, 0), (7, 2)]
return main_device_pairs[tensor_index % len(main_device_pairs)]
else:
# TODO(reedwm): make this logic more general for arbitrary topology.
raise ValueError(
'HierarchicalCopy is not supported for %s network topology.' %
self._network_topology)
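# Worked example (traced from __get_main_devices above): on an eight-device
# DGX1, successive tensors rotate their main-device pair:
#   tensor 0 -> (0, 4), tensor 1 -> (1, 5), ..., tensor 7 -> (7, 3).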
class AllReduceSpecAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that uses an all reduce spec."""
def __init__(self, all_reduce_spec, gpu_indices, agg_small_grads_max_bytes,
agg_small_grads_max_group):
spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
if len(spec) != 1:
raise ValueError(
'Replicated mode does not support hybrid all-reduce strategies')
self._all_reduce_spec = spec[0]
self._gpu_indices = gpu_indices
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
def _do_batch_all_reduce(self, all_device_tensors):
# TODO(reedwm): Merge allreduce.sum_gradients_all_reduce with the other
# gradient aggregation code, since gradient aggregation is doing an all
# reduce. Currently, we do gradient repacking in two different places.
# TODO(reedwm): Change the allreduce code to reduce tensors instead of
# tower_grads.
tower_grads = [[(t, None) for t in device_tensors]
for device_tensors in all_device_tensors]
aggregated_device_grads = allreduce.sum_gradients_all_reduce(
False, # single_session
['/job:localhost'],
tower_grads,
1,
self._all_reduce_spec.alg,
self._all_reduce_spec.shards,
self._gpu_indices,
agg_small_grads_max_bytes=self._agg_small_grads_max_bytes,
agg_small_grads_max_group=self._agg_small_grads_max_group)
return [[t for t, _ in grad_vars] for grad_vars in aggregated_device_grads]
def algorithm_from_params(params):
"""Returns a BatchAllReduceAlgorithm from a Params tuple."""
if params.all_reduce_spec:
if params.gpu_indices:
gpu_indices = [int(x) for x in params.gpu_indices.split(',')]
else:
      gpu_indices = list(range(params.num_gpus))
return AllReduceSpecAlgorithm(params.all_reduce_spec, gpu_indices,
params.agg_small_grads_max_bytes,
params.agg_small_grads_max_group)
elif params.hierarchical_copy:
return HierarchicalCopyAlgorithm(params.network_topology)
else:
if params.local_parameter_device == 'gpu':
devices_to_reduce_on = ['/gpu:%d' % i for i in range(params.num_gpus)]
else:
devices_to_reduce_on = ['/cpu:0']
    # Note: made only for the Adam optimizer.
return CopyToDeviceAlgorithm(devices_to_reduce_on, use_mean=True)
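# Illustrative parameter combinations (the Params tuple itself is defined by
# the caller; the field names below follow the attribute accesses above):
#   all_reduce_spec='nccl', gpu_indices='0,1'   -> AllReduceSpecAlgorithm
#   hierarchical_copy=True                      -> HierarchicalCopyAlgorithm
#   local_parameter_device='gpu', num_gpus=2    -> CopyToDeviceAlgorithm over
#                                                  ['/gpu:0', '/gpu:1']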
def _apply_to_all_device_tensors(all_device_tensors, apply_func, colocate=True):
"""Applies a function to each tensor in `all_device_tensors`.
A new list of lists of tensors is returned, where every tensor in
`all_device_tensors` has had `apply_func` called on it. `all_device_tensors`
is not modified.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
a tensor where `i` is the device index and `j` is the tensor index.
apply_func: A function taking in three arguments: tensor, device_index,
tensor_index, and returning a modified tensor.
`tensor` is `all_device_tensors[device_index][tensor_index]`.
    colocate: If True, apply_func will be run under a colocation context
      manager with its input tensor.
Returns:
A list in the same form as `all_device_tensors`, except each tensor has had
`apply_func` called on it.
"""
new_all_device_tensors = []
for device_index, device_tensors in enumerate(all_device_tensors):
new_device_tensors = []
for tensor_index, t in enumerate(device_tensors):
if colocate:
with tf.colocate_with(t):
new_t = apply_func(t, device_index, tensor_index)
else:
new_t = apply_func(t, device_index, tensor_index)
new_device_tensors.append(new_t)
new_all_device_tensors.append(new_device_tensors)
return new_all_device_tensors
def _defer_tensor(tensor):
"""Defers the retrieval of a tensor.
The tensor is put into a StagingArea, and the return value is the
retrieval of the tensor from the StagingArea. The effect is that the
tensor returned from this function is the tensor that was put in the
StagingArea for the previous Session.run() call.
Args:
tensor: The tensor to defer for one step.
Returns:
deferred_tensor: The tensor deferred for one step.
put_op: An op to put `tensor` in the StagingArea. Must be run every step
that `deferred_tensor` is run.
warmup_op: A warmup op that should be called before the first step. Puts
a zero tensor into the StagingArea.
"""
tensor_stage = tf.contrib.staging.StagingArea([tensor.dtype], [tensor.shape])
put_op = tensor_stage.put([tensor])
warmup_op = tensor_stage.put([tf.zeros(tensor.shape, dtype=tensor.dtype)])
# Fetch the next tensor to use.
(tensor,) = tensor_stage.get()
return tensor, put_op, warmup_op
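def _example_defer_tensor(sess, some_tensor):
  """Minimal usage sketch (hypothetical session and tensor) of the deferral
  contract above: run `warmup_op` once, then run `put_op` alongside every
  fetch of the deferred tensor, which yields the previous step's value."""
  deferred, put_op, warmup_op = _defer_tensor(some_tensor)
  sess.run(warmup_op)                      # stage zeros for the first step
  first, _ = sess.run([deferred, put_op])  # returns zeros, stages a value
  second, _ = sess.run([deferred, put_op])  # returns the previous step's value
  return first, second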
def _defer_all_device_tensors(all_device_tensors):
"""Defers every tensor in `all_device_tensors`."""
put_ops = [[] for _ in all_device_tensors]
warmup_ops = [[] for _ in all_device_tensors]
def apply_func(tensor, device_index, tensor_index):
del tensor_index
tensor, put_op, warmup_op = _defer_tensor(tensor)
put_ops[device_index].append(put_op)
warmup_ops[device_index].append(warmup_op)
return tensor
new_all_device_tensors = _apply_to_all_device_tensors(all_device_tensors,
apply_func)
return new_all_device_tensors, put_ops, warmup_ops
def _add_put_op_control_deps(all_device_tensors, num_splits, put_ops):
"""Add control dependencies from `put_ops` to `all_device_tensors`.
This should only be called when deferred tensors are being used.
The control dependencies are added so that the put ops are run whenever
`all_device_tensors` is run. That way, the caller does not have to explicitly
run the put ops.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
a tensor where `i` is the device index and `j` is the tensor index.
num_splits: The number of splits that were used for the all-reduce.
put_ops: A list of put ops from deferring the tensors.
Returns:
A list in the same form as `all_device_tensors`, except each tensor has a
control dependency on an op in `put_ops`.
"""
def apply_func(tensor, device_index, tensor_index):
if num_splits == 0:
deps = [put_ops[device_index][tensor_index]]
else:
deps = put_ops[device_index]
assert len(deps) == 1
with tf.control_dependencies(deps):
return tf.identity(tensor, name='control_dependency')
return _apply_to_all_device_tensors(all_device_tensors, apply_func)
def _compact_all_device_tensors(all_device_tensors):
"""Compacts each tensor by casting to fp16."""
def apply_func(tensor, device_index, tensor_index):
del device_index, tensor_index
return tf.cast(tensor, tf.float16)
return _apply_to_all_device_tensors(all_device_tensors, apply_func)
def _undo_compact_all_device_tensors(all_device_tensors,
orig_all_device_tensors):
"""Uncompacts each tensor by casting to it's original dtype."""
def apply_func(tensor, device_index, tensor_index):
orig_tensor = orig_all_device_tensors[device_index][tensor_index]
with tf.colocate_with(orig_tensor):
return tf.cast(tensor, orig_tensor.dtype)
return _apply_to_all_device_tensors(all_device_tensors, apply_func,
colocate=False)
class _TensorPacker(object):
"""Packs and unpacks tensors into groups.
  This class first concatenates a set of tensors, then splits the concatenated
tensor into a small number of chunks. This is useful for all-reducing tensors,
as doing a small number of all-reduces on large tensors can be faster than
doing a large number of all-reduces on small tensors.
"""
def __init__(self, num_splits):
"""Initializes the _TensorPacker.
Args:
num_splits: The number of tensors to split the concatenated tensor into.
The batch all-reduce will consist of `num_splits` all-reduces.
"""
assert num_splits > 0
self._num_splits = num_splits
self._next_method = 'concat'
_concat_tensor_state = namedtuple('_concat_tensor_state',
['orig_shapes', 'orig_sizes'])
def _concat_tensors(self, device_tensors):
"""Concatenate tensors into a single tensor."""
flat_tensors = [tf.reshape(t, [-1]) for t in device_tensors]
orig_shapes = [t.shape for t in device_tensors]
orig_sizes = [s.num_elements() for s in orig_shapes]
# All shapes must be fully defined.
assert None not in orig_sizes
concatenated_grad = tf.concat(flat_tensors, 0)
return concatenated_grad, self._concat_tensor_state(orig_shapes, orig_sizes)
def _split_tensors(self, concatenated_tensor):
"""Splits concatenated tensor into `num_splits` pieces."""
# TODO(zhengxq): it is possible to optimize away the additional
# data movement by copying along the original tensor boundary.
# TODO(zhengxq): it is also possible to optimize away all the concat
# as well.
total_tensor_size = concatenated_tensor.shape.num_elements()
split_size = total_tensor_size // self._num_splits
split_size_last = total_tensor_size - split_size * (self._num_splits - 1)
split_sizes = [split_size] * (self._num_splits - 1) + [split_size_last]
tensor_packs = tf.split(concatenated_tensor, split_sizes)
return tensor_packs
def _undo_split_tensors(self, tensor_packs):
"""Undoes self._split_tensors()."""
return tf.concat(tensor_packs, 0)
def _undo_concat_tensors(self, concatenated_tensor, concat_tensor_state):
"""Undoes self._concat_tensors()."""
tensors_with_sizes = tf.split(concatenated_tensor,
concat_tensor_state.orig_sizes)
tensors_with_shapes = [
tf.reshape(grad, shape)
for grad, shape in zip(tensors_with_sizes,
concat_tensor_state.orig_shapes)
]
return tensors_with_shapes
def concat_all_device_tensors(self, all_device_tensors):
"""For each device, concatenate the device's tensors into a single tensor.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
Returns:
A list of list of tensors in a similar form as all_device_tensors, except
the tensors on each device have been concatenated. Each inner list
consists of a single concatenated tensor.
"""
assert self._next_method == 'concat'
new_all_device_tensors = []
tensor_states = []
for device_tensors in all_device_tensors:
with tf.colocate_with(device_tensors[0]):
concat_tensor, tensor_state = self._concat_tensors(device_tensors)
new_all_device_tensors.append([concat_tensor])
tensor_states.append(tensor_state)
self._tensor_states = tensor_states
self._next_method = 'split'
return new_all_device_tensors
def split_all_device_tensors(self, all_device_tensors):
"""Splits concatenated tensors into `num_splits` pieces.
`num_splits` is specified in the constructor. In the case where the total
size of a concatenated tensor is not divisible by `num_splits`, the last
split tensor gets more elements.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
For each i, `all_device_tensors[i]` must be a list of length 1 of a
single concatenated tensor.
Returns:
A list of list of tensors in a similar form as all_device_tensors, except
      the concatenated tensor on each device has been split. Each inner list
is a list of length `num_splits`.
"""
assert self._next_method == 'split'
new_all_device_tensors = []
for [concat_tensor] in all_device_tensors:
with tf.colocate_with(concat_tensor):
new_all_device_tensors.append(self._split_tensors(concat_tensor))
self._orig_concat_all_device_tensors = all_device_tensors
self._next_method = 'undo_split'
return new_all_device_tensors
def undo_split_all_device_tensors(self, all_device_tensors):
"""Undoes the effects of `split_all_device_tensors`."""
assert self._next_method == 'undo_split'
new_all_device_tensors = []
for i, device_tensors in enumerate(all_device_tensors):
[orig_tensor] = self._orig_concat_all_device_tensors[i]
with tf.colocate_with(orig_tensor):
new_all_device_tensors.append(
[self._undo_split_tensors(device_tensors)])
self._next_method = 'undo_concat'
return new_all_device_tensors
def undo_concat_all_device_tensors(self, all_device_tensors):
"""Undoes the effects of `concat_all_device_tensors`."""
assert self._next_method == 'undo_concat'
new_all_device_tensors = []
for [concat_tensor], tensor_state in zip(all_device_tensors,
self._tensor_states):
with tf.colocate_with(concat_tensor):
new_all_device_tensors.append(self._undo_concat_tensors(concat_tensor,
tensor_state))
self._next_method = None
return new_all_device_tensors
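def _example_tensor_packer_round_trip(all_device_tensors):
  """Minimal usage sketch (assumes fully-defined tensor shapes) of the four
  _TensorPacker phases above, which must be called in this order."""
  packer = _TensorPacker(num_splits=2)
  t = packer.concat_all_device_tensors(all_device_tensors)  # 1 tensor/device
  t = packer.split_all_device_tensors(t)                    # 2 chunks/device
  # ... the batch all-reduce would run on `t` here ...
  t = packer.undo_split_all_device_tensors(t)
  return packer.undo_concat_all_device_tensors(t)  # original shapes restored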
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/variable_mgr/batch_allreduce.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for allreduce."""
from __future__ import print_function
import collections as pycoll
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.all_reduce.python import all_reduce
from tensorflow.python.ops import collective_ops
AllReduceSpecTuple = pycoll.namedtuple('AllReduceSpecTuple', 'alg shards limit')
def parse_general_int(s):
"""Parse integer with power-of-2 suffix eg. 32k."""
mo = re.match(r'(\d+)([KkMGT]?)$', s)
if mo:
i, suffix = mo.group(1, 2)
v = int(i)
if suffix:
if suffix == 'K' or suffix == 'k':
v *= 1024
elif suffix == 'M':
v *= (1024 * 1024)
elif suffix == 'G':
v *= (1024 * 1024 * 1024)
elif suffix == 'T':
v *= (1024 * 1024 * 1024 * 1024)
else:
raise ValueError('invalid integer string %s' % s)
return v
else:
v = int(s)
return v
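# Worked examples (traced from parse_general_int above):
#   parse_general_int('32k') -> 32768
#   parse_general_int('2M')  -> 2097152
#   parse_general_int('7')   -> 7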
def parse_all_reduce_spec(all_reduce_spec):
"""Parse all_reduce_spec.
Args:
all_reduce_spec: a string specifying a combination of all-reduce
algorithms to apply for gradient reduction.
Returns:
a list of AllReduceSpecTuple.
Raises:
ValueError: all_reduce_spec is not well-formed.
An all_reduce_spec has BNF form:
int ::= positive whole number
g_int ::= int[KkMGT]?
alg_spec ::= alg | alg#int
range_spec ::= alg_spec | alg_spec/alg_spec
spec ::= range_spec | range_spec:g_int:range_spec
Not all syntactically correct specifications are supported.
Examples of supported all_reduce_spec strings, with semantics explained:
'collective' == apply tf.collective_reduce operator to all tensors.
'collective#2' == apply tf.collective_reduce operator to all tensors,
requesting up to 2 simultaneous transfers at each node, if
feasible, by subdividing tensor by an additional factor of 2.
'xring' == apply ring all-reduce to all tensors
'xring#2' == apply ring all-reduce to all tensors, using two simultaneous
transfer rings, each operating on 1/2 of each tensor.
'nccl' == apply NCCL all-reduce to all tensors (only works within
a single worker process where all devices are GPUs)
'nccl/xring' == apply NCCL all-reduce to all tensors within each worker
to produce at least one full-reduced (locally) value,
then apply ring all-reduce to one such value from each
worker, then apply NCCL broadcast to propagate those globally
reduced values back to every device within each worker.
'pscpu' == Shuffle reduce using worker CPUs as the gather devices: each
distributed tensor is reduced by copying all instances to
one of the worker CPUs, computing the reduction there, then
copying back to each participating device. Tensor reductions
are assigned to specific CPUs round-robin.
'psgpu#4' == Arrange all GPUs across all workers into groups of 4.
Each distributed tensor is shuffle reduced against one
such group of 4 GPUs, selected round-robin. That is, each
tensor is split across 4 shards for the reduction.
'pscpu:2k:pscpu#2:64k:xring' == Apply single-shard pscpu to
tensors of size <= 2048 elements, apply 2-shard pscpu to
tensors up to size 64k elements, apply xring to larger tensors.
'pscpu/pscpu#2' == Use shuffle gather to locally reduce each tensor on
the worker's CPU, then use 2-shard shuffle to reduce those
locally reduced tensors across workers (on the worker CPUs), then
scatter the globally reduced values locally from each worker CPU.
"""
range_parts = all_reduce_spec.split(':') + ['-1']
if len(range_parts) % 2:
raise ValueError('all_reduce_spec not well formed: %s' % all_reduce_spec)
limit = 0
spec = []
alg = None
shards = 1
for i, range_part in enumerate(range_parts):
if i % 2 == 1:
try:
limit = parse_general_int(range_part)
spec.append(AllReduceSpecTuple(alg=alg, shards=shards, limit=limit))
except ValueError:
raise ValueError('all_reduce_spec (%s) contains non-integer range %s' %
(all_reduce_spec, range_part))
else:
alg = range_part
alg_parts = range_part.split('#')
alg = alg_parts[0]
if len(alg_parts) > 1:
try:
shards = int(alg_parts[1])
except ValueError:
          raise ValueError('all_reduce_spec (%s) contains non-integer '
                           'shards %s' % (all_reduce_spec, alg_parts[1]))
else:
shards = 1
if alg not in [
'nccl', 'nccl/xring', 'nccl/rechd', 'nccl/pscpu', 'xring', 'pscpu',
'psgpu', 'pscpu/pscpu', 'collective'
]:
raise ValueError('all_reduce_spec (%s) contains invalid alg %s' %
(all_reduce_spec, alg))
return spec
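# Worked example (traced from the parser above):
#   parse_all_reduce_spec('pscpu:2k:pscpu#2:64k:xring') returns
#   [AllReduceSpecTuple(alg='pscpu', shards=1, limit=2048),
#    AllReduceSpecTuple(alg='pscpu', shards=2, limit=65536),
#    AllReduceSpecTuple(alg='xring', shards=1, limit=-1)]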
def build_all_reduce_device_prefixes(job_name, num_tasks):
"""Build list of device prefix names for all_reduce.
Args:
job_name: 'worker', 'ps' or 'localhost'.
num_tasks: number of jobs across which device names should be generated.
Returns:
A list of device name prefix strings. Each element spells out the full
host name without adding the device.
e.g. '/job:worker/task:0'
"""
if job_name != 'localhost':
return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)]
else:
assert num_tasks == 1
return ['/job:%s' % job_name]
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: list of strings naming devices.
group_size: int >= 1
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
    len(devices) % group_size == 0 then each device will appear
exactly once.
Raises:
ValueError: group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError('only %d devices, but group_size=%d' % (num_devices,
group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(0, num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
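# Worked example (traced from group_device_names above): with three devices
# and group_size=2, every device appears at least once and one is reused:
#   group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2)
#   -> [['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]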
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
    small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
_instance_key = 1
def new_collective_instance_key():
"""Returns a new instance key for use in defining a collective op."""
global _instance_key
v = _instance_key
_instance_key += 1
return v
_group_key = 1
_group_key_table = dict()
def collective_group_key(devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
global _group_key
global _group_key_table
parsed = [tf.DeviceSpec.from_string(d) for d in devices]
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
concat = ','.join(names)
  if concat not in _group_key_table:
new_key = _group_key
_group_key += 1
_group_key_table[concat] = new_key
rv = _group_key_table[concat]
return rv
def build_collective_reduce(input_tensors, num_workers, num_shards,
red_op='Add', un_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
num_shards: number of shards into which to divide each per-tick chunk,
normally 1 but could be higher on multi-data-path architectures.
red_op: string naming the reduction op
un_op: string naming the unary final op
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_group_key(devices)
instance_key = new_collective_instance_key()
out_tensors = []
if num_shards == 1:
subdiv_offsets = [0]
elif num_shards == 2:
if num_devices > 1:
subdiv_offsets = [0, -(num_devices // 2)]
else:
subdiv_offsets = [0]
else:
raise ValueError('Unsupported num_shards %d' % num_shards)
for d in range(num_devices):
with tf.device(devices[d]):
reduce_op = collective_ops.all_reduce(input_tensors[d],
group_size, group_key, instance_key,
red_op, un_op,
subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key):
return collective_ops.broadcast_send(t, shape, dtype, group_size, group_key,
instance_key)
def broadcast_recv(shape, dtype, group_size, group_key, instance_key):
return collective_ops.broadcast_recv(shape, dtype, group_size, group_key,
instance_key)
def sum_grad_and_var_all_reduce(single_session,
grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'collective':
assert not single_session
summed_grads = build_collective_reduce(
scaled_grads, num_workers, num_shards, 'Add', 'Id')
else:
with tf.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
if alg == 'nccl':
summed_grads = all_reduce.build_nccl_all_reduce(scaled_grads, tf.add)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, tf.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
tf.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, tf.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, tf.add, tf.add_n)
elif alg == 'pscpu/pscpu':
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads,
aux_devices,
# TODO(tucker): devise a way of better specifying the device set
# for the second level.
[aux_devices[0]],
tf.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, tf.add_n)
else:
        raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def contains_any(haystack, needles):
"""Tests if any needle is a substring of haystack.
Args:
haystack: a string
needles: list of strings
Returns:
True if any element of needles is a substring of haystack,
False otherwise.
"""
for n in needles:
if n in haystack:
return True
return False
def sum_gradients_all_reduce(single_session,
dev_prefixes,
tower_grads,
num_workers,
alg,
num_shards,
gpu_indices,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=10,
allreduce_merge_scope=1):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
single_session: true if reduction is applied to one graph across
      all workers, false if this application is to a single-worker graph only.
dev_prefixes: list of prefix strings to use to generate PS device names.
tower_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
agg_small_grads_max_bytes: largest tensor eligible for aggregation,
in number of bytes.
agg_small_grads_max_group: largest permitted aggregation of small
tensors.
allreduce_merge_scope: size of groups into which to partition consecutive
gradients grouped under a common 'allreduce' name scope for application
of ScopedAllocator optimization.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = contains_any(alg, ['pscpu', 'psgpu'])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
aux_device_groups = group_device_names(
aux_devices,
num_shards if (alg != 'collective' and alg_contains_shuffle) else 1)
group_index = 0
if agg_small_grads_max_bytes > 0 and agg_small_grads_max_group > 0:
tower_grads, packing = pack_small_tensors(
tower_grads,
max_bytes=agg_small_grads_max_bytes,
max_group=agg_small_grads_max_group)
else:
packing = None
reduced_gv_list = []
gv = list(zip(*tower_grads))
merge_scope = allreduce_merge_scope if allreduce_merge_scope > 0 else 1
chunked_gv = [gv[x:x + merge_scope]
for x in xrange(0, len(gv), merge_scope)]
for chunk in chunked_gv:
with tf.name_scope('allreduce'):
for grad_and_vars in chunk:
reduced_gv_list.append(sum_grad_and_var_all_reduce(
single_session,
grad_and_vars, num_workers, alg, gpu_indices,
(aux_devices if is_hierarchical
else aux_device_groups[group_index]),
num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_tower_grads = [list(x) for x in zip(*reduced_gv_list)]
if packing:
new_tower_grads = unpack_small_tensors(new_tower_grads, packing)
return new_tower_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
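# Worked example (traced from extract_ranges above): consecutive runs become
# [first, last] pairs and isolated indices are returned separately:
#   extract_ranges([0, 1, 2, 5, 7, 8]) -> ([[0, 2], [7, 8]], [5])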
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one tower.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with tf.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with tf.device(g.device):
members.append(tf.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with tf.device(members[0].device):
return tf.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope('unpack'):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(tower_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
tower_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_tower_grads, packing where new_tower_grads is identical to
tower_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each tower, and packing contains
the data necessary to restore the tower_grads structure.
Look through the first tower for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in towers must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(tower_grads[0]):
if g.dtype == tf.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(tower_grads[0])
packing = {}
if small_ranges:
new_tower_grads = []
for dev_idx, gv_list in enumerate(tower_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_tower_grads.append(new_gv_list)
return new_tower_grads, packing
else:
return tower_grads, None
def unpack_small_tensors(tower_grads, packing):
"""Undo the structure alterations to tower_grads done by pack_small_tensors.
Args:
tower_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to tower_grads.
Returns:
    new_tower_grads: identical to tower_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return tower_grads
new_tower_grads = []
num_devices = len(tower_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(tower_grads):
new_gv_list = gv_list[num_packed:]
for i in xrange(0, num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_tower_grads.append(new_gv_list)
return new_tower_grads
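def _example_pack_unpack_round_trip():
  """Minimal usage sketch (hypothetical gradients; variables stubbed with
  strings, which the packing only carries along) of the pack/unpack round
  trip above."""
  tower_grads = [[(tf.zeros([2, 2]), 'var0'), (tf.zeros([3]), 'var1')]]
  packed, packing = pack_small_tensors(tower_grads, max_bytes=64, max_group=4)
  # `packed[0]` now holds a single 7-element concatenated tensor paired with
  # a placeholder variable; unpacking restores the original structure.
  return unpack_small_tensors(packed, packing)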
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/variable_mgr/allreduce.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants used in tf_cnn_benchmarks."""
from enum import Enum
class NetworkTopology(str, Enum):
"""Network topology describes how multiple GPUs are inter-connected.
"""
# DGX-1 uses hybrid cube mesh topology with the following device peer to peer
# matrix:
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
DGX1 = "dgx1"
# V100 in GCP are connected with the following device peer to peer matrix.
# In this topology, bandwidth of the connection depends on if it uses NVLink
# or PCIe link.
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y N Y N N
# 1: Y Y Y Y N N N N
# 2: Y Y Y Y N N N Y
# 3: Y Y Y Y N N N N
# 4: N N N N Y Y Y Y
# 5: Y N N N Y Y Y Y
# 6: N N N N Y Y Y Y
# 7: N N Y N Y Y Y Y
GCP_V100 = "gcp_v100"
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/variable_mgr/constants.py |
DeepLearningExamples-master | TensorFlow/Translation/GNMT/variable_mgr/__init__.py |
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines VariableMgr and subclasses used to manage variables.
"""
from __future__ import print_function
import re
import tensorflow as tf
from utils import misc_utils
from variable_mgr import allreduce
from variable_mgr import batch_allreduce
from variable_mgr import variable_mgr_util
class VariableMgr(object):
"""Abstract superclass for class used by BenchmarkCNN to control variables.
Functions on this class are used to control how variables are created and
managed, and how gradients are computed and applied.
"""
def __init__(self, benchmark_cnn):
self.benchmark_cnn = benchmark_cnn
self.staging_delta_ops = []
self.use_resource_vars = benchmark_cnn.params.use_resource_vars
# A variable for automatic loss scaling.
self.grad_has_inf_nan = None
def each_tower_has_variables(self):
"""Returns True if each GPU tower of the model has separate variables."""
assert False, 'Must be implemented in subclass'
def supports_staged_vars(self):
"""Whether staged variable management is supported."""
return False
def create_outer_variable_scope(self, device_num):
"""Create the tf.variable_scope around all model graph operations."""
del device_num # unused by this implementation
assert False, 'Must be implemented in subclass'
def preprocess_device_grads(self, device_grads):
"""Preprocess the device gradients prior to applying them.
Args:
device_grads: List of lists of (gradient, variable) tuples.
device_grads[t][g] = (gradient, variable), where t is the index of the
tower and g is the index of the gradient-variable pair.
Returns: a tuple of (apply_gradients_devices, gradient_state).
gradient_state is an opaque structure that should be passed to
get_gradients_to_apply() and append_apply_gradients_ops() (in that order).
apply_gradients_devices is a list of devices where the gradients will be
applied with get_gradients_to_apply() and append_apply_gradients_ops().
"""
del device_grads # unused by this implementation
assert False, 'Must be implemented in subclass'
def get_gradients_to_apply(self, device_num, gradient_state):
"""Returns the [(gradient, variable)] list to apply for device_num.
Args:
device_num: indexes into apply_gradients_devices, which was returned by an
earlier call to preprocess_device_grads.
gradient_state: from previous call to apply_gradients_devices.
"""
del device_num, gradient_state # unused by this implementation
assert False, 'Must be implemented in subclass'
def append_apply_gradients_ops(self, gradient_state, opt, grads, training_ops,
loss_scale_params):
"""Adds training ops for grads to 'training_ops'.
Args:
gradient_state: from previous call to apply_gradients_devices.
opt: the underlying optimizer
grads: [(grad, var)] to apply
training_ops: list to which to add ops
loss_scale_params: parameters for loss scaling.
"""
del gradient_state # unused by this implementation
def get_apply_gradients_ops_func():
"""Returns the apply_gradients op."""
return [opt.apply_gradients(grads)]
variable_mgr_util.append_gradients_with_loss_scale(
training_ops, get_apply_gradients_ops_func, loss_scale_params,
self.grad_has_inf_nan)
def get_post_init_ops(self):
"""Returns ops that should run post-initialization."""
return []
def get_devices(self):
"""Returns devices to use for computation; includes replica selection."""
assert False, 'Must be implemented in subclass'
def savable_variables(self):
"""Returns a list/dict of savable variables to pass to tf.train.Saver."""
return tf.global_variables()
def trainable_variables_on_device(self,
rel_device_num,
abs_device_num,
writable=False):
"""Return the set of trainable variables on device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether to get a reference to the underlying variable.
Returns:
The set of trainable variables on the specified device.
"""
del rel_device_num, writable
if self.each_tower_has_variables():
params = [
v for v in tf.trainable_variables()
if v.name.startswith('v%s/' % abs_device_num)
]
else:
params = tf.trainable_variables()
return params
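def _example_variable_mgr_flow(variable_mgr, opt, device_grads, training_ops,
                               loss_scale_params):
  """Minimal usage sketch (hypothetical caller arguments) of the call order a
  VariableMgr subclass expects, mirroring the docstrings above."""
  apply_devices, grad_state = variable_mgr.preprocess_device_grads(
      device_grads)
  for d, device in enumerate(apply_devices):
    with tf.device(device):
      grads = variable_mgr.get_gradients_to_apply(d, grad_state)
      variable_mgr.append_apply_gradients_ops(
          grad_state, opt, grads, training_ops, loss_scale_params)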
class VariableMgrLocalReplicated(VariableMgr):
"""VariableMgr that implements the --replicated mode for local jobs.
Each GPU has its own copy of the variables. To apply gradients,
either a local all-reduce algorithm is applied or a regular
cross-device aggregation is used to replicate the combined
gradients to all towers.
"""
def __init__(self, benchmark_cnn, all_reduce_spec,
agg_small_grads_max_bytes, agg_small_grads_max_group,
allreduce_merge_scope):
super(VariableMgrLocalReplicated, self).__init__(benchmark_cnn)
if all_reduce_spec:
spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
if len(spec) != 1:
raise ValueError(
'replicated mode does not support hybrid all-reduce strategies')
self._all_reduce_spec = spec[0]
else:
self._all_reduce_spec = None
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
self._warmup_ops = []
self._allreduce_merge_scope = allreduce_merge_scope
self._gradient_put_ops = None
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
return tf.variable_scope('v%s' % device_num,
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
compact_grads = (self.benchmark_cnn.params.use_fp16 and
self.benchmark_cnn.params.compact_gradient_transfer)
defer_grads = (self.benchmark_cnn.params.variable_consistency == 'relaxed')
grads_to_reduce = [[g for g, _ in grad_vars] for grad_vars in device_grads]
algorithm = batch_allreduce.algorithm_from_params(self.benchmark_cnn.params)
reduced_grads, self._warmup_ops = algorithm.batch_all_reduce(
grads_to_reduce, self.benchmark_cnn.params.gradient_repacking,
compact_grads, defer_grads)
assert not self._warmup_ops
if (self.benchmark_cnn.params.use_fp16 and
self.benchmark_cnn.enable_auto_loss_scale):
# Check for infs or nans
is_finite_list = []
with tf.name_scope('check_for_inf_and_nan'):
for tower_grads in reduced_grads:
with tf.colocate_with(tower_grads[0]):
# TODO(tanmingxing): Create fused op that takes in a list of tensors
# as input and returns scalar boolean True if there are any
# infs/nans.
is_finite_list.append(tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in tower_grads]))
self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(is_finite_list))
reduced_device_grads = [[
(g, v) for g, (_, v) in zip(grads, grad_vars)
] for grads, grad_vars in zip(reduced_grads, device_grads)]
return self.benchmark_cnn.devices, reduced_device_grads
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state
return device_grads[device_num]
def get_post_init_ops(self):
# Copy initialized values for variables on GPU 0 to other GPUs.
global_vars = tf.global_variables()
var_by_name = dict([(v.name, v) for v in global_vars])
post_init_ops = []
copy_froms = set()
skipped_vars = []
for v in global_vars:
split_name = v.name.split('/')
# TODO(b/62630508): use more specific prefix than v or v0.
if split_name[0] == 'v0' or not v.name.startswith('v'):
skipped_vars.append(v)
continue
      # Only vars starting with "v[number]" are synced.
split_name[0] = 'v0'
copy_from = var_by_name['/'.join(split_name)]
copy_froms.add(copy_from)
post_init_ops.append(v.assign(copy_from.read_value()))
post_init_ops += self._warmup_ops
# If copy-froms is empty, then all vars are actually saved.
misc_utils.print_out('All copy-from vars(%d): ' % len(copy_froms))
for gv in copy_froms:
misc_utils.print_out(gv.name)
    misc_utils.print_out('All skipped vars (%d): ' % len(skipped_vars))
for gv in skipped_vars:
misc_utils.print_out(gv.name)
assert len(skipped_vars) >= len(copy_froms)
return post_init_ops
def savable_variables(self):
"""Return the set of variables used for saving/loading the model."""
params = []
for v in tf.global_variables():
split_name = v.name.split('/')
if split_name[0] == 'v0' or not v.name.startswith('v'):
params.append(v)
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/variable_mgr/variable_mgr.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function
import collections
import six
import os
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import gen_nn_ops
def sparse_softmax_crossent_with_logits(logits=None, labels=None, name=None):
"""docstring."""
# TODO(jamesqin): merge with tf.nn.sparse_softmax_cross_entropy_with_logits
  # Basically forks the tf lib function, except that the result isn't cast
  # back to tf.float16 if the input is tf.float16
# TODO(jamesqin): implement a fused kernel to reduce memory footprint.
# Reshape logits and labels to rank 2.
with tf.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = tf.convert_to_tensor(labels)
logits = tf.convert_to_tensor(logits)
precise_logits = tf.cast(logits, tf.float32) if (tf.as_dtype(
logits.dtype) == tf.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = tf.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# cost.dtype is always fp32
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
xla_compile = (os.environ.get("xla_compile", "false") == "true")
use_xla = (os.environ.get("use_xla", "false") == "true")
if not (xla_compile or use_xla):
# tf.assert_equal isn't registered for GPU and doesn't work with
# xla.compile(), so the dynamic shape check is skipped in those modes.
shape_checks.append(
tf.assert_equal(
tf.shape(labels),
tf.shape(logits)[:-1]))
with tf.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = tf.shape(logits)[tf.rank(logits) - 1]
precise_logits = tf.reshape(precise_logits, [-1, num_classes])
labels = tf.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = tf.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
# cost is always fp32
return cost
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
"""Custom version of tf.clip_by_global_norm that doesn't check numerics."""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
if use_norm is None:
use_norm = tf.global_norm(t_list, name)
with tf.name_scope(name, "clip_by_global_norm", t_list + [clip_norm]) as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * tf.minimum(
1.0 / use_norm,
tf.constant(1.0, dtype=use_norm.dtype) / clip_norm)
values = [
tf.convert_to_tensor(
t.values if isinstance(t, tf.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = []
for i, v in enumerate(values):
if v is None:
values_clipped.append(None)
else:
with tf.colocate_with(v):
values_clipped.append(
tf.identity(v * scale, name="%s_%d" % (name, i)))
list_clipped = [
tf.IndexedSlices(c_v, t.indices, t.dense_shape)
if isinstance(t, tf.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
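# Illustrative sketch (not part of the original file; `loss`, `params` and
# `opt` are hypothetical names): gradients are typically clipped jointly by
# their global norm before being applied, i.e. every gradient is scaled by
# min(clip_norm / global_norm, 1.0):
#
#   grads = tf.gradients(loss, params)
#   clipped_grads, global_norm = clip_by_global_norm(grads, clip_norm=5.0)
#   train_op = opt.apply_gradients(zip(clipped_grads, params))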
def BatchMatMul(a, b):
use_fp32_batch_matmul = (os.environ.get("use_fp32_batch_matmul", "false") == "true")
xla_compile = (os.environ.get("xla_compile", "false") == "true")
if use_fp32_batch_matmul:
def DoFn(a, b):
dtype = a.dtype
a = tf.to_float(a)
b = tf.to_float(b)
return tf.cast(tf.matmul(a, b), dtype)
# If using xla_compile, the fwd and bwd passes per tower are wrapped in xla_compile
if not xla_compile:
DoFn = function.Defun(noinline=True)(DoFn)
res = DoFn(a, b)
res.set_shape((None, None, b.shape[-1].value))
else:
# If xla_compile, leave to xla to handle the casts.
res = DoFn(a, b)
else:
res = tf.matmul(a, b)
return res
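# Illustrative sketch (not part of the original file; the tensor names are
# hypothetical): BatchMatMul lets mixed-precision models force the
# numerically sensitive batched matmul into fp32 while keeping fp16 tensors
# elsewhere; when XLA is not compiling the graph, the cast-and-matmul is
# wrapped in a non-inlined Defun.
#
#   os.environ["use_fp32_batch_matmul"] = "true"
#   scores = BatchMatMul(fp16_queries, fp16_keys)  # matmul runs in fp32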
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/math_utils.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import checkpoint_management as cm
def get_all_checkpoints(output_dir):
"""docstring."""
ckpt = cm.get_checkpoint_state(output_dir, None)
res = []
if not ckpt:
return None
for path in ckpt.all_model_checkpoint_paths:
# Look for either a V2 path or a V1 path, with priority for V2.
v2_path = cm._prefix_to_checkpoint_path(path, saver_pb2.SaverDef.V2)
v1_path = cm._prefix_to_checkpoint_path(path, saver_pb2.SaverDef.V1)
if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
v1_path):
res.append(path)
else:
tf.logging.error("Couldn't match files for checkpoint %s", path)
return res
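# Illustrative sketch (not part of the original file; the prefix is a
# hypothetical example): for a checkpoint prefix 'model.ckpt-1000', the
# existence check above looks for 'model.ckpt-1000.index' (V2 format) or
# 'model.ckpt-1000' itself (V1 format) before keeping the path.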
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/evaluation_utils.py |
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/__init__.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function
import codecs
import collections
import json
import math
import os
import sys
import time
from distutils import version
import tensorflow as tf
def check_tensorflow_version():
# LINT.IfChange
min_tf_version = "1.3.0"
# LINT
if (version.LooseVersion(tf.__version__) <
version.LooseVersion(min_tf_version)):
raise EnvironmentError("Tensorflow version must be >= %s" % min_tf_version)
def weighted_avg(inputs, weights, force_fp32=False):
dtype = tf.float32 if force_fp32 else inputs[0].dtype
inputs = [tf.cast(x, dtype) for x in inputs]
weights = [tf.cast(x, dtype) for x in weights]
norm = tf.add_n([x * y for x, y in zip(inputs, weights)])
denorm = tf.add_n(weights)
return norm / denorm
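# Illustrative sketch (not part of the original file): weighted_avg computes
# sum(x_i * w_i) / sum(w_i). For inputs [2.0, 4.0] and weights [1.0, 3.0]:
#   (2.0 * 1.0 + 4.0 * 3.0) / (1.0 + 3.0) = 14.0 / 4.0 = 3.5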
def safe_exp(value):
"""Exponentiation with catching of overflow error."""
try:
ans = math.exp(value)
except OverflowError:
ans = float("inf")
return ans
def print_time(s, start_time):
"""Take a start time, print elapsed duration, and return a new time."""
print("%s, time %ds, %s." % (s, (time.time() - start_time), time.ctime()))
sys.stdout.flush()
return time.time()
def print_out(s, f=None, new_line=True):
"""Similar to print but with support to flush and output to a file."""
if isinstance(s, bytes):
s = s.decode("utf-8")
if f:
f.write(s)
if new_line:
f.write(u"\n")
# stdout
out_s = s.encode("utf-8")
if not isinstance(out_s, str):
out_s = out_s.decode("utf-8")
print(out_s, end="", file=sys.stdout)
if new_line:
sys.stdout.write("\n")
sys.stdout.flush()
def print_hparams(hparams, skip_patterns=None, header=None):
"""Print hparams, can skip keys based on pattern."""
if header: print_out("%s" % header)
values = hparams.values()
for key in sorted(values.keys()):
if not skip_patterns or all(
[skip_pattern not in key for skip_pattern in skip_patterns]):
print_out(" %s=%s" % (key, str(values[key])))
def serialize_hparams(hparams):
"""Print hparams, can skip keys based on pattern."""
values = hparams.values()
res = ""
for key in sorted(values.keys()):
res += "%s=%s\n" % (key, str(values[key]))
return res
def load_hparams(model_dir):
"""Load hparams from an existing model directory."""
hparams_file = os.path.join(model_dir, "hparams")
if tf.gfile.Exists(hparams_file):
print_out("# Loading hparams from %s" % hparams_file)
with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_file, "rb")) as f:
try:
hparams_values = json.load(f)
hparams = tf.contrib.training.HParams(**hparams_values)
except ValueError:
print_out(" can't load hparams file")
return None
return hparams
else:
return None
def maybe_parse_standard_hparams(hparams, hparams_path):
"""Override hparams values with existing standard hparams config."""
if hparams_path and tf.gfile.Exists(hparams_path):
print_out("# Loading standard hparams from %s" % hparams_path)
with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_path, "rb")) as f:
hparams.parse_json(f.read())
return hparams
def save_hparams(output_dir, hparams):
"""Save hparams."""
hparams_file = os.path.join(output_dir, "hparams")
print_out(" saving hparams to %s" % hparams_file)
with codecs.getwriter("utf-8")(tf.gfile.GFile(hparams_file, "wb")) as f:
f.write(hparams.to_json(indent=4, sort_keys=True))
def debug_tensor(s, msg=None, summarize=10):
"""Print the shape and value of a tensor at test time. Return a new tensor."""
if not msg:
msg = s.name
return tf.Print(s, [tf.shape(s), s], msg + " ", summarize=summarize)
def add_summary(summary_writer, global_step, tag, value):
"""Add a new summary to the current summary_writer."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
summary_writer.add_summary(summary, global_step)
def format_text(words):
"""Convert a sequence words into sentence."""
if (not hasattr(words, "__len__") and # for numpy array
not isinstance(words, collections.Iterable)):
words = [words]
return b" ".join(words)
def format_bpe_text(symbols, delimiter=b"@@"):
"""Convert a sequence of bpe words into sentence."""
words = []
word = b""
if isinstance(symbols, str):
symbols = symbols.encode()
delimiter_len = len(delimiter)
for symbol in symbols:
if len(symbol) >= delimiter_len and symbol[-delimiter_len:] == delimiter:
word += symbol[:-delimiter_len]
else: # end of a word
word += symbol
words.append(word)
word = b""
return b" ".join(words)
def format_spm_text(symbols):
"""Decode a text in SPM (https://github.com/google/sentencepiece) format."""
return u"".join(format_text(symbols).decode("utf-8").split()).replace(
u"\u2581", u" ").strip().encode("utf-8")
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/misc_utils.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For loading data into NMT models."""
from __future__ import print_function
import os
import tensorflow as tf
from utils import vocab_utils
def get_effective_epoch_size(hparams, train=True):
"""Get training epoch size after filtering."""
if train:
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_max_len = hparams.src_max_len
tgt_max_len = hparams.tgt_max_len
else:
src_file = "%s.%s" % (hparams.test_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.test_prefix, hparams.tgt)
src_max_len = hparams.src_max_len_infer
tgt_max_len = None
if src_max_len is None:
src_max_len = float('inf')
if tgt_max_len is None:
tgt_max_len = float('inf')
srcf = tf.gfile.GFile(src_file, "r")
tgtf = tf.gfile.GFile(tgt_file, "r")
epoch_size = 0
src_tokens = 0
tgt_tokens = 0
for srcline, tgtline in zip(srcf, tgtf):
len_srcline = len(srcline.split())
len_tgtline = len(tgtline.split())
if (
len_srcline < src_max_len and
len_tgtline < tgt_max_len):
epoch_size += 1
src_tokens += len_srcline
tgt_tokens += len_tgtline
srcf.close()
tgtf.close()
return epoch_size, src_tokens, tgt_tokens
# pylint: disable=g-long-lambda,line-too-long
def get_iterator(src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size,
sos,
eos,
random_seed,
num_buckets,
src_max_len=None,
tgt_max_len=None,
num_parallel_calls=4,
output_buffer_size=None,
skip_count=None,
num_shards=1,
shard_index=0,
reshuffle_each_iteration=True,
use_char_encode=False,
num_repeat=1,
filter_oversized_sequences=False):
"""Function that returns input dataset."""
if not output_buffer_size:
output_buffer_size = batch_size * 1000
if use_char_encode:
src_eos_id = vocab_utils.EOS_CHAR_ID
else:
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
src_tgt_dataset = src_tgt_dataset.shard(num_shards, shard_index)
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
src_tgt_dataset = src_tgt_dataset.shuffle(
output_buffer_size, random_seed,
reshuffle_each_iteration).repeat(num_repeat)
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.string_split([src]).values, tf.string_split([tgt]).values),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Filter zero length input sequences.
src_tgt_dataset = src_tgt_dataset.filter(
lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
# Filter oversized input sequences.
if filter_oversized_sequences:
src_tgt_dataset = src_tgt_dataset.filter(
lambda src, tgt: tf.logical_and(tf.size(src) < src_max_len,
tf.size(tgt) < tgt_max_len))
if src_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src[:src_max_len], tgt),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
if tgt_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src, tgt[:tgt_max_len]),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Convert the word strings to ids. Word strings that are not in the
# vocab get the lookup table's default_value integer.
if use_char_encode:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.reshape(vocab_utils.tokens_to_bytes(src), [-1]),
tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),
num_parallel_calls=num_parallel_calls)
else:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.cast(src_vocab_table.lookup(src), tf.int32),
tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),
num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src,
tf.concat(([tgt_sos_id], tgt), 0),
tf.concat((tgt, [tgt_eos_id]), 0)),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Add in sequence lengths.
if use_char_encode:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt_in, tgt_out: (
src, tgt_in, tgt_out,
tf.to_int32(tf.size(src) / vocab_utils.DEFAULT_CHAR_MAXLEN),
tf.size(tgt_in)),
num_parallel_calls=num_parallel_calls)
else:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt_in, tgt_out: (
src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in)),
num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
use_xla_compile = os.environ.get("xla_compile", "false") == "true"
force_inputs_padding = os.environ.get("force_inputs_padding", "false") == "true"
use_static_input_shape = use_xla_compile or force_inputs_padding
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(
tf.TensorShape(
[src_max_len if use_static_input_shape else None]), # src
tf.TensorShape(
[tgt_max_len if use_static_input_shape else None]), # tgt_input
tf.TensorShape([tgt_max_len if use_static_input_shape else None
]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence
# lengths.)
padding_values=(
src_eos_id, # src
tgt_eos_id, # tgt_input
tgt_eos_id, # tgt_output
0, # src_len -- unused
0),
drop_remainder=True)
if num_buckets > 1:
def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):
"""Calculate bucket_width by maximum source sequence length."""
# Pairs with length [0, bucket_width) go to bucket 0, length
# [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length
# over ((num_buckets - 1) * bucket_width) words all go into the last bucket.
# A worked example is sketched right after this function.
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
# Bucket sentence pairs by the length of their source sentence and target
# sentence.
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(num_buckets, bucket_id))
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
batched_dataset = src_tgt_dataset.apply(
tf.contrib.data.group_by_window(
key_func=key_func, reduce_func=reduce_func, window_size=batch_size))
else:
batched_dataset = batching_func(src_tgt_dataset)
# make_one_shot_iterator is not applicable here since we have a lookup table.
# Instead return a tf.data.Dataset and let TPUEstimator initialize it and
# make an iterator out of it.
batched_dataset = batched_dataset.map(
lambda src, tgt_in, tgt_out, source_size, tgt_in_size: (
{"source": src,
"target_input": tgt_in,
"target_output": tgt_out,
"source_sequence_length": source_size,
"target_sequence_length": tgt_in_size}))
return batched_dataset
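# Illustrative sketch (not part of the original file): with src_max_len=50
# and num_buckets=5, key_func above gives bucket_width = (50 + 5 - 1) // 5
# = 10, so a pair with src_len=23 and tgt_len=31 gets bucket_id
# max(23 // 10, 31 // 10) = 3 and is only ever batched with other bucket-3
# pairs by group_by_window.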
def get_infer_iterator(src_dataset,
src_vocab_table,
batch_size,
eos,
src_max_len=None,
use_char_encode=False):
"""Get dataset for inference."""
if use_char_encode:
src_eos_id = vocab_utils.EOS_CHAR_ID
else:
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)
if src_max_len:
src_dataset = src_dataset.map(lambda src: src[:src_max_len])
if use_char_encode:
# Convert the word strings to character ids
src_dataset = src_dataset.map(
lambda src: tf.reshape(vocab_utils.tokens_to_bytes(src), [-1]))
else:
# Convert the word strings to ids
src_dataset = src_dataset.map(
lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))
# Add in the word counts.
if use_char_encode:
src_dataset = src_dataset.map(
lambda src: (src,
tf.to_int32(
tf.size(src) / vocab_utils.DEFAULT_CHAR_MAXLEN)))
else:
src_dataset = src_dataset.map(lambda src: (src, tf.size(src)))
def batching_func(x):
return x.padded_batch(
batch_size,
# The entry is the source line rows;
# this has unknown-length vectors. The last entry is
# the source row size; this is a scalar.
padded_shapes=(
tf.TensorShape([None]), # src
tf.TensorShape([])), # src_len
# Pad the source sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence
# lengths.)
padding_values=(
src_eos_id, # src
0)) # src_len -- unused
batched_dataset = batching_func(src_dataset)
batched_dataset = batched_dataset.map(
lambda src_ids, src_seq_len: (
{"source": src_ids,
"source_sequence_length": src_seq_len}))
return batched_dataset
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/iterator_utils.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to handle vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import os
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from utils import misc_utils as utils
# word level special token
UNK = "<unk>"
SOS = "<s>"
EOS = "</s>"
UNK_ID = 0
# char ids 0-255 come from utf-8 encoding bytes
# assign 256-300 to special chars
BOS_CHAR_ID = 256 # <begin sentence>
EOS_CHAR_ID = 257 # <end sentence>
BOW_CHAR_ID = 258 # <begin word>
EOW_CHAR_ID = 259 # <end word>
PAD_CHAR_ID = 260 # <padding>
DEFAULT_CHAR_MAXLEN = 50 # max number of chars for each word.
def _string_to_bytes(text, max_length):
"""Given string and length, convert to byte seq of at most max_length.
This process mimics docqa/elmo's preprocessing:
https://github.com/allenai/document-qa/blob/master/docqa/elmo/data.py
Note that we make use of BOS_CHAR_ID and EOS_CHAR_ID in iterator_utils.py &
our usage differs from docqa/elmo.
Args:
text: tf.string tensor of shape []
max_length: max number of chars for each word.
Returns:
A tf.int32 tensor of the byte encoded text.
"""
byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8))
byte_ids = byte_ids[:max_length - 2]
padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], PAD_CHAR_ID)
byte_ids = tf.concat(
[[BOW_CHAR_ID], byte_ids, [EOW_CHAR_ID], padding], axis=0)
tf.logging.info(byte_ids)
byte_ids = tf.reshape(byte_ids, [max_length])
tf.logging.info(byte_ids.get_shape().as_list())
return byte_ids + 1
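# Illustrative sketch (not part of the original file): for the word "cat"
# with max_length=7, the ids before the shift are
#   [BOW_CHAR_ID, 99, 97, 116, EOW_CHAR_ID, PAD_CHAR_ID, PAD_CHAR_ID]
#   = [258, 99, 97, 116, 259, 260, 260]
# and the returned `byte_ids + 1` is [259, 100, 98, 117, 260, 261, 261]
# (the +1 shift presumably reserves id 0).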
def tokens_to_bytes(tokens):
"""Given a sequence of strings, map to sequence of bytes.
Args:
tokens: A tf.string tensor
Returns:
A tensor of shape words.shape + [bytes_per_word] containing byte versions
of each word.
"""
bytes_per_word = DEFAULT_CHAR_MAXLEN
with tf.device("/cpu:0"):
tf.assert_rank(tokens, 1)
shape = tf.shape(tokens)
tf.logging.info(tokens)
tokens_flat = tf.reshape(tokens, [-1])
as_bytes_flat = tf.map_fn(
fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
elems=tokens_flat,
dtype=tf.int32,
back_prop=False)
tf.logging.info(as_bytes_flat)
as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
return as_bytes
def load_vocab(vocab_file):
vocab = []
with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as f:
vocab_size = 0
for word in f:
vocab_size += 1
vocab.append(word.strip())
return vocab, vocab_size
def check_vocab(vocab_file, output_dir, check_special_token=True, sos=None,
eos=None, unk=None, pad_vocab=False):
"""Check if vocab_file doesn't exist, create from corpus_file."""
if tf.gfile.Exists(vocab_file):
utils.print_out("# Vocab file %s exists" % vocab_file)
vocab, vocab_size = load_vocab(vocab_file)
if check_special_token:
# Verify if the vocab starts with unk, sos, eos
# If not, prepend those tokens & generate a new vocab file
if not unk: unk = UNK
if not sos: sos = SOS
if not eos: eos = EOS
assert len(vocab) >= 3
if vocab[0] != unk or vocab[1] != sos or vocab[2] != eos:
utils.print_out("The first 3 vocab words [%s, %s, %s]"
" are not [%s, %s, %s]" %
(vocab[0], vocab[1], vocab[2], unk, sos, eos))
vocab = [unk, sos, eos] + vocab
vocab_size += 3
new_vocab_file = os.path.join(output_dir, os.path.basename(vocab_file))
with codecs.getwriter("utf-8")(
tf.gfile.GFile(new_vocab_file, "wb")) as f:
for word in vocab:
f.write("%s\n" % word)
vocab_file = new_vocab_file
if pad_vocab and vocab_size % 8 != 0:
new_vocab_file = os.path.join(output_dir, os.path.basename(vocab_file))
padded_vocab_size = ((vocab_size + 8 - 1)// 8) * 8
for i in range(0, padded_vocab_size - vocab_size):
token = "<madeupword" + str(i) + ">"
vocab.append(token)
with codecs.getwriter("utf-8")(
tf.gfile.GFile(new_vocab_file, "wb")) as f:
for word in vocab:
f.write("%s\n" % word)
vocab_file = new_vocab_file
else:
raise ValueError("vocab_file '%s' does not exist." % vocab_file)
vocab_size = len(vocab)
return vocab_size, vocab_file
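# Illustrative sketch (not part of the original file): with pad_vocab=True
# and 37 vocab entries, padded_vocab_size = ((37 + 8 - 1) // 8) * 8 = 40,
# so <madeupword0>, <madeupword1> and <madeupword2> are appended before the
# padded vocab file is written out.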
def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
"""Creates vocab tables for src_vocab_file and tgt_vocab_file."""
src_vocab_table = lookup_ops.index_table_from_file(
src_vocab_file, default_value=UNK_ID)
if share_vocab:
tgt_vocab_table = src_vocab_table
else:
tgt_vocab_table = lookup_ops.index_table_from_file(
tgt_vocab_file, default_value=UNK_ID)
return src_vocab_table, tgt_vocab_table
def load_embed_txt(embed_file):
"""Load embed_file into a python dictionary.
Note: the embed_file should be a Glove/word2vec formatted txt file.
Here is an example assuming embed_size=5:
the -0.071549 0.093459 0.023738 -0.090339 0.056123
to 0.57346 0.5417 -0.23477 -0.3624 0.4037
and 0.20327 0.47348 0.050877 0.002103 0.060547
For word2vec format, the first line will be: <num_words> <emb_size>.
Args:
embed_file: file path to the embedding file.
Returns:
a dictionary that maps word to vector, and the size of embedding dimensions.
"""
emb_dict = dict()
emb_size = None
is_first_line = True
with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f:
for line in f:
tokens = line.rstrip().split(" ")
if is_first_line:
is_first_line = False
if len(tokens) == 2: # header line
emb_size = int(tokens[1])
continue
word = tokens[0]
vec = list(map(float, tokens[1:]))
emb_dict[word] = vec
if emb_size:
if emb_size != len(vec):
utils.print_out(
"Ignoring %s since embeding size is inconsistent." % word)
del emb_dict[word]
else:
emb_size = len(vec)
return emb_dict, emb_size
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/vocab_utils.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions specifically for NMT."""
from __future__ import print_function
import codecs
import time
import numpy as np
import tensorflow as tf
from utils import misc_utils as utils
__all__ = ["get_translation"]
def get_translation(nmt_outputs, sent_id, tgt_eos, subword_option):
"""Given batch decoding outputs, select a sentence and turn to text."""
if tgt_eos: tgt_eos = tgt_eos.encode("utf-8")
# Select a sentence
output = nmt_outputs[sent_id, :].tolist()
# If there is an eos symbol in the output, cut it at that point.
if tgt_eos and tgt_eos in output:
output = output[:output.index(tgt_eos)]
if subword_option == "bpe": # BPE
translation = utils.format_bpe_text(output)
elif subword_option == "spm": # SPM
translation = utils.format_spm_text(output)
else:
translation = utils.format_text(output)
return translation, len(output)
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/utils/nmt_utils.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import sys
from subprocess import Popen, PIPE
parser = argparse.ArgumentParser(description='Translate')
parser.add_argument('--executable', default='nmt.py', help='path to nmt.py')
parser.add_argument('--infer_batch_size', metavar='B1,[B2,...]',
default='64', help='batch sizes separated by comma')
parser.add_argument('--beam_width', metavar='W1,[W2,...]',
default='5', help='beam widths separated by comma')
args, other_args = parser.parse_known_args()
batch_sizes = list(map(int, args.infer_batch_size.split(',')))
beam_widths = list(map(int, args.beam_width.split(',')))
def pr(*args, column_len=14):
for arg in args:
if isinstance(arg, float):
arg = '{:.2f}'.format(arg)
arg = str(arg)
print('', arg.ljust(column_len), end=' |')
print()
pr('batch size', 'beam width', 'bleu', 'sentences/sec', 'tokens/sec',
'latency_avg', 'latency_50', 'latency_90', 'latency_95', 'latency_99', 'latency_100')
for batch_size in batch_sizes:
for beam_width in beam_widths:
cmd = ['python', args.executable, '--beam_width', str(beam_width),
'--infer_batch_size', str(batch_size), '--mode', 'infer'] + other_args
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
bleu_search_res = re.search(rb'\nbleu is ((\d|\.)+)', out)
speed_search_res = re.search(
rb'\neval time for ckpt: ((\d|\.)+) mins \(((\d|\.)+) sent/sec, ((\d|\.)+) tokens/sec\)', out)
latencies = []
for lat in ['avg', '50', '90', '95', '99', '100']:
latencies.append(re.search(r'\neval latency_{} for ckpt: ((\d|\.)+) ms'.format(lat).encode(), out))
if bleu_search_res is None or speed_search_res is None or any(filter(lambda x: x is None, latencies)):
print('AN ERROR OCCURRED WHILE RUNNING:', cmd, file=sys.stderr)
print('-' * 20, 'STDOUT', '-' * 20, file=sys.stderr)
print(out.decode())
print('-' * 20, 'STDERR', '-' * 20, file=sys.stderr)
print(err.decode())
exit(1)
bleu = float(bleu_search_res.group(1))
sentences_per_sec, tokens_per_sec = map(float, speed_search_res.group(3, 5))
latencies = list(map(lambda x: float(x.group(1)), latencies))
pr(batch_size, beam_width, bleu, sentences_per_sec, tokens_per_sec, *latencies)
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/scripts/translate.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import sys
import json
from pathlib import Path
from subprocess import Popen, PIPE
parser = argparse.ArgumentParser(description='Parse training logs')
parser.add_argument('log', help='path to log file', type=Path)
args = parser.parse_args()
content = args.log.read_bytes()
bleu = list(map(lambda x: float(x[0]), re.findall(rb'\nbleu is ((\d|\.)+)', content)))
training_speed = re.findall(rb'\ntraining time for epoch (\d+): ((\d|\.)+) mins \(((\d|\.)+) sent/sec, ((\d|\.)+) tokens/sec\)', content)
training_tokens = list(map(lambda x: float(x[5]), training_speed))
training_sentences = list(map(lambda x: float(x[3]), training_speed))
eval_speed = re.findall(rb'\neval time for epoch (\d+): ((\d|\.)+) mins \(((\d|\.)+) sent/sec, ((\d|\.)+) tokens/sec\)', content)
if not eval_speed:
# The empty group () below keeps group indices aligned with the per-epoch
# regex above.
eval_speed = re.findall(rb'\neval time for ckpt(): ((\d|\.)+) mins \(((\d|\.)+) sent/sec, ((\d|\.)+) tokens/sec\)', content)
eval_tokens = list(map(lambda x: float(x[5]), eval_speed))
eval_sentences = list(map(lambda x: float(x[3]), eval_speed))
experiment_duration = float(re.findall(rb'\nExperiment took ((\d|\.)+) min', content)[0][0])
ret = {}
ret['bleu'] = bleu
ret['training_tokens_per_sec'] = training_tokens
ret['training_sentences_per_sec'] = training_sentences
ret['eval_tokens_per_sec'] = eval_tokens
ret['eval_sentences_per_sec'] = eval_sentences
ret['duration'] = experiment_duration
print(json.dumps(ret))
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/scripts/parse_log.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import Counter
def parse_args():
parser = argparse.ArgumentParser(description='Clean dataset')
parser.add_argument('-f1', '--file1', help='file1')
parser.add_argument('-f2', '--file2', help='file2')
return parser.parse_args()
def save_output(fname, data):
with open(fname, 'w') as f:
f.writelines(data)
def main():
"""
Discards all pairs of sentences which can't be decoded by latin-1 encoder.
It aims to filter out sentences with rare unicode glyphs and pairs which
are most likely not valid English-German sentences.
Examples of discarded sentences:
✿★★★Hommage au king de la pop ★★★✿ ✿★★★Que son âme repos...
Для их осуществления нам, прежде всего, необходимо преодолеть
возражения рыночных фундаменталистов, которые хотят ликвидировать или
уменьшить роль МВФ.
practised as a scientist in various medical departments of the ⇗Medical
University of Hanover , the ⇗University of Ulm , and the ⇗RWTH Aachen
(rheumatology, pharmacology, physiology, pathology, microbiology,
immunology and electron-microscopy).
The same shift】 and press 【】 【alt out with a smaller diameter
circle.
Brought to you by ABMSUBS ♥leira(Coordinator/Translator)
♥chibichan93(Timer/Typesetter) ♥ja...
Some examples: &0u - ☺ &0U - ☻ &tel - ☏ &PI - ¶ &SU - ☼ &cH- - ♥ &M2=♫
&sn - ﺵ SGML maps SGML to unicode.
"""
args = parse_args()
c = Counter()
skipped = 0
valid = 0
data1 = []
data2 = []
with open(args.file1) as f1, open(args.file2) as f2:
for idx, lines in enumerate(zip(f1, f2)):
line1, line2 = lines
if idx % 100000 == 1:
print('Processed {} lines'.format(idx))
try:
line1.encode('latin1')
line2.encode('latin1')
except UnicodeEncodeError:
skipped += 1
else:
data1.append(line1)
data2.append(line2)
valid += 1
c.update(line1)
ratio = valid / (skipped + valid)
print('Skipped: {}, Valid: {}, Valid ratio {}'.format(skipped, valid, ratio))
print('Character frequency:', c)
save_output(args.file1, data1)
save_output(args.file2, data2)
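# Illustrative sketch (not part of the original file): the latin-1 round
# trip above is the entire filtering heuristic. For example,
#   'Straße'.encode('latin1')   # succeeds -> the pair is kept
#   '✿'.encode('latin1')        # raises UnicodeEncodeError -> pair dropped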
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Translation/GNMT/scripts/filter_dataset.py |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""
Usage:
python export_saved_model.py \
--activation_fn='relu' \
--batch_size=16 \
--data_format='NCHW' \
--input_dtype="fp32" \
--export_dir="exported_models" \
--model_checkpoint_path="path/to/checkpoint/model.ckpt-2500" \
--unet_variant='tinyUNet' \
--xla \
--amp
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import pprint
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from dllogger.logger import LOGGER
from model.unet import UNet_v1
from model.blocks.activation_blck import authorized_activation_fn
from utils.cmdline_helper import _add_bool_argument
def get_export_flags():
parser = argparse.ArgumentParser(description="JoC-UNet_v1-TF-ExportFlags")
parser.add_argument('--export_dir', default=None, required=True, type=str, help='The export directory.')
parser.add_argument('--model_checkpoint_path', default=None, required=True, help='Checkpoint path.')
parser.add_argument(
'--data_format',
choices=['NHWC', 'NCHW'],
type=str,
default="NCHW",
required=False,
help="""Which Tensor format is used for computation inside the mode"""
)
parser.add_argument(
'--input_dtype',
choices=['fp32', 'fp16'],
type=str,
default="fp32",
required=False,
help="""Tensorflow dtype of the input tensor"""
)
parser.add_argument(
'--unet_variant',
default="tinyUNet",
choices=UNet_v1.authorized_models_variants,
type=str,
required=False,
help="""Which model size is used. This parameter control directly the size and the number of parameters"""
)
parser.add_argument(
'--activation_fn',
choices=authorized_activation_fn,
type=str,
default="relu",
required=False,
help="""Which activation function is used after the convolution layers"""
)
_add_bool_argument(
parser=parser,
name="amp",
default=False,
required=False,
help="Enable Automatic Mixed Precision Computation to maximise performance."
)
_add_bool_argument(
parser=parser,
name="xla",
default=False,
required=False,
help="Enable Tensorflow XLA to maximise performance."
)
parser.add_argument('--batch_size', default=16, type=int, help='Evaluation batch size.')
FLAGS, unknown_args = parser.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
return FLAGS
def export_model(RUNNING_CONFIG):
if RUNNING_CONFIG.amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
model = UNet_v1(
model_name="UNet_v1",
input_format="NHWC",
compute_format=RUNNING_CONFIG.data_format,
n_output_channels=1,
unet_variant=RUNNING_CONFIG.unet_variant,
weight_init_method="he_normal",
activation_fn=RUNNING_CONFIG.activation_fn
)
config_proto = tf.ConfigProto()
config_proto.allow_soft_placement = True
config_proto.log_device_placement = False
config_proto.gpu_options.allow_growth = True
if RUNNING_CONFIG.xla: # Only working on single GPU
LOGGER.log("XLA is activated - Experimental Feature")
config_proto.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
config_proto.gpu_options.force_gpu_compatible = True # Force pinned memory
run_config = tf.estimator.RunConfig(
model_dir=None,
tf_random_seed=None,
save_summary_steps=1e9, # disabled
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=config_proto,
keep_checkpoint_max=None,
keep_checkpoint_every_n_hours=1e9, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None
)
estimator = tf.estimator.Estimator(
model_fn=model,
model_dir=RUNNING_CONFIG.model_checkpoint_path,
config=run_config,
params={'debug_verbosity': 0}
)
LOGGER.log('[*] Exporting the model ...')
input_type = tf.float32 if RUNNING_CONFIG.input_dtype == "fp32" else tf.float16
def get_serving_input_receiver_fn():
input_shape = [RUNNING_CONFIG.batch_size, 512, 512, 1]
def serving_input_receiver_fn():
features = tf.placeholder(dtype=input_type, shape=input_shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features)
return serving_input_receiver_fn
export_path = estimator.export_saved_model(
export_dir_base=RUNNING_CONFIG.export_dir,
serving_input_receiver_fn=get_serving_input_receiver_fn(),
checkpoint_path=RUNNING_CONFIG.model_checkpoint_path
)
LOGGER.log('[*] Done! path: `%s`' % export_path.decode())
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.ERROR)
tf.disable_eager_execution()
flags = get_export_flags()
for endpattern in [".index", ".meta"]:
file_to_check = flags.model_checkpoint_path + endpattern
if not os.path.isfile(file_to_check):
raise FileNotFoundError("The checkpoint file `%s` does not exist" % file_to_check)
print(" ========================= Export Flags =========================\n")
pprint.pprint(dict(flags._get_kwargs()))
print("\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
export_model(flags)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/export_saved_model.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) Jonathan Dekhtiar - [email protected]
# All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
import os
import glob
import ntpath
import argparse
from collections import defaultdict
parser = argparse.ArgumentParser(description="DAGM2007_preprocessing")
parser.add_argument('--data_dir', required=True, type=str, help="Path to DAGM 2007 private dataset")
DEFECTIVE_COUNT = defaultdict(lambda: defaultdict(int))
EXPECTED_DEFECTIVE_SAMPLES_PER_CLASS = {
"Train": {
1: 79,
2: 66,
3: 66,
4: 82,
5: 70,
6: 83,
7: 150,
8: 150,
9: 150,
10: 150,
},
"Test": {
1: 71,
2: 84,
3: 84,
4: 68,
5: 80,
6: 67,
7: 150,
8: 150,
9: 150,
10: 150,
}
}
if __name__ == "__main__":
FLAGS, unknown_args = parser.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
if not os.path.exists(FLAGS.data_dir):
raise ValueError('The dataset directory received `%s` does not exist' % FLAGS.data_dir)
for challenge_id in range(10):
challenge_name = "Class%d" % (challenge_id + 1)
challenge_folder_path = os.path.join(FLAGS.data_dir, challenge_name)
print("[DAGM Preprocessing] Parsing Class ID: %02d ..." % (challenge_id + 1))
if not os.path.exists(challenge_folder_path):
raise ValueError('The folder `%s` does not exist' % challenge_folder_path)
for data_set in ["Train", "Test"]:
challenge_set_folder_path = os.path.join(challenge_folder_path, data_set)
if not os.path.exists(challenge_set_folder_path):
raise ValueError('The folder `%s` does not exist' % challenge_set_folder_path)
with open(os.path.join(challenge_folder_path, "%s_list.csv" % data_set.lower()), 'w') as data_list_file:
data_list_file.write('image_filepath,lbl_image_filepath,is_defective\n')
files = glob.glob(os.path.join(challenge_set_folder_path, "*.PNG"))
for file in files:
filepath, fullname = ntpath.split(file)
filename, extension = os.path.splitext(os.path.basename(fullname))
lbl_filename = "%s_label.PNG" % filename
lbl_filepath = os.path.join(filepath, "Label", lbl_filename)
if os.path.exists(lbl_filepath):
defective = True
else:
defective = False
lbl_filename = ""
if defective:
DEFECTIVE_COUNT[data_set][challenge_id + 1] += 1
data_list_file.write('%s,%s,%d\n' % (fullname, lbl_filename, defective))
if (DEFECTIVE_COUNT[data_set][challenge_id + 1] !=
EXPECTED_DEFECTIVE_SAMPLES_PER_CLASS[data_set][challenge_id + 1]):
raise RuntimeError(
"There should be `%d` defective samples instead of `%d` in challenge (%s): %d" % (
DEFECTIVE_COUNT[data_set][challenge_id + 1],
EXPECTED_DEFECTIVE_SAMPLES_PER_CLASS[data_set][challenge_id + 1], data_set, challenge_id + 1
)
)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/preprocess_dagm2007.py |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import warnings
warnings.simplefilter("ignore")
import tensorflow as tf
import horovod.tensorflow as hvd
from utils import hvd_utils
from runtime import Runner
from utils.cmdline_helper import parse_cmdline
from utils.logging import init_dllogger
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ["TF_EXTRA_PTXAS_OPTIONS"] = "-sw200428197=true" # TODO: NINJA WAR
FLAGS = parse_cmdline()
init_dllogger(FLAGS.log_dir)
RUNNING_CONFIG = tf.contrib.training.HParams(
exec_mode=FLAGS.exec_mode,
save_eval_results_to_json=FLAGS.save_eval_results_to_json,
# ======= Directory HParams ======= #
log_dir=os.path.join(FLAGS.results_dir, "logs"),
model_dir=os.path.join(FLAGS.results_dir, "checkpoints"),
summaries_dir=os.path.join(FLAGS.results_dir, "summaries"),
sample_dir=os.path.join(FLAGS.results_dir, "samples"),
data_dir=FLAGS.data_dir,
dataset_name=FLAGS.dataset_name,
dataset_hparams=dict(),
# ========= Model HParams ========= #
unet_variant=FLAGS.unet_variant,
activation_fn=FLAGS.activation_fn,
input_format='NHWC',
compute_format=FLAGS.data_format,
input_shape=(512, 512),
mask_shape=(512, 512),
n_channels=1,
input_normalization_method="zero_one",
# ======== Runtime HParams ======== #
amp=FLAGS.amp,
xla=FLAGS.xla,
# ======= Training HParams ======== #
iter_unit=FLAGS.iter_unit,
num_iter=FLAGS.num_iter,
warmup_steps=FLAGS.warmup_step,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
learning_rate_decay_steps=FLAGS.learning_rate_decay_steps,
rmsprop_decay=FLAGS.rmsprop_decay,
rmsprop_momentum=FLAGS.rmsprop_momentum,
weight_decay=FLAGS.weight_decay,
use_auto_loss_scaling=FLAGS.use_auto_loss_scaling,
loss_fn_name=FLAGS.loss_fn_name,
augment_data=FLAGS.augment_data,
weight_init_method=FLAGS.weight_init_method,
# ======== Debug Flags ======== #
# 0: No debug
# 1: Layer Creation Debug Info
# 2: Layer + Var Creation Debug Info
debug_verbosity=FLAGS.debug_verbosity,
log_every_n_steps=FLAGS.display_every,
seed=FLAGS.seed,
)
# ===================================
if RUNNING_CONFIG.dataset_name == "DAGM2007":
RUNNING_CONFIG.dataset_hparams["class_id"] = FLAGS.dataset_classID
runner = Runner(
input_format=RUNNING_CONFIG.input_format,
compute_format=RUNNING_CONFIG.compute_format,
n_channels=RUNNING_CONFIG.n_channels,
model_variant=RUNNING_CONFIG.unet_variant,
activation_fn=RUNNING_CONFIG.activation_fn,
input_shape=RUNNING_CONFIG.input_shape,
mask_shape=RUNNING_CONFIG.mask_shape,
input_normalization_method=RUNNING_CONFIG.input_normalization_method,
# Training HParams
augment_data=RUNNING_CONFIG.augment_data,
loss_fn_name=RUNNING_CONFIG.loss_fn_name,
weight_init_method=RUNNING_CONFIG.weight_init_method,
# Runtime HParams
amp=RUNNING_CONFIG.amp,
xla=RUNNING_CONFIG.xla,
# Directory Params
log_dir=RUNNING_CONFIG.log_dir,
model_dir=RUNNING_CONFIG.model_dir,
sample_dir=RUNNING_CONFIG.sample_dir,
data_dir=RUNNING_CONFIG.data_dir,
dataset_name=RUNNING_CONFIG.dataset_name,
dataset_hparams=RUNNING_CONFIG.dataset_hparams,
# Debug Params
debug_verbosity=RUNNING_CONFIG.debug_verbosity,
log_every_n_steps=RUNNING_CONFIG.log_every_n_steps,
seed=RUNNING_CONFIG.seed
)
if RUNNING_CONFIG.exec_mode in ["train", "train_and_evaluate", "training_benchmark"]:
runner.train(
iter_unit=RUNNING_CONFIG.iter_unit,
num_iter=RUNNING_CONFIG.num_iter,
batch_size=RUNNING_CONFIG.batch_size,
warmup_steps=RUNNING_CONFIG.warmup_steps,
weight_decay=RUNNING_CONFIG.weight_decay,
learning_rate=RUNNING_CONFIG.learning_rate,
learning_rate_decay_factor=RUNNING_CONFIG.learning_rate_decay_factor,
learning_rate_decay_steps=RUNNING_CONFIG.learning_rate_decay_steps,
rmsprop_decay=RUNNING_CONFIG.rmsprop_decay,
rmsprop_momentum=RUNNING_CONFIG.rmsprop_momentum,
use_auto_loss_scaling=FLAGS.use_auto_loss_scaling,
augment_data=RUNNING_CONFIG.augment_data,
is_benchmark=RUNNING_CONFIG.exec_mode == 'training_benchmark'
)
if RUNNING_CONFIG.exec_mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark'] and hvd.rank() == 0:
runner.evaluate(
iter_unit=RUNNING_CONFIG.iter_unit if RUNNING_CONFIG.exec_mode != "train_and_evaluate" else "epoch",
num_iter=RUNNING_CONFIG.num_iter if RUNNING_CONFIG.exec_mode != "train_and_evaluate" else 1,
warmup_steps=RUNNING_CONFIG.warmup_steps,
batch_size=RUNNING_CONFIG.batch_size,
is_benchmark=RUNNING_CONFIG.exec_mode == 'inference_benchmark',
save_eval_results_to_json=RUNNING_CONFIG.save_eval_results_to_json
)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/main.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) Jonathan Dekhtiar - [email protected]
# All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import glob
import tensorflow as tf
import horovod.tensorflow as hvd
from datasets.core import BaseDataset
from utils import hvd_utils
from dllogger import Logger
__all__ = ['DAGM2007_Dataset']
class DAGM2007_Dataset(BaseDataset):
dataset_name = "DAGM2007"
def __init__(self, data_dir, class_id):
if class_id is None:
raise ValueError("The parameter `class_id` cannot be set to None")
data_dir = os.path.join(data_dir, "raw_images/private/Class%d" % class_id)
super(DAGM2007_Dataset, self).__init__(data_dir)
def _get_data_dirs(self, training):
if training:
csv_file = os.path.join(self.data_dir, "train_list.csv")
image_dir = os.path.join(self.data_dir, "Train")
else:
csv_file = os.path.join(self.data_dir, "test_list.csv")
image_dir = os.path.join(self.data_dir, "Test")
return image_dir, csv_file
def get_dataset_runtime_specs(self, training, iter_unit, num_iter, global_batch_size):
image_dir, _ = self._get_data_dirs(training=training)
filenames = glob.glob(os.path.join(image_dir, "*.PNG"))
num_samples = len(filenames)
num_steps, num_epochs = DAGM2007_Dataset._count_steps(
iter_unit=iter_unit, num_samples=num_samples, num_iter=num_iter, global_batch_size=global_batch_size
)
return filenames, num_samples, num_steps, num_epochs
def dataset_fn(
self,
batch_size,
training,
input_shape,
mask_shape,
num_threads,
use_gpu_prefetch,
normalize_data_method,
only_defective_images,
augment_data,
seed=None
):
super(DAGM2007_Dataset, self).dataset_fn(
batch_size=batch_size,
training=training,
input_shape=input_shape,
mask_shape=mask_shape,
num_threads=num_threads,
use_gpu_prefetch=use_gpu_prefetch,
normalize_data_method=normalize_data_method, # [None, "zero_centered", "zero_one"]
only_defective_images=only_defective_images,
augment_data=augment_data,
seed=seed
)
shuffle_buffer_size = 10000
image_dir, csv_file = self._get_data_dirs(training=training)
mask_image_dir = os.path.join(image_dir, "Label")
dataset = tf.data.TextLineDataset(csv_file)
dataset = dataset.skip(1) # Skip CSV Header
if only_defective_images:
dataset = dataset.filter(lambda line: tf.not_equal(tf.strings.substr(line, -1, 1), "0"))
if hvd_utils.is_using_hvd() and training:
dataset = dataset.shard(hvd.size(), hvd.rank())
def _load_dagm_data(line):
input_image_name, image_mask_name, label = tf.decode_csv(
line, record_defaults=[[""], [""], [0]], field_delim=','
)
def decode_image(filepath, resize_shape, normalize_data_method):
image_content = tf.read_file(filepath)
# image = tf.image.decode_image(image_content, channels=resize_shape[-1])
image = tf.image.decode_png(contents=image_content, channels=resize_shape[-1], dtype=tf.uint8)
image = tf.image.resize_images(
image,
size=resize_shape[:2],
method=tf.image.ResizeMethod.BILINEAR, # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA]
align_corners=False,
preserve_aspect_ratio=True
)
image.set_shape(resize_shape)
image = tf.cast(image, tf.float32)
if normalize_data_method == "zero_centered":
image = tf.divide(image, 127.5) - 1
elif normalize_data_method == "zero_one":
image = tf.divide(image, 255.0)
return image
input_image = decode_image(
filepath=tf.strings.join([image_dir, input_image_name], separator='/'),
resize_shape=input_shape,
normalize_data_method=normalize_data_method,
)
mask_image = tf.cond(
tf.equal(image_mask_name, ""),
true_fn=lambda: tf.zeros(mask_shape, dtype=tf.float32),
false_fn=lambda: decode_image(
filepath=tf.strings.join([mask_image_dir, image_mask_name], separator='/'),
resize_shape=mask_shape,
normalize_data_method="zero_one",
),
)
label = tf.cast(label, tf.int32)
return tf.data.Dataset.from_tensor_slices(([input_image], [mask_image], [label]))
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_load_dagm_data,
cycle_length=batch_size*8,
block_length=4,
buffer_output_elements=batch_size*8
)
)
dataset = dataset.cache()
if training:
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=shuffle_buffer_size, seed=seed))
else:
dataset = dataset.repeat()
def _augment_data(input_image, mask_image, label):
if augment_data:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("Using data augmentation ...")
#input_image = tf.image.per_image_standardization(input_image)
horizontal_flip = tf.random_uniform(shape=(), seed=seed) > 0.5
input_image = tf.cond(
horizontal_flip, lambda: tf.image.flip_left_right(input_image), lambda: input_image
)
mask_image = tf.cond(horizontal_flip, lambda: tf.image.flip_left_right(mask_image), lambda: mask_image)
                n_rots = tf.random_uniform(shape=(), dtype=tf.int32, minval=0, maxval=3, seed=seed)  # k in {0, 1, 2}: `maxval` is exclusive
input_image = tf.image.rot90(input_image, k=n_rots)
mask_image = tf.image.rot90(mask_image, k=n_rots)
return (input_image, mask_image), label
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
map_func=_augment_data,
num_parallel_calls=num_threads,
batch_size=batch_size,
drop_remainder=True,
)
)
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
if use_gpu_prefetch:
dataset.apply(tf.data.experimental.prefetch_to_device(device="/gpu:0", buffer_size=4))
return dataset
if __name__ == "__main__":
'''
Data Loading Benchmark Usage:
# Real Data - Training
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--training \
--class_id=1
# Real Data - Inference
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1
# --------------- #
# Synthetic Data - Training
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1 \
--training \
--use_synthetic_data
# Synthetic Data - Inference
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1 \
--use_synthetic_data
# --------------- #
'''
import time
import argparse
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser(description="DAGM2007_data_loader_benchmark")
parser.add_argument(
'--data_dir', required=True, type=str, help="Directory path which contains the preprocessed DAGM 2007 dataset"
)
parser.add_argument(
'--batch_size', default=64, type=int, required=True, help="""Batch size used to measure performance."""
)
parser.add_argument(
'--warmup_steps',
default=200,
type=int,
required=True,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
parser.add_argument(
'--benchmark_steps',
default=200,
type=int,
required=True,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
parser.add_argument(
'--class_id',
default=1,
choices=range(1, 11), # between 1 and 10
type=int,
required=True,
help="""Class ID used for benchmark."""
)
parser.add_argument("--training", default=False, action="store_true", help="Benchmark in training mode")
parser.add_argument("--use_synthetic_data", default=False, action="store_true", help="Use synthetic dataset")
FLAGS, unknown_args = parser.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
BURNIN_STEPS = FLAGS.warmup_steps
TOTAL_STEPS = FLAGS.warmup_steps + FLAGS.benchmark_steps
dataset = DAGM2007_Dataset(data_dir=FLAGS.data_dir, class_id=FLAGS.class_id)
_filenames, _num_samples, _num_steps, _num_epochs = dataset.get_dataset_runtime_specs(
training=FLAGS.training, iter_unit="batch", num_iter=TOTAL_STEPS, global_batch_size=FLAGS.batch_size
)
tf.logging.info("[*] Executing Benchmark in %s mode" % ("training" if FLAGS.training else "inference"))
tf.logging.info("[*] Benchmark using %s data" % ("synthetic" if FLAGS.use_synthetic_data else "real"))
print()
tf.logging.info("[*] num_samples: %d" % _num_samples)
tf.logging.info("[*] num_steps: %d" % _num_steps)
tf.logging.info("[*] num_epochs: %d" % _num_epochs)
time.sleep(4)
if not FLAGS.use_synthetic_data:
# Build the data input
        dataset = dataset.dataset_fn(
            batch_size=FLAGS.batch_size,
            training=FLAGS.training,
            input_shape=(512, 512, 1),
            mask_shape=(512, 512, 1),
            num_threads=64,
            use_gpu_prefetch=True,
            # The three arguments below are required by `dataset_fn`; the values
            # mirror those used in runtime/runner.py for the corresponding mode.
            normalize_data_method="zero_centered",
            only_defective_images=FLAGS.training,
            augment_data=FLAGS.training,
            seed=None
        )
else:
# Build the data input
        dataset = dataset.synth_dataset_fn(
            batch_size=FLAGS.batch_size,
            training=FLAGS.training,
            input_shape=(512, 512, 1),
            mask_shape=(512, 512, 1),
            num_threads=64,
            use_gpu_prefetch=True,
            # The three arguments below are required by `synth_dataset_fn`; the
            # values mirror those used in runtime/runner.py.
            normalize_data_method="zero_centered",
            only_defective_images=FLAGS.training,
            augment_data=FLAGS.training,
            seed=None
        )
dataset_iterator = dataset.make_initializable_iterator()
(input_images, mask_images), labels = dataset_iterator.get_next()
print("Input Image Shape: %s" % (input_images.get_shape()))
print("Mask Image Shape: %s" % (mask_images.get_shape()))
print("Label Shape: %s" % (labels.get_shape()))
input_images = tf.image.resize_image_with_crop_or_pad(input_images, target_height=512, target_width=512)
with tf.device("/gpu:0"):
input_images = tf.identity(input_images)
mask_images = tf.identity(mask_images)
labels = tf.identity(labels)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
with tf.Session(config=config) as sess:
sess.run(dataset_iterator.initializer)
        sess.run(tf.global_variables_initializer())
total_files_processed = 0
img_per_sec_arr = []
processing_time_arr = []
processing_start_time = time.time()
for step in range(TOTAL_STEPS):
start_time = time.time()
img_batch, mask_batch, lbl_batch = sess.run([input_images, mask_images, labels])
batch_size = img_batch.shape[0]
total_files_processed += batch_size
elapsed_time = (time.time() - start_time) * 1000
imgs_per_sec = (batch_size / elapsed_time) * 1000
if (step + 1) > BURNIN_STEPS:
processing_time_arr.append(elapsed_time)
img_per_sec_arr.append(imgs_per_sec)
if (step + 1) % 20 == 0 or (step + 1) == TOTAL_STEPS:
print(
"[STEP %04d] # Files: %03d - Time: %03d msecs - Speed: %6d img/s" %
(step + 1, batch_size, elapsed_time, imgs_per_sec)
)
processing_time = time.time() - processing_start_time
avg_processing_speed = np.mean(img_per_sec_arr)
print("\n###################################################################")
print("*** Data Loading Performance Metrics ***\n")
print("\t=> Number of Steps: %d" % (step + 1))
print("\t=> Batch Size: %d" % FLAGS.batch_size)
print("\t=> Files Processed: %d" % total_files_processed)
print("\t=> Total Execution Time: %d secs" % processing_time)
print("\t=> Median Time per step: %3d msecs" % np.median(processing_time_arr))
print("\t=> Median Processing Speed: %d images/secs" % np.median(img_per_sec_arr))
print("\t=> Median Processing Time: %.2f msecs/image" % (1 / float(np.median(img_per_sec_arr)) * 1000))
print("\n*** Debug Shape Information:")
print(
"\t[*] Batch Shape: %s - Max Val: %.2f - Min Val: %.2f - Mean: %.2f - Stddev: %.2f" % (
str(img_batch.shape), np.max(img_batch), np.min(img_batch), float(np.mean(img_batch)),
float(np.std(img_batch))
)
)
print(
"\t[*] Mask Shape: %s - Max Val: %.2f - Min Val: %.2f - Mean: %.2f - Stddev: %.2f" % (
str(mask_batch.shape), np.max(mask_batch), np.min(mask_batch), float(np.mean(mask_batch)),
float(np.std(mask_batch))
)
)
print(
"\t[*] Label Shape: %s - Max Val: %.2f - Min Val: %.2f" %
(str(lbl_batch.shape), np.max(lbl_batch), np.min(lbl_batch))
)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/datasets/dagm2007.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from datasets import core
from datasets.dagm2007 import DAGM2007_Dataset
known_datasets = {cls.dataset_name: cls for cls in core.BaseDataset.__subclasses__()}
__all__ = ['known_datasets']
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/datasets/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
from abc import ABC, abstractmethod
import math
import tensorflow as tf
__all__ = ["BaseDataset"]
class BaseDataset(ABC):
authorized_normalization_methods = [None, "zero_centered", "zero_one"]
def __init__(self, data_dir):
self.data_dir = data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError("The dataset directory `%s` does not exist." % data_dir)
@staticmethod
def _count_steps(iter_unit, num_samples, num_iter, global_batch_size):
if iter_unit not in ["batch", "epoch"]:
raise ValueError("Invalid `iter_unit` value: %s" % iter_unit)
if iter_unit == 'epoch':
num_steps = (num_samples // global_batch_size) * num_iter
num_epochs = num_iter
else:
num_steps = num_iter
num_epochs = math.ceil(num_steps / (num_samples // global_batch_size))
return num_steps, num_epochs
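    # Worked example (hypothetical numbers): with num_samples=1000 and
    # global_batch_size=64 (i.e. 15 steps per epoch):
    #   iter_unit='epoch', num_iter=3   -> num_steps = 15 * 3 = 45, num_epochs = 3
    #   iter_unit='batch', num_iter=100 -> num_steps = 100, num_epochs = ceil(100 / 15) = 7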
@abstractmethod
def dataset_name(self):
raise NotImplementedError
@abstractmethod
def get_dataset_runtime_specs(self, training, iter_unit, num_iter, global_batch_size):
# return filenames, num_samples, num_steps, num_epochs
raise NotImplementedError
@abstractmethod
def dataset_fn(
self,
batch_size,
training,
input_shape,
mask_shape,
num_threads,
use_gpu_prefetch,
normalize_data_method,
only_defective_images,
augment_data,
seed=None
):
if normalize_data_method not in BaseDataset.authorized_normalization_methods:
raise ValueError(
'Unknown `normalize_data_method`: %s - Authorized: %s' %
(normalize_data_method, BaseDataset.authorized_normalization_methods)
)
def synth_dataset_fn(
self,
batch_size,
training,
input_shape,
mask_shape,
num_threads,
use_gpu_prefetch,
normalize_data_method,
only_defective_images,
augment_data,
seed=None
):
if normalize_data_method not in BaseDataset.authorized_normalization_methods:
raise ValueError(
'Unknown `normalize_data_method`: %s - Authorized: %s' %
(normalize_data_method, BaseDataset.authorized_normalization_methods)
)
input_shape = [batch_size] + list(input_shape)
mask_shape = [batch_size] + list(mask_shape)
# Convert the inputs to a Dataset
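        # Match the synthetic pixel statistics to the selected normalization:
        # raw (None) data centers around 127.5 like uint8 images,
        # "zero_centered" data around 0, and "zero_one" data around 0.5.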
if normalize_data_method is None:
mean_val = 127.5
elif normalize_data_method == "zero_centered":
mean_val = 0
else:
mean_val = 0.5
inputs = tf.truncated_normal(
input_shape, dtype=tf.float32, mean=mean_val, stddev=1, seed=seed, name='synth_inputs'
)
masks = tf.truncated_normal(mask_shape, dtype=tf.float32, mean=0.01, stddev=0.1, seed=seed, name='synth_masks')
labels = tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.int32, name='synthetic_labels')
dataset = tf.data.Dataset.from_tensors(((inputs, masks), labels))
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
if use_gpu_prefetch:
dataset.apply(tf.data.experimental.prefetch_to_device(device="/gpu:0", buffer_size=batch_size * 8))
return dataset
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/datasets/core.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import json
import multiprocessing
import operator
import random
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from datasets import known_datasets
from model.unet import UNet_v1
from utils import hvd_utils
from utils.hooks import ProfilerHook
import dllogger as Logger
__all__ = [
'Runner',
]
class Runner(object):
def __init__(
self,
# Model Params
input_format, # NCHW or NHWC
compute_format, # NCHW or NHWC
n_channels,
activation_fn,
weight_init_method,
model_variant,
input_shape,
mask_shape,
input_normalization_method,
# Training HParams
augment_data,
loss_fn_name,
# Runtime HParams
amp,
xla,
# Directory Params
model_dir=None,
log_dir=None,
sample_dir=None,
data_dir=None,
dataset_name=None,
dataset_hparams=None,
# Debug Params
log_every_n_steps=1,
debug_verbosity=0,
seed=None
):
if dataset_hparams is None:
dataset_hparams = dict()
if compute_format not in ["NHWC", 'NCHW']:
raise ValueError("Unknown `compute_format` received: %s (allowed: ['NHWC', 'NCHW'])" % compute_format)
if input_format not in ["NHWC", 'NCHW']:
raise ValueError("Unknown `input_format` received: %s (allowed: ['NHWC', 'NCHW'])" % input_format)
if n_channels not in [1, 3]:
raise ValueError("Unsupported number of channels: %d (allowed: 1 (grayscale) and 3 (color))" % n_channels)
if data_dir is not None and not os.path.exists(data_dir):
raise ValueError("The `data_dir` received does not exists: %s" % data_dir)
if hvd_utils.is_using_hvd():
hvd.init()
if hvd.rank() == 0:
print("Horovod successfully initialized ...")
tf_seed = 2 * (seed + hvd.rank()) if seed is not None else None
else:
tf_seed = 2 * seed if seed is not None else None
# ============================================
# Optimisation Flags - Do not remove
# ============================================
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '1' if not hvd_utils.is_using_hvd() else str(hvd.size())
print("WORLD_SIZE", hvd.size())
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
# =================================================
self.xla = xla
if amp:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("TF AMP is activated - Experimental Feature")
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
# =================================================
model_hparams = tf.contrib.training.HParams(
# Model Params
input_format=input_format,
compute_format=compute_format,
input_shape=input_shape,
mask_shape=mask_shape,
n_channels=n_channels,
activation_fn=activation_fn,
weight_init_method=weight_init_method,
model_variant=model_variant,
input_normalization_method=input_normalization_method,
# Training HParams
augment_data=augment_data,
loss_fn_name=loss_fn_name,
# Runtime Params
amp=amp,
# Debug Params
log_every_n_steps=log_every_n_steps,
debug_verbosity=debug_verbosity,
seed=tf_seed
)
run_config_additional = tf.contrib.training.HParams(
dataset_hparams=dataset_hparams,
model_dir=model_dir if not hvd_utils.is_using_hvd() or hvd.rank() == 0 else None,
log_dir=log_dir if not hvd_utils.is_using_hvd() or hvd.rank() == 0 else None,
sample_dir=sample_dir if not hvd_utils.is_using_hvd() or hvd.rank() == 0 else None,
data_dir=data_dir,
num_preprocessing_threads=32,
)
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
try:
os.makedirs(sample_dir)
except FileExistsError:
pass
self.run_hparams = Runner._build_hparams(model_hparams, run_config_additional)
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print('Defining Model Estimator ...\n')
self._model = UNet_v1(
model_name="UNet_v1",
input_format=self.run_hparams.input_format,
compute_format=self.run_hparams.compute_format,
n_output_channels=1,
unet_variant=self.run_hparams.model_variant,
weight_init_method=self.run_hparams.weight_init_method,
activation_fn=self.run_hparams.activation_fn
)
if self.run_hparams.seed is not None:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("Deterministic Run - Seed: %d\n" % seed)
tf.set_random_seed(self.run_hparams.seed)
np.random.seed(self.run_hparams.seed)
random.seed(self.run_hparams.seed)
if dataset_name not in known_datasets.keys():
raise RuntimeError(
"The dataset `%s` is unknown, allowed values: %s ..." % (dataset_name, list(known_datasets.keys()))
)
self.dataset = known_datasets[dataset_name](data_dir=data_dir, **self.run_hparams.dataset_hparams)
self.num_gpus = 1 if not hvd_utils.is_using_hvd() else hvd.size()
@staticmethod
def _build_hparams(*args):
hparams = tf.contrib.training.HParams()
for _hparams in args:
if not isinstance(_hparams, tf.contrib.training.HParams):
raise ValueError("Non valid HParams argument object detected:", _hparams)
for key, val in _hparams.values().items():
try:
hparams.add_hparam(name=key, value=val)
except ValueError:
print(
"the parameter `{}` already exists - existing value: {} and duplicated value: {}".format(
key, hparams.get(key), val
)
)
return hparams
@staticmethod
def _get_global_batch_size(worker_batch_size):
if hvd_utils.is_using_hvd():
return worker_batch_size * hvd.size()
else:
return worker_batch_size
@staticmethod
def _get_session_config(mode, xla):
if mode not in ["train", 'validation', 'benchmark']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark')" % mode)
config = tf.ConfigProto()
config.allow_soft_placement = True
config.log_device_placement = False
config.gpu_options.allow_growth = True
if hvd_utils.is_using_hvd():
config.gpu_options.visible_device_list = str(hvd.rank())
if xla:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("XLA is activated - Experimental Feature")
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
config.gpu_options.force_gpu_compatible = True # Force pinned memory
        # Thread-pool sizing rationale: each training process keeps a single
        # intra-op thread to avoid spawning a large Eigen thread pool per rank;
        # the remaining CPU cores are split across Horovod ranks for inter-op
        # parallelism (with a small reserve for the input pipeline).
if mode == 'train':
config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads
if hvd_utils.is_using_hvd():
config.inter_op_parallelism_threads = max(2, (multiprocessing.cpu_count() // hvd.size()) - 2)
else:
config.inter_op_parallelism_threads = 4
return config
@staticmethod
def _get_run_config(mode, model_dir, xla, seed=None):
if mode not in ["train", 'validation', 'benchmark']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark')" % mode)
if seed is not None:
if hvd_utils.is_using_hvd():
tf_random_seed = 2 * (seed + hvd.rank())
else:
tf_random_seed = 2 * seed
else:
tf_random_seed = None
config = tf.estimator.RunConfig(
model_dir=model_dir,
tf_random_seed=tf_random_seed,
save_summary_steps=10 if mode == "train" else 1e9, # disabled
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=Runner._get_session_config(mode=mode, xla=xla),
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=1e6, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None
)
if mode == 'train':
if hvd_utils.is_using_hvd():
config = config.replace(
save_checkpoints_steps=1000 if hvd.rank() == 0 else None, keep_checkpoint_every_n_hours=3
)
else:
config = config.replace(save_checkpoints_steps=1000, keep_checkpoint_every_n_hours=3)
return config
def _get_estimator(self, mode, run_params, xla):
if mode not in ["train", 'validation', 'benchmark']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark')" % mode)
run_config = Runner._get_run_config(
mode=mode, model_dir=self.run_hparams.model_dir, xla=xla, seed=self.run_hparams.seed
)
return tf.estimator.Estimator(
model_fn=self._model, model_dir=self.run_hparams.model_dir, config=run_config, params=run_params
)
def train(
self,
iter_unit,
num_iter,
batch_size,
weight_decay,
learning_rate,
learning_rate_decay_factor,
learning_rate_decay_steps,
rmsprop_decay,
rmsprop_momentum,
use_auto_loss_scaling,
augment_data,
warmup_steps=50,
is_benchmark=False
):
if iter_unit not in ["epoch", "batch"]:
raise ValueError('`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for training!')
if self.run_hparams.amp:
if use_auto_loss_scaling:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("TF Loss Auto Scaling is activated - Experimental Feature")
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "1"
apply_manual_loss_scaling = False
else:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "0"
apply_manual_loss_scaling = True
else:
apply_manual_loss_scaling = False
global_batch_size = batch_size * self.num_gpus
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs = self.dataset.get_dataset_runtime_specs(
training=True, iter_unit=iter_unit, num_iter=num_iter, global_batch_size=global_batch_size
)
steps_per_epoch = int(num_steps / num_epochs)
else:
num_epochs = 1
num_steps = num_iter
steps_per_epoch = 625
training_hooks = []
if hvd_utils.is_using_hvd():
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
training_hooks.append(
ProfilerHook(
global_batch_size=global_batch_size,
log_every=self.run_hparams.log_every_n_steps,
warmup_steps=warmup_steps,
is_training=True,
sample_dir=self.run_hparams.sample_dir
)
)
print("Starting Model Training ...")
Logger.log(step=('PARAMETER'), data={"Epochs": num_epochs}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Steps": num_steps}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Steps per Epoch": steps_per_epoch}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Weight Decay Factor": weight_decay}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Learning Rate": learning_rate}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Learning Rate Decay Factor": learning_rate_decay_factor}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Learning Rate Decay Steps": learning_rate_decay_steps}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"RMSProp - Decay": rmsprop_decay}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"RMSProp - Momentum": rmsprop_momentum}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Loss Function Name": self.run_hparams.loss_fn_name}, verbosity=Logger.Verbosity.DEFAULT)
if self.run_hparams.amp:
Logger.log(step=('PARAMETER'), data={"Use Auto Loss Scaling": use_auto_loss_scaling}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"# GPUs": self.num_gpus}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"GPU Batch Size": batch_size}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Global Batch Size": global_batch_size}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Files to be Processed": num_steps * global_batch_size}, verbosity=Logger.Verbosity.DEFAULT)
print() # visual spacing
estimator_params = {
'batch_size': batch_size,
'steps_per_epoch': steps_per_epoch,
'learning_rate': learning_rate,
'learning_rate_decay_steps': learning_rate_decay_steps,
'learning_rate_decay_factor': learning_rate_decay_factor,
'rmsprop_decay': rmsprop_decay,
'rmsprop_momentum': rmsprop_momentum,
'weight_decay': weight_decay,
'apply_manual_loss_scaling': apply_manual_loss_scaling,
'loss_fn_name': self.run_hparams.loss_fn_name,
'debug_verbosity': self.run_hparams.debug_verbosity,
}
def training_data_fn():
if not is_benchmark or self.run_hparams.data_dir is not None:
return self.dataset.dataset_fn(
batch_size=batch_size,
training=True,
only_defective_images=True,
augment_data=augment_data,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
seed=self.run_hparams.seed
)
else:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("Using Synthetic Data ...")
return self.dataset.synth_dataset_fn(
batch_size=batch_size,
training=True,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
only_defective_images=True,
augment_data=augment_data,
seed=self.run_hparams.seed
)
model = self._get_estimator(mode='train', run_params=estimator_params, xla=self.xla)
try:
model.train(
input_fn=training_data_fn,
steps=num_steps,
hooks=training_hooks,
)
except KeyboardInterrupt:
print("Keyboard interrupt")
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print('Ending Model Training ...')
def evaluate(self, iter_unit, num_iter, batch_size, warmup_steps=50, is_benchmark=False, save_eval_results_to_json=False):
if iter_unit not in ["epoch", "batch"]:
raise ValueError('`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for evaluation!')
# if hvd_utils.is_using_hvd() and hvd.rank() != 0:
# raise RuntimeError('Multi-GPU inference is not supported')
print('Defining Model Estimator ...\n')
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs = self.dataset.get_dataset_runtime_specs(
training=False, iter_unit=iter_unit, num_iter=num_iter, global_batch_size=batch_size
)
steps_per_epoch = num_steps / num_epochs
else:
num_epochs = 1
num_steps = num_iter
steps_per_epoch = num_steps
evaluation_hooks = [
ProfilerHook(
global_batch_size=batch_size,
log_every=self.run_hparams.log_every_n_steps,
warmup_steps=warmup_steps,
is_training=False,
sample_dir=self.run_hparams.sample_dir
)
]
print('Starting Model Evaluation ...\n')
Logger.log(step=('PARAMETER'), data={"Epochs": num_epochs}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Steps": num_steps}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Steps per Epoch": steps_per_epoch}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"GPU Batch Size": batch_size}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Files to Processed": num_steps * batch_size}, verbosity=Logger.Verbosity.DEFAULT)
print() # visual spacing
estimator_params = {
'batch_size': batch_size,
'steps_per_epoch': steps_per_epoch,
'loss_fn_name': self.run_hparams.loss_fn_name,
'debug_verbosity': self.run_hparams.debug_verbosity,
}
def evaluation_data_fn():
if not is_benchmark or self.run_hparams.data_dir is not None:
return self.dataset.dataset_fn(
batch_size=batch_size,
training=False,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
only_defective_images=False,
augment_data=False,
seed=self.run_hparams.seed
)
else:
print("Using Synthetic Data ...")
return self.dataset.synth_dataset_fn(
batch_size=batch_size,
training=False,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
only_defective_images=False,
augment_data=False,
seed=self.run_hparams.seed
)
model = self._get_estimator(mode='validation', run_params=estimator_params, xla=self.xla)
try:
eval_results = model.evaluate(
input_fn=evaluation_data_fn,
steps=num_steps,
hooks=evaluation_hooks,
)
print('Ending Model Evaluation ...')
print('###################################\n\nEvaluation Results:\n')
data_to_log = {"{prefix}.{key}".format(prefix=Logger._stage, key=key): float(val)
for key, val in sorted(eval_results.items(), key=operator.itemgetter(0))
if not any(val in key for val in ["loss", "global_step", "Confusion_Matrix"])}
Logger.log(step=(), data=data_to_log, verbosity=Logger.Verbosity.DEFAULT)
fns = eval_results["Confusion_Matrix_FN"]
fps = eval_results["Confusion_Matrix_FP"]
tns = eval_results["Confusion_Matrix_TN"]
tps = eval_results["Confusion_Matrix_TP"]
positives = np.add(tps, fns)
negatives = np.add(tns, fps)
tpr = np.divide(tps, positives)
tnr = np.divide(tns, negatives)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_positives".format(prefix=Logger._stage): str(tps)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_negatives".format(prefix=Logger._stage): str(tns)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.false_positives".format(prefix=Logger._stage): str(fps)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.false_negatives".format(prefix=Logger._stage): str(fns)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_positive_rate".format(prefix=Logger._stage): str(["%.3f" % x for x in tpr])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_negative_rate".format(prefix=Logger._stage): str(["%.3f" % x for x in tnr])},
verbosity=Logger.Verbosity.DEFAULT
)
if save_eval_results_to_json:
results_dict = {
'IoU': {
'0.75': str(eval_results["IoU_THS_0.75"]),
'0.85': str(eval_results["IoU_THS_0.85"]),
'0.95': str(eval_results["IoU_THS_0.95"]),
'0.99': str(eval_results["IoU_THS_0.99"]),
},
'TPR': {
'0.75': str(tpr[-4]),
'0.85': str(tpr[-3]),
'0.95': str(tpr[-2]),
'0.99': str(tpr[-1]),
},
'TNR': {
'0.75': str(tnr[-4]),
'0.85': str(tnr[-3]),
'0.95': str(tnr[-2]),
'0.99': str(tnr[-1]),
}
}
with open(os.path.join(self.run_hparams.model_dir, "..", "results.json"), 'w') as f:
json.dump(results_dict, f)
except KeyboardInterrupt:
print("Keyboard interrupt")
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/runtime/runner.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from runtime.runner import Runner
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/runtime/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import dllogger as Logger
def format_step(step):
if isinstance(step, str):
return step
if isinstance(step, int):
return "Iteration: {} ".format(step)
s = ""
if len(step) > 0:
s += "Epoch: {} ".format(step[0])
if len(step) > 1:
s += "Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
return s
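# Examples (illustrative values):
#   format_step('PARAMETER') -> 'PARAMETER'
#   format_step(5)           -> 'Iteration: 5 '
#   format_step((1, 20))     -> 'Epoch: 1 Iteration: 20 '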
def init_dllogger(log_dir):
Logger.init([
Logger.StdOutBackend(Logger.Verbosity.DEFAULT, step_format=format_step),
Logger.JSONStreamBackend(Logger.Verbosity.VERBOSE, log_dir)
])
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/logging.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
__all__ = [
"iou_score",
]
def iou_score(y_pred, y_true, threshold, eps=1e-5):
y_true = tf.cast(y_true > threshold, dtype=tf.float32)
y_pred = tf.cast(y_pred > threshold, dtype=tf.float32)
intersection = y_true * y_pred
intersection = tf.reduce_sum(intersection, axis=(1, 2, 3))
numerator = 2.0 * intersection + eps
divisor = tf.reduce_sum(y_true, axis=(1, 2, 3)) + tf.reduce_sum(y_pred, axis=(1, 2, 3)) + eps
return tf.reduce_mean(numerator / divisor)
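# Minimal usage sketch (tensor names are illustrative placeholders, not part
# of the pipeline above):
#
#   y_true = tf.placeholder(tf.float32, [None, 512, 512, 1])
#   y_pred = tf.placeholder(tf.float32, [None, 512, 512, 1])
#   iou_075 = iou_score(y_pred=y_pred, y_true=y_true, threshold=0.75)
#
# Note that, as implemented, the returned score is the Dice form
# 2*|A ∩ B| / (|A| + |B|), not the strict Jaccard IoU |A ∩ B| / |A ∪ B|.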
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/metrics.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import argparse
from datasets import known_datasets
from model.unet import UNet_v1
from model.blocks.activation_blck import authorized_activation_fn
def _add_bool_argument(parser, name=None, default=False, required=False, help=None):
if not isinstance(default, bool):
raise ValueError()
feature_parser = parser.add_mutually_exclusive_group(required=required)
feature_parser.add_argument('--' + name, dest=name, action='store_true', help=help, default=default)
feature_parser.add_argument('--no' + name, dest=name, action='store_false')
    feature_parser.set_defaults(**{name: default})
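# Example: `_add_bool_argument(parser=p, name="amp", default=False)` creates the
# mutually exclusive pair `--amp` / `--noamp`, so `FLAGS.amp` is True only when
# `--amp` is passed explicitly.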
def parse_cmdline():
p = argparse.ArgumentParser(description="JoC-UNet_v1-TF")
p.add_argument(
'--unet_variant',
default="tinyUNet",
choices=UNet_v1.authorized_models_variants,
type=str,
required=False,
help="""Which model size is used. This parameter control directly the size and the number of parameters"""
)
p.add_argument(
'--activation_fn',
choices=authorized_activation_fn,
type=str,
default="relu",
required=False,
help="""Which activation function is used after the convolution layers"""
)
p.add_argument(
'--exec_mode',
choices=['train', 'train_and_evaluate', 'evaluate', 'training_benchmark', 'inference_benchmark'],
type=str,
required=True,
help="""Which execution mode to run the model into"""
)
p.add_argument(
'--iter_unit',
choices=['epoch', 'batch'],
type=str,
required=True,
help="""Will the model be run for X batches or X epochs ?"""
)
p.add_argument('--num_iter', type=int, required=True, help="""Number of iterations to run.""")
p.add_argument('--batch_size', type=int, required=True, help="""Size of each minibatch per GPU.""")
p.add_argument(
'--warmup_step',
default=200,
type=int,
required=False,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
p.add_argument(
'--results_dir',
type=str,
required=True,
help="""Directory in which to write training logs, summaries and checkpoints."""
)
p.add_argument(
'--log_dir',
type=str,
required=False,
default="dlloger_out.json",
help="""Directory in which to write logs."""
)
_add_bool_argument(
parser=p,
name="save_eval_results_to_json",
default=False,
required=False,
help="Whether to save evaluation results in JSON format."
)
p.add_argument('--data_dir', required=False, default=None, type=str, help="Path to dataset directory")
p.add_argument(
'--dataset_name',
choices=list(known_datasets.keys()),
type=str,
required=True,
help="""Name of the dataset used in this run (only DAGM2007 is supported atm.)"""
)
p.add_argument(
'--dataset_classID',
default=None,
type=int,
required=False,
help="""ClassID to consider to train or evaluate the network (used for DAGM)."""
)
p.add_argument(
'--data_format',
choices=['NHWC', 'NCHW'],
type=str,
default="NCHW",
required=False,
help="""Which Tensor format is used for computation inside the mode"""
)
_add_bool_argument(
parser=p,
name="amp",
default=False,
required=False,
help="Enable Automatic Mixed Precision to speedup FP32 computation using tensor cores"
)
_add_bool_argument(
parser=p, name="xla", default=False, required=False, help="Enable Tensorflow XLA to maximise performance."
)
p.add_argument(
'--weight_init_method',
choices=UNet_v1.authorized_weight_init_methods,
default="he_normal",
type=str,
required=False,
help="""Which initialisation method is used to randomly intialize the model during training"""
)
p.add_argument('--learning_rate', default=1e-4, type=float, required=False, help="""Learning rate value.""")
p.add_argument(
'--learning_rate_decay_factor',
default=0.8,
type=float,
required=False,
help="""Decay factor to decrease the learning rate."""
)
p.add_argument(
'--learning_rate_decay_steps',
default=500,
type=int,
required=False,
help="""Decay factor to decrease the learning rate."""
)
p.add_argument('--rmsprop_decay', default=0.9, type=float, required=False, help="""RMSProp - Decay value.""")
p.add_argument('--rmsprop_momentum', default=0.8, type=float, required=False, help="""RMSProp - Momentum value.""")
p.add_argument('--weight_decay', default=1e-5, type=float, required=False, help="""Weight Decay scale factor""")
_add_bool_argument(
parser=p, name="use_auto_loss_scaling", default=False, required=False, help="Use AutoLossScaling with TF-AMP"
)
p.add_argument(
'--loss_fn_name',
type=str,
default="adaptive_loss",
required=False,
help="""Loss function Name to use to train the network"""
)
_add_bool_argument(
parser=p, name="augment_data", default=True, required=False, help="Choose whether to use data augmentation"
)
p.add_argument(
'--display_every',
type=int,
default=50,
required=False,
help="""How often (in batches) to print out debug information."""
)
p.add_argument(
'--debug_verbosity',
choices=[0, 1, 2],
default=0,
type=int,
required=False,
help="""Verbosity Level: 0 minimum, 1 with layer creation debug info, 2 with layer + var creation debug info."""
)
p.add_argument('--seed', type=int, default=None, help="""Random seed.""")
FLAGS, unknown_args = p.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
return FLAGS
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/cmdline_helper.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
__all__ = ["is_using_hvd"]
def is_using_hvd():
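    # NOTE: Horovod support is hard-wired on in this codebase; callers restrict
    # rank-0-only work (logging, checkpointing) with `hvd.rank() == 0` guards.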
return True
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/hvd_utils.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from utils import hooks
from utils import cmdline_helper
from utils import hvd_utils
from utils import image_processing
from utils import logging
from utils import losses
from utils import metrics
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
__all__ = ["regularization_l2loss", "reconstruction_l2loss", "reconstruction_x_entropy", "adaptive_loss"]
def regularization_l2loss(weight_decay):
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN"""
return all(
[tensor_name not in name.lower() for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]]
)
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), weight_decay)
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
return l2_loss
def reconstruction_l2loss(y_pred, y_true):
reconstruction_err = tf.subtract(y_pred, y_true)
return tf.reduce_mean(tf.nn.l2_loss(reconstruction_err), name='reconstruction_loss_l2_loss')
def reconstruction_x_entropy(y_pred, y_true, from_logits=False):
return tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true=y_true, y_pred=y_pred, from_logits=from_logits))
def dice_coe(y_pred, y_true, loss_type='jaccard', smooth=1.):
"""Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation
i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.
Parameters
-----------
y_true : Tensor
A distribution with shape: [batch_size, ....], (any dimensions).
y_pred : Tensor
The target distribution, format the same with `output`.
loss_type : str
``jaccard`` or ``sorensen``, default is ``jaccard``.
smooth : float
This small value will be added to the numerator and denominator.
- If both output and target are empty, it makes sure dice is 1.
- If either output or target are empty (all pixels are background),
dice = ```smooth/(small_value + smooth)``,
then if smooth is very small, dice close to 0 (even the image values lower than the threshold),
so in this case, higher smooth can have a higher dice.
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
y_true_f = tf.layers.flatten(y_true)
y_pred_f = tf.layers.flatten(y_pred)
intersection = tf.reduce_sum(y_true_f * y_pred_f)
if loss_type == 'jaccard':
union = tf.reduce_sum(tf.square(y_pred_f)) + tf.reduce_sum(tf.square(y_true_f))
elif loss_type == 'sorensen':
union = tf.reduce_sum(y_pred_f) + tf.reduce_sum(y_true_f)
else:
raise ValueError("Unknown `loss_type`: %s" % loss_type)
return (2. * intersection + smooth) / (union + smooth)
def adaptive_loss(y_pred, y_pred_logits, y_true, switch_at_threshold=0.3, loss_type='jaccard'):
dice_loss = 1 - dice_coe(y_pred=y_pred, y_true=y_true, loss_type=loss_type, smooth=1.)
return tf.cond(
dice_loss < switch_at_threshold,
true_fn=lambda: dice_loss,
false_fn=lambda: reconstruction_x_entropy(y_pred=y_pred_logits, y_true=y_true, from_logits=True)
)
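# Note on the switching behaviour above: early in training the dice loss is
# typically >= `switch_at_threshold`, so the binary cross-entropy branch
# (computed from logits) drives the gradients; once the dice loss drops below
# the threshold, optimization continues directly on the dice objective.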
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/losses.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
__all__ = ["binarize_output"]
def binarize_output(image, threshold=None):
if threshold is not None:
image = tf.cast(image > threshold, dtype=tf.uint8)
image = image * 255
else:
image = tf.cast(image * 255, dtype=tf.uint8)
encoded_image = tf.image.encode_jpeg(image, format='grayscale', quality=100)
if image.get_shape().rank == 3:
image = tf.expand_dims(image, axis=0)
return image, encoded_image
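# Minimal usage sketch (tensor names are illustrative, not part of the
# training pipeline):
#
#   probs = tf.random_uniform([512, 512, 1])                       # HWC float map in [0, 1]
#   hard_img, hard_jpeg = binarize_output(probs, threshold=0.75)   # 0/255 binary mask
#   soft_img, soft_jpeg = binarize_output(probs)                   # grayscale rendering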
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/image_processing.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from utils.hooks.profiler_hook import *
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/hooks/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import json
import time
import operator
import numpy as np
import tensorflow as tf
import dllogger as Logger
__all__ = ["ProfilerHook"]
class ProfilerHook(tf.train.SessionRunHook):
def __init__(self, global_batch_size, sample_dir, log_every=10, warmup_steps=20, is_training=True):
self._warmup_steps = warmup_steps
self._global_batch_size = global_batch_size
self._current_step = 0
self._log_every = log_every
self._t0 = None
self._start_training_time = None
self._is_training = is_training
self._sample_dir = sample_dir
self._processing_speed_arr = list()
@staticmethod
def moving_average(a, n=4):
if len(a) < n:
return [np.mean(a)]
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
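    # Worked example: moving_average([1, 2, 3, 4, 5, 6], n=4)
    # -> windows (1..4), (2..5), (3..6) -> [2.5, 3.5, 4.5];
    # inputs shorter than `n` fall back to a single plain mean.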
def after_create_session(self, session, coord):
params_count = tf.get_default_graph().get_tensor_by_name("trainable_parameters_count_ref:0")
_params_count = session.run(params_count)
Logger._stage = "train" if self._is_training else "eval"
Logger.log(
step=('PARAMETER'),
data={"# Total Trainable Parameters": int(_params_count)}, verbosity=Logger.Verbosity.DEFAULT
)
Logger.metadata(
metric="{prefix}.avg_ips".format(prefix=Logger._stage),
metadata={"unit": "imgs/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": Logger._stage.upper()}
)
for ths in [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]:
Logger.metadata(
metric="{prefix}.IoU_THS_{ths}".format(prefix=Logger._stage, ths=ths),
metadata={"format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": Logger._stage.upper()}
)
if self._is_training:
Logger.metadata(
metric="{prefix}.learning_rate".format(prefix=Logger._stage),
metadata={"format": ":.3e", "GOAL": "NONE", "STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.weight_decay".format(prefix=Logger._stage),
metadata={"format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.reconstruction_loss".format(prefix=Logger._stage),
metadata={"format": ":.3f", "GOAL": "MINIMIZE", "STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.total_loss".format(prefix=Logger._stage),
metadata={"format": ":.3f", "GOAL": "MINIMIZE", "STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.true_positives".format(prefix=Logger._stage),
metadata={"STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.true_negatives".format(prefix=Logger._stage),
metadata={"STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.false_positives".format(prefix=Logger._stage),
metadata={"STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.false_negatives".format(prefix=Logger._stage),
metadata={"STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.true_positive_rate".format(prefix=Logger._stage),
metadata={"STAGE": Logger._stage.upper()}
)
Logger.metadata(
metric="{prefix}.true_negative_rate".format(prefix=Logger._stage),
metadata={"STAGE": Logger._stage.upper()}
)
self._start_training_time = time.time()
def before_run(self, run_context):
self._current_step += 1
request_fetches = dict()
if self._current_step % self._log_every == 0:
additional_fetches = {
'total_loss': tf.get_default_graph().get_tensor_by_name("losses/total_loss_ref:0"),
'iou_scores': dict(),
'confusion_matrix': dict()
}
if self._is_training:
additional_fetches["weight_decay"] = tf.get_default_graph().get_tensor_by_name("losses/l2_loss_ref:0")
additional_fetches["reconstruction_loss"] = tf.get_default_graph(
).get_tensor_by_name("losses/reconstruction_loss_ref:0")
additional_fetches["learning_rate"] = tf.get_default_graph(
).get_tensor_by_name("optimizers/learning_rate_ref:0")
# ==================== Samples ==================== #
if self._sample_dir is not None and self._is_training:
additional_fetches["samples"] = {}
additional_fetches["samples"]["input_image"] = tf.get_default_graph(
).get_tensor_by_name("input_image_jpeg_ref:0")
additional_fetches["samples"]["mask"] = tf.get_default_graph().get_tensor_by_name("mask_sample_ref:0")
for threshold in [None, 0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]:
additional_fetches["samples"][str(threshold)] = tf.get_default_graph().get_tensor_by_name(
"output_sample_ths_%s_ref:0" % threshold
)
# ==================== Evaluation Metrics ==================== #
            for threshold in [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]:
                additional_fetches["iou_scores"][str(threshold)] = tf.get_default_graph().get_tensor_by_name(
                    "IoU_Metrics/iou_score_ths_%s_ref:0" % threshold
                )
additional_fetches["confusion_matrix"]["tp"] = tf.get_default_graph(
).get_tensor_by_name("Confusion_Matrix/true_positives_ref:0")
additional_fetches["confusion_matrix"]["tn"] = tf.get_default_graph(
).get_tensor_by_name("Confusion_Matrix/true_negatives_ref:0")
additional_fetches["confusion_matrix"]["fp"] = tf.get_default_graph(
).get_tensor_by_name("Confusion_Matrix/false_positives_ref:0")
additional_fetches["confusion_matrix"]["fn"] = tf.get_default_graph(
).get_tensor_by_name("Confusion_Matrix/false_negatives_ref:0")
# Update `request_fetches` dict
request_fetches.update(additional_fetches)
print("\n######### START: %d ##############" % self._current_step)
self._t0 = time.time()
return tf.train.SessionRunArgs(fetches=request_fetches)
def after_run(self, run_context, run_values):
batch_time = time.time() - self._t0
imgs_per_sec = int(self._global_batch_size / batch_time)
is_log_step = self._current_step % self._log_every == 0
if is_log_step:
if self._current_step > self._warmup_steps:
imgs_per_sec = float(ProfilerHook.moving_average(self._processing_speed_arr, n=30)[-1])
Logger.log(
step=(self._current_step,),
data={"{prefix}.avg_ips".format(prefix=Logger._stage): float(imgs_per_sec)},
verbosity=Logger.Verbosity.DEFAULT
)
if self._is_training:
Logger.log(
step=(self._current_step,),
data={"{prefix}.weight_decay".format(prefix=Logger._stage): float(run_values.results["weight_decay"])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.reconstruction_loss".format(prefix=Logger._stage): float(run_values.results["reconstruction_loss"])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.total_loss".format(prefix=Logger._stage): float(run_values.results["total_loss"])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.learning_rate".format(prefix=Logger._stage): float(run_values.results["learning_rate"])},
verbosity=Logger.Verbosity.DEFAULT
)
for key, val in sorted(run_values.results["iou_scores"].items(), key=operator.itemgetter(0)):
Logger.log(
step=(self._current_step,),
data={"{prefix}.IoU_THS_{ths}".format(prefix=Logger._stage, ths=key): float(val)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.true_positives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["tp"])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.true_negatives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["tn"])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.false_positives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["fp"])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(self._current_step,),
data={"{prefix}.false_negatives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["fn"])},
verbosity=Logger.Verbosity.DEFAULT
)
if self._sample_dir is not None and self._is_training:
for key in sorted(run_values.results["samples"].keys(), key=operator.itemgetter(0)):
with open(
os.path.join(self._sample_dir, "sample_step_%04d_ths_%s.jpeg" % (self._current_step, key)), 'wb'
) as fd:
fd.write(run_values.results["samples"][key])
with open(
os.path.join(self._sample_dir, "sample_step_%04d_mask.jpeg" % self._current_step), 'wb'
) as fd:
fd.write(run_values.results["samples"]["mask"])
print("######### STOP: %d ##############" % self._current_step)
elif self._current_step > self._warmup_steps:
# Do not store speed for log step due to additional fetches
self._processing_speed_arr.append(imgs_per_sec)
def end(self, session):
try:
avg_processing_speed = float(ProfilerHook.moving_average(self._processing_speed_arr, n=100)[-1])
except Exception:
# Fall back to a plain mean, e.g. when there are fewer samples than the
# moving-average window.
avg_processing_speed = float(np.mean(self._processing_speed_arr))
total_processing_time = time.time() - self._start_training_time
total_processing_hours, rem = divmod(total_processing_time, 3600)
print("\n============== Final Summary ==============")
Logger.log(
step=(),
data={"{prefix}.avg_ips".format(prefix=Logger._stage): avg_processing_speed},
verbosity=Logger.Verbosity.DEFAULT
)
perf_dict = {'throughput': str(avg_processing_speed), 'processing_time': str(total_processing_time)}
perf_filename = "performances_%s.json" % ("train" if self._is_training else "eval")
with open(os.path.join(self._sample_dir, "..", perf_filename), 'w') as f:
json.dump(perf_dict, f)
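# The resulting JSON has the form (values are illustrative):
#   {"throughput": "1234.5", "processing_time": "3600.0"}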
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/utils/hooks/profiler_hook.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
import horovod.tensorflow as hvd
from model import layers
from model import blocks
from utils import hvd_utils
from utils import losses
from utils import metrics
from utils import image_processing
from dllogger import Logger
__all__ = ["UNet_v1"]
class UNet_v1(object):
authorized_weight_init_methods = [
"he_normal",
"he_uniform",
"glorot_normal",
"glorot_uniform",
"orthogonal",
]
authorized_models_variants = [
"original",
"tinyUNet",
]
def __init__(
self,
model_name,
compute_format,
input_format,
n_output_channels,
unet_variant,
activation_fn,
weight_init_method,
):
if unet_variant == "original": # Total Params: 36,950,273
input_filters = 64
unet_block_filters = [128, 256, 512]
bottleneck_filters = 1024
output_filters = 64
elif unet_variant == "tinyUNet": # Total Params: 1,824,945
input_filters = 32
unet_block_filters = [32, 64, 128]
bottleneck_filters = 256
output_filters = 32
else:
raise ValueError(
"Unknown `UNet` variant: %s. Authorized: %s" % (unet_variant, UNet_v1.authorized_models_variants)
)
if activation_fn not in blocks.authorized_activation_fn:
raise ValueError(
"Unknown activation function: %s - Authorised: %s" % (activation_fn, blocks.authorized_activation_fn)
)
self.model_hparams = tf.contrib.training.HParams(
compute_format=compute_format,
input_format=input_format,
input_filters=input_filters,
unet_block_filters=unet_block_filters,
bottleneck_filters=bottleneck_filters,
output_filters=output_filters,
n_output_channels=n_output_channels,
model_name=model_name,
)
self.conv2d_hparams = tf.contrib.training.HParams(
kernel_initializer=None, bias_initializer=tf.initializers.constant(0.0), activation_fn=activation_fn
)
if weight_init_method == "he_normal":
self.conv2d_hparams.kernel_initializer = tf.initializers.variance_scaling(
scale=2.0, distribution='truncated_normal', mode='fan_in'
)
elif weight_init_method == "he_uniform":
self.conv2d_hparams.kernel_initializer = tf.initializers.variance_scaling(
scale=2.0, distribution='uniform', mode='fan_in'
)
elif weight_init_method == "glorot_normal":
self.conv2d_hparams.kernel_initializer = tf.initializers.variance_scaling(
scale=1.0, distribution='truncated_normal', mode='fan_avg'
)
elif weight_init_method == "glorot_uniform":
self.conv2d_hparams.kernel_initializer = tf.initializers.variance_scaling(
scale=1.0, distribution='uniform', mode='fan_avg'
)
elif weight_init_method == "orthogonal":
self.conv2d_hparams.kernel_initializer = tf.initializers.orthogonal(gain=1.0)
else:
raise ValueError(
"Unknown weight init method: %s - Authorized: %s" %
(weight_init_method, UNet_v1.authorized_weight_init_methods)
)
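# Illustrative construction (argument values are examples only; they are
# drawn from the authorized lists above):
#
#   model = UNet_v1(
#       model_name="UNet_v1",
#       compute_format="NCHW",
#       input_format="NHWC",
#       n_output_channels=1,
#       unet_variant="tinyUNet",
#       activation_fn="relu",
#       weight_init_method="he_uniform",
#   )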
def __call__(self, features, labels, mode, params):
if "debug_verbosity" not in params.keys():
raise RuntimeError("Parameter `debug_verbosity` is missing...")
if mode == tf.estimator.ModeKeys.TRAIN:
if "rmsprop_decay" not in params.keys():
raise RuntimeError("Parameter `rmsprop_decay` is missing...")
if "rmsprop_momentum" not in params.keys():
raise RuntimeError("Parameter `rmsprop_momentum` is missing...")
if "learning_rate" not in params.keys():
raise RuntimeError("Parameter `learning_rate` is missing...")
if "learning_rate_decay_steps" not in params.keys():
raise RuntimeError("Parameter `learning_rate` is missing...")
if "learning_rate_decay_factor" not in params.keys():
raise RuntimeError("Parameter `learning_rate` is missing...")
if "weight_decay" not in params.keys():
raise RuntimeError("Parameter `weight_decay` is missing...")
if "loss_fn_name" not in params.keys():
raise RuntimeError("Parameter `loss_fn_name` is missing...")
if mode == tf.estimator.ModeKeys.PREDICT:
y_pred, y_pred_logits = self.build_model(
features, training=False, reuse=False, debug_verbosity=params["debug_verbosity"]
)
predictions = {'logits': y_pred}  # NOTE: `y_pred` holds post-sigmoid probabilities, not raw logits.
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
input_image, mask_image = features
with tf.device("/gpu:0"):
tf.identity(input_image, name="input_image_ref")
tf.identity(mask_image, name="mask_image_ref")
tf.identity(labels, name="labels_ref")
y_pred, y_pred_logits = self.build_model(
input_image,
training=mode == tf.estimator.ModeKeys.TRAIN,
reuse=False,
debug_verbosity=params["debug_verbosity"]
)
all_trainable_vars = tf.reduce_sum([tf.reduce_prod(v.shape) for v in tf.trainable_variables()])
tf.identity(all_trainable_vars, name='trainable_parameters_count_ref')
if mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = dict()
# ==================== Samples ==================== #
image_uint8 = tf.cast((input_image + 1) * 127.5, dtype=tf.uint8)
input_image_jpeg = tf.image.encode_jpeg(image_uint8[0], format='grayscale', quality=100)
tf.identity(input_image_jpeg, name="input_image_jpeg_ref")
for threshold in [None, 0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]:
binarize_img, binarize_img_jpeg = image_processing.binarize_output(y_pred[0], threshold=threshold)
tf.identity(binarize_img_jpeg, name="output_sample_ths_%s_ref" % threshold)
tf.summary.image('output_sample_ths_%s' % threshold, binarize_img, 10)
# ==================== Evaluation Metrics ==================== #
with tf.name_scope("IoU_Metrics"):
for threshold in [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]:
iou_score = metrics.iou_score(y_pred=y_pred, y_true=mask_image, threshold=threshold)
tf.identity(iou_score, name='iou_score_ths_%s_ref' % threshold)
tf.summary.scalar('iou_score_ths_%s' % threshold, iou_score)
if mode == tf.estimator.ModeKeys.EVAL:
eval_metrics["IoU_THS_%s" % threshold] = tf.metrics.mean(iou_score)
labels = tf.cast(labels, tf.float32)
labels_preds = tf.reduce_max(y_pred, axis=(1, 2, 3))
# NOTE: `assert (cond, msg)` asserts a non-empty tuple and is therefore
# always true in Python; enforce the check with a graph-mode assertion.
assert_op = tf.Assert(
tf.reduce_all(tf.abs(labels_preds - tf.clip_by_value(labels_preds, 0, 1)) < 1e-5),
["Clipping labels_preds introduces non-trivial loss."]
)
with tf.control_dependencies([assert_op]):
labels_preds = tf.clip_by_value(labels_preds, 0, 1)
with tf.variable_scope("Confusion_Matrix") as scope:
tp, update_tp = tf.metrics.true_positives_at_thresholds(
labels=labels,
predictions=labels_preds,
thresholds=[0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99],
)
tn, update_tn = tf.metrics.true_negatives_at_thresholds(
labels=labels,
predictions=labels_preds,
thresholds=[0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99],
)
fp, update_fp = tf.metrics.false_positives_at_thresholds(
labels=labels,
predictions=labels_preds,
thresholds=[0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99],
)
fn, update_fn = tf.metrics.false_negatives_at_thresholds(
labels=labels,
predictions=labels_preds,
thresholds=[0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99],
)
if mode == tf.estimator.ModeKeys.TRAIN:
local_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope=scope.name)
confusion_matrix_reset_op = tf.initializers.variables(local_vars, name='reset_op')
with tf.control_dependencies([confusion_matrix_reset_op]):
with tf.control_dependencies([update_tp, update_tn, update_fp, update_fn]):
tp = tf.identity(tp)
tn = tf.identity(tn)
fp = tf.identity(fp)
fn = tf.identity(fn)
else:
eval_metrics["Confusion_Matrix_TP"] = tp, update_tp
eval_metrics["Confusion_Matrix_TN"] = tn, update_tn
eval_metrics["Confusion_Matrix_FP"] = fp, update_fp
eval_metrics["Confusion_Matrix_FN"] = fn, update_fn
tf.identity(tp, name='true_positives_ref') # Confusion_Matrix/true_positives_ref:0
tf.identity(tn, name='true_negatives_ref') # Confusion_Matrix/true_negatives_ref:0
tf.identity(fp, name='false_positives_ref') # Confusion_Matrix/false_positives_ref:0
tf.identity(fn, name='false_negatives_ref') # Confusion_Matrix/false_negatives_ref:0
tf.summary.scalar('true_positives', tp[3]) # For Ths = 0.5
tf.summary.scalar('true_negatives', tn[3]) # For Ths = 0.5
tf.summary.scalar('false_positives', fp[3]) # For Ths = 0.5
tf.summary.scalar('false_negatives', fn[3]) # For Ths = 0.5
binarized_mask, binarized_mask_jpeg = image_processing.binarize_output(mask_image[0], threshold=0.5)
tf.identity(binarized_mask_jpeg, name="mask_sample_ref")
tf.summary.image('sample_mask', binarized_mask, 10)
##########################
mask_max_val = tf.reduce_max(mask_image)
tf.identity(mask_max_val, name='mask_max_val_ref')
mask_min_val = tf.reduce_min(mask_image)
tf.identity(mask_min_val, name='mask_min_val_ref')
mask_mean_val = tf.reduce_mean(mask_image)
tf.identity(mask_mean_val, name='mask_mean_val_ref')
mask_std_val = tf.math.reduce_std(mask_image)
tf.identity(mask_std_val, name='mask_std_val_ref')
##########################
output_max_val = tf.reduce_max(y_pred)
tf.identity(output_max_val, name='output_max_val_ref')
output_min_val = tf.reduce_min(y_pred)
tf.identity(output_min_val, name='output_min_val_ref')
output_mean_val = tf.reduce_mean(y_pred)
tf.identity(output_mean_val, name='output_mean_val_ref')
output_std_val = tf.math.reduce_std(y_pred)
tf.identity(output_std_val, name='output_std_val_ref')
with tf.variable_scope("losses"):
# ==================== Reconstruction Loss ==================== #
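# The dice-based options below compute the loss as 1 - dice_coefficient,
# so perfect overlap (coefficient == 1) yields zero loss.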
if params["loss_fn_name"] == "x-entropy":
reconstruction_loss = losses.reconstruction_x_entropy(y_pred=y_pred, y_true=mask_image)
elif params["loss_fn_name"] == "l2_loss":
reconstruction_loss = losses.reconstruction_l2loss(y_pred=y_pred, y_true=mask_image)
elif params["loss_fn_name"] == "dice_sorensen":
reconstruction_loss = 1 - losses.dice_coe(y_pred=y_pred, y_true=mask_image, loss_type='sorensen')
elif params["loss_fn_name"] == "dice_jaccard":
reconstruction_loss = 1 - losses.dice_coe(y_pred=y_pred, y_true=mask_image, loss_type='jaccard')
elif params["loss_fn_name"] == "adaptive_loss":
reconstruction_loss = losses.adaptive_loss(
y_pred=y_pred,
y_pred_logits=y_pred_logits,
y_true=mask_image,
switch_at_threshold=0.3,
loss_type='sorensen'
)
else:
raise ValueError("Unknown loss function received: %s" % params["loss_fn_name"])
tf.identity(reconstruction_loss, name='reconstruction_loss_ref')
tf.summary.scalar('reconstruction_loss', reconstruction_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
# ==================== Regularization Loss ==================== #
l2_loss = losses.regularization_l2loss(weight_decay=params["weight_decay"])
tf.identity(l2_loss, name='l2_loss_ref')
tf.summary.scalar('l2_loss', l2_loss)
total_loss = tf.add(reconstruction_loss, l2_loss, name="total_loss")
else:
total_loss = reconstruction_loss
tf.identity(total_loss, name='total_loss_ref')
tf.summary.scalar('total_loss', total_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.variable_scope("optimizers"):
# Update Global Step
global_step = tf.train.get_or_create_global_step()
tf.identity(global_step, name="global_step_ref")
learning_rate = tf.train.exponential_decay(
learning_rate=params["learning_rate"],
decay_steps=params["learning_rate_decay_steps"],
decay_rate=params["learning_rate_decay_factor"],
global_step=global_step,
staircase=True
)
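# With staircase=True the effective rate is:
#   learning_rate * decay_rate ** floor(global_step / decay_steps)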
tf.identity(learning_rate, name="learning_rate_ref")
tf.summary.scalar('learning_rate_ref', learning_rate)
opt = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
use_locking=False,
centered=True,
decay=params["rmsprop_decay"],
momentum=params["rmsprop_momentum"],
)
if hvd_utils.is_using_hvd():
opt = hvd.DistributedOptimizer(opt, device_dense='/gpu:0')
if params["apply_manual_loss_scaling"]:
# if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
# Logger.log("Applying manual Loss Scaling ...")
loss_scale_manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(
init_loss_scale=2**32, # 4,294,967,296
incr_every_n_steps=1000
)
opt = tf.contrib.mixed_precision.LossScaleOptimizer(opt, loss_scale_manager)
deterministic = True
gate_gradients = (tf.train.Optimizer.GATE_OP if deterministic else tf.train.Optimizer.GATE_NONE)
backprop_op = opt.minimize(total_loss, gate_gradients=gate_gradients, global_step=global_step)
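# Group the UPDATE_OPS collection (e.g. batch-norm moving averages) with
# the backprop op so both run on every training step.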
train_op = tf.group(backprop_op, tf.get_collection(tf.GraphKeys.UPDATE_OPS))
return tf.estimator.EstimatorSpec(
mode,
loss=total_loss,
train_op=train_op,
)
elif mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode, loss=total_loss, eval_metric_ops=eval_metrics, predictions={"output": y_pred}
)
else:
raise NotImplementedError('Unknown mode {}'.format(mode))
def build_model(self, inputs, training=True, reuse=False, debug_verbosity=0):
"""
U-Net: Convolutional Networks for Biomedical Image Segmentation
https://arxiv.org/pdf/1505.04597
"""
skip_connections = []
with tf.variable_scope(self.model_hparams.model_name, reuse=reuse):
with tf.variable_scope("input_reshape"):
with tf.variable_scope("initial_zero_padding"):
inputs = tf.image.resize_image_with_crop_or_pad(inputs, target_height=512, target_width=512)
if self.model_hparams.input_format == 'NHWC' and self.model_hparams.compute_format == 'NCHW':
# Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
# This provides a large performance boost on GPU. See
# https://www.tensorflow.org/performance/performance_guide#data_formats
# Reshape inputs: NHWC => NCHW
net = tf.transpose(inputs, [0, 3, 1, 2])
elif self.model_hparams.input_format == 'NCHW' and self.model_hparams.compute_format == 'NHWC':
# Reshape inputs: NCHW => NHWC
net = tf.transpose(inputs, [0, 2, 3, 1])
else:
net = inputs
# net, out = input_block(net, filters=64)
net, out = blocks.input_unet_block(
net,
filters=self.model_hparams.input_filters,
data_format=self.model_hparams.compute_format,
is_training=training,
conv2d_hparams=self.conv2d_hparams
)
skip_connections.append(out)
for idx, filters in enumerate(self.model_hparams.unet_block_filters):
# net, out = downsample_block(net, filters=filters, idx=idx)
net, skip_connect = blocks.downsample_unet_block(
net,
filters=filters,
data_format=self.model_hparams.compute_format,
is_training=training,
conv2d_hparams=self.conv2d_hparams,
block_name="downsample_block_%d" % (idx + 1)
)
skip_connections.append(skip_connect)
net = blocks.bottleneck_unet_block(
net,
filters=self.model_hparams.bottleneck_filters,
data_format=self.model_hparams.compute_format,
is_training=training,
conv2d_hparams=self.conv2d_hparams,
)
for idx, filters in enumerate(reversed(self.model_hparams.unet_block_filters)):
net = blocks.upsample_unet_block(
net,
residual_input=skip_connections.pop(),
filters=filters,
data_format=self.model_hparams.compute_format,
is_training=training,
conv2d_hparams=self.conv2d_hparams,
block_name='upsample_block_%d' % (idx + 1)
)
logits = blocks.output_unet_block(
inputs=net,
residual_input=skip_connections.pop(),
filters=self.model_hparams.output_filters,
n_output_channels=self.model_hparams.n_output_channels,
data_format=self.model_hparams.compute_format,
is_training=training,
conv2d_hparams=self.conv2d_hparams,
block_name='outputs_block'
)
if self.model_hparams.compute_format == "NCHW":
logits = tf.transpose(logits, [0, 2, 3, 1])
outputs = layers.sigmoid(logits)
return outputs, logits
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/unet.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from model import layers
from model import blocks
from model import unet
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
from model import blocks
__all__ = ["bottleneck_unet_block"]
def bottleneck_unet_block(
inputs, filters, data_format='NCHW', is_training=True, conv2d_hparams=None, block_name='bottleneck_block'
):
with tf.variable_scope(block_name):
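# Bottleneck: two 3x3 convolutions at the lowest resolution, followed by
# a 2x2 up-convolution that halves the channel count on the way back up.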
net = layers.conv2d(
inputs,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
)
net = layers.deconv2d(
net,
n_channels=filters // 2,
kernel_size=(2, 2),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act3'
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/blocks/unet_bottleneck.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
from model import blocks
__all__ = ["upsample_unet_block"]
def upsample_unet_block(
inputs,
residual_input,
filters,
data_format='NCHW',
is_training=True,
conv2d_hparams=None,
block_name='upsample_block'
):
if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
raise ValueError("The paramater `conv2d_hparams` is not of type `HParams`")
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if not isinstance(residual_input, tf.Tensor):
raise ValueError("`residual_input` should be a Tensorflow Tensor")
with tf.variable_scope(block_name):
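# Concatenate the upsampled features with the encoder skip connection
# along the channel axis before the two 3x3 convolutions.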
net = layers.concat([inputs, residual_input], axis=1 if data_format == 'NCHW' else 3)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
)
net = layers.conv2d(
net,
n_channels=filters // 2,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
)
net = layers.deconv2d(
net,
n_channels=filters // 2,
kernel_size=(2, 2),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act3'
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/blocks/unet_upsample.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from model.blocks.activation_blck import activation_block
from model.blocks.activation_blck import authorized_activation_fn
from model.blocks.unet_downsample import downsample_unet_block
from model.blocks.unet_upsample import upsample_unet_block
from model.blocks.unet_bottleneck import bottleneck_unet_block
from model.blocks.unet_io_blocks import input_unet_block
from model.blocks.unet_io_blocks import output_unet_block
__all__ = [
'activation_block',
'authorized_activation_fn',
'downsample_unet_block',
'upsample_unet_block',
'bottleneck_unet_block',
'input_unet_block',
'output_unet_block',
]
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/blocks/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
from model import blocks
__all__ = ["downsample_unet_block"]
def downsample_unet_block(
inputs, filters, data_format='NCHW', is_training=True, conv2d_hparams=None, block_name='downsample_block'
):
if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
raise ValueError("The paramater `conv2d_hparams` is not of type `HParams`")
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
with tf.variable_scope(block_name):
net = layers.conv2d(
inputs,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
)
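# Return both the pooled tensor (input to the next encoder stage) and
# the pre-pool activations (the skip connection for the decoder).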
outputs = layers.max_pooling2d(
inputs=net,
pool_size=(2, 2),
strides=(2, 2),
padding='valid',
data_format=data_format,
name="max_pooling2d"
)
return outputs, net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/blocks/unet_downsample.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
__all__ = [
"authorized_activation_fn",
"activation_block",
]
authorized_activation_fn = ["relu", "leaky_relu", "prelu_shared", "prelu_not_shared", "selu", "crelu", "elu"]
def activation_block(inputs, act_fn, trainable=True, block_name='activation'):
with tf.variable_scope(block_name):
if act_fn == "relu":
return layers.relu(inputs)
if act_fn == "leaky_relu":
return layers.leaky_relu(inputs, alpha=0.2)
if act_fn == "prelu_shared":
return layers.prelu(inputs, channel_shared=True, trainable=trainable)
if act_fn == "prelu_not_shared":
return layers.prelu(inputs, channel_shared=False, trainable=trainable)
if act_fn == "selu":
return layers.selu(inputs)
if act_fn == "crelu":
return layers.crelu(inputs)
if act_fn == "elu":
return layers.elu(inputs)
raise ValueError("Unknown activation function: %s - Authorized: %s" % (act_fn, authorized_activation_fn))
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/blocks/activation_blck.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
from model import blocks
__all__ = ["input_unet_block", "output_unet_block"]
def input_unet_block(
inputs, filters, data_format='NCHW', is_training=True, conv2d_hparams=None, block_name='input_block'
):
with tf.variable_scope(block_name):
net = layers.conv2d(
inputs,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
)
outputs = layers.max_pooling2d(
inputs=net,
pool_size=(2, 2),
strides=(2, 2),
padding='valid',
data_format=data_format,
name="max_pooling2d"
)
return outputs, net
def output_unet_block(
inputs,
residual_input,
filters,
n_output_channels,
data_format='NCHW',
is_training=True,
conv2d_hparams=None,
block_name='output_block'
):
with tf.variable_scope(block_name):
net = layers.concat([inputs, residual_input], axis=1 if data_format == 'NCHW' else 3)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
)
net = layers.conv2d(
net,
n_channels=n_output_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/blocks/unet_io_blocks.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['concat', 'flatten', 'reshape', 'squeeze', 'upscale_2d']
def concat(values, axis, name='concat'):
net = tf.concat(values=values, axis=axis, name=name)
_log_hparams(classname='Concat', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def flatten(inputs, name='flatten'):
net = tf.layers.flatten(inputs, name=name)
_log_hparams(classname='Flatten', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def reshape(tensor, shape, name='reshape'):
net = tf.reshape(tensor, shape=shape, name=name)
_log_hparams(
classname='Reshape', layername=net.name, shape=shape, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def squeeze(tensor, axis, name='squeeze'):
net = tf.squeeze(tensor, axis=axis, name=name)
_log_hparams(
classname='Squeeze', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def upscale_2d(inputs, size, is_scale=True, method=0, align_corners=True, data_format='NHWC', name='upsample2d_layer'):
if not (isinstance(size, (list, tuple)) and len(size) == 2):
raise AssertionError("`size` must be a list or tuple of length 2")
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format received: `%s` (allowed: `NHWC`, `NCHW`)" % data_format)
input_shape = inputs.get_shape()
if len(inputs.get_shape()) == 3:
if is_scale:
size_h = size[0] * int(inputs.get_shape()[0])
size_w = size[1] * int(inputs.get_shape()[1])
_size = [size_h, size_w]
else:
_size = size
elif len(inputs.get_shape()) == 4:
if data_format == 'NCHW':
inputs = tf.transpose(inputs, [0, 2, 3, 1]) # NCHW => NHWC
if is_scale:
size_h = size[0] * int(inputs.get_shape()[1])
size_w = size[1] * int(inputs.get_shape()[2])
_size = [size_h, size_w]
else:
_size = size
else:
raise Exception("Do not support shape %s" % str(inputs.get_shape()))
with tf.variable_scope(name):
net = tf.image.resize_images(inputs, size=_size, method=method, align_corners=align_corners)
if data_format == 'NCHW' and len(inputs.get_shape()) == 4:
net = tf.transpose(net, [0, 3, 1, 2]) # NHWC => NCHW
_log_hparams(
classname='Upscale2D',
layername=net.name,
size=size,
is_scale=is_scale,
method=method,
align_corners=align_corners,
data_format=data_format,
input_shape=str(input_shape),
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/array_ops.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['dense']
def dense(
inputs,
units,
use_bias=True,
trainable=True,
kernel_initializer=tf.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer()
):
net = tf.layers.dense(
inputs,
units=units,
activation=None,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
trainable=trainable
)
_log_hparams(
classname='Dense',
layername=net.name,
units=units,
use_bias=use_bias,
trainable=trainable,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/dense.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['average_pooling2d', 'max_pooling2d']
def average_pooling2d(inputs, pool_size=(2, 2), strides=None, padding='valid', data_format=None, name="avg_pooling2d"):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding.lower() not in ['same', 'valid']:
raise ValueError("Unknown padding: `%s` (accepted: ['same', 'valid'])" % padding)
net = tf.layers.average_pooling2d(
inputs,
pool_size=pool_size,
strides=strides,
padding=padding,
data_format='channels_first' if data_format == 'NCHW' else 'channels_last',
name=name
)
_log_hparams(
classname='AveragePooling2D',
layername=net.name,
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
out_shape=str(net.get_shape())
)
return net
def max_pooling2d(inputs, pool_size=(2, 2), strides=None, padding='valid', data_format=None, name="max_pooling2d"):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding.lower() not in ['same', 'valid']:
raise ValueError("Unknown padding: `%s` (accepted: ['same', 'valid'])" % padding)
net = tf.layers.max_pooling2d(
inputs,
pool_size=pool_size,
strides=strides,
padding=padding,
data_format='channels_first' if data_format == 'NCHW' else 'channels_last',
name=name
)
_log_hparams(
classname='MaxPooling2D',
layername=net.name,
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/pooling.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from model.layers.utils import _log_hparams
from model.layers.activation import crelu
from model.layers.activation import elu
from model.layers.activation import leaky_relu
from model.layers.activation import prelu
from model.layers.activation import relu
from model.layers.activation import relu6
from model.layers.activation import selu
from model.layers.activation import sigmoid
from model.layers.activation import softmax
from model.layers.activation import tanh
from model.layers.conv2d import conv2d
from model.layers.deconv2d import deconv2d
from model.layers.dense import dense
from model.layers.drop_layers import dropout
from model.layers.math_ops import reduce_mean
from model.layers.normalization import batch_norm
from model.layers.padding import pad
from model.layers.pooling import average_pooling2d
from model.layers.pooling import max_pooling2d
from model.layers.array_ops import concat
from model.layers.array_ops import flatten
from model.layers.array_ops import reshape
from model.layers.array_ops import squeeze
from model.layers.array_ops import upscale_2d
__all__ = [
# activation layers
'crelu',
'elu',
'leaky_relu',
'prelu',
'relu',
'relu6',
'selu',
'sigmoid',
'softmax',
'tanh',
# array ops
'concat',
'flatten',
'reshape',
'squeeze',
'upscale_2d',
# conv layers
'conv2d',
# deconv layers
'deconv2d',
# dense layers
'dense',
# drop layers
'dropout',
# math_ops layers
'reduce_mean',
# normalization layers
'batch_norm',
# padding layers
'pad',
# pooling layers
'average_pooling2d',
'max_pooling2d',
]
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['crelu', 'elu', 'leaky_relu', 'prelu', 'relu', 'relu6', 'selu', 'sigmoid', 'softmax', 'tanh']
def crelu(features, name='crelu', axis=-1):
net = tf.nn.crelu(features, name=name, axis=axis)
_log_hparams(classname='CReLU', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def elu(features, name='elu'):
net = tf.nn.elu(features, name=name)
_log_hparams(classname='ELU', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def leaky_relu(features, alpha=0.2, name='leaky_relu'):
net = tf.nn.leaky_relu(features, alpha=alpha, name=name)
_log_hparams(
classname='LeakyReLU', layername=net.name, alpha=alpha, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def prelu(inputs, channel_shared=False, trainable=True, name='prelu'):
def parametric_relu(_x):
if channel_shared:
w_shape = (1, )
else:
w_shape = int(_x.get_shape()[-1])
alphas = tf.get_variable(
'alpha', w_shape, trainable=trainable, initializer=tf.initializers.truncated_normal(mean=-1.0, stddev=0.2)
)
alphas = tf.nn.sigmoid(alphas, name="constraining_alpha_var_in_0_1")
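# PReLU: f(x) = max(x, alpha * x); the sigmoid above constrains each
# learned alpha to (0, 1).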
return tf.maximum(_x, _x * alphas)
with tf.variable_scope(name):
net = parametric_relu(inputs)
_log_hparams(
classname='PReLU',
layername=net.name,
channel_shared=channel_shared,
trainable=trainable,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
def relu(inputs, name='relu'):
net = tf.nn.relu(inputs, name=name)
_log_hparams(classname='ReLU', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def relu6(inputs, name='relu6'):
net = tf.nn.relu6(inputs, name=name)
_log_hparams(classname='ReLU6', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def selu(features, name='selu'):
net = tf.nn.selu(features, name=name)
_log_hparams(classname='SELU', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def sigmoid(x, name='sigmoid'):
net = tf.math.sigmoid(x, name=name)
_log_hparams(classname='Sigmoid', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def softmax(inputs, axis=None, name="softmax"):
net = tf.nn.softmax(
inputs,
axis=axis,
name=name,
)
_log_hparams(
classname='Softmax', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def tanh(inputs, name='tanh'):
net = tf.math.tanh(inputs, name=name)
_log_hparams(classname='TanH', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/activation.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['conv2d']
def conv2d(
inputs,
n_channels=8,
kernel_size=(3, 3),
strides=(1, 1),
padding='VALID',
data_format='NHWC',
dilation_rate=(1, 1),
use_bias=True,
kernel_initializer=tf.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer(),
trainable=True
):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding.upper() not in ['SAME', 'VALID']:
raise ValueError("Unknown padding: `%s` (accepted: ['SAME', 'VALID'])" % padding.upper())
net = tf.layers.conv2d(
inputs,
filters=n_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilation_rate,
data_format='channels_last' if data_format == 'NHWC' else 'channels_first',
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
trainable=trainable,
activation=None
)
_log_hparams(
classname='Conv2D',
layername=net.name,
n_channels=n_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
use_bias=use_bias,
trainable=trainable,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net | DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/conv2d.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import horovod.tensorflow as hvd
from utils import hvd_utils
__all__ = ["_log_hparams"]
def _log_hparams(classname, layername, **kwargs):
log_msg = "%s: `%s`" % (classname, layername)
for arg, val in sorted(kwargs.items()):
log_msg += "\n\t[*] {}: {}".format(arg, val)
log_msg += "\n"
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print(log_msg)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/utils.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['dropout']
def dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):
layer = tf.keras.layers.Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
net = layer.apply(inputs, training=training)
_log_hparams(
classname='Dropout',
layername=net.name,
noise_shape=noise_shape,
training=training,
seed=seed,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/drop_layers.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['reduce_mean']
def reduce_mean(inputs, keepdims=None, data_format='NHWC', name='spatial_mean'):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
axes = [1, 2] if data_format == 'NHWC' else [2, 3]
net = tf.math.reduce_mean(inputs, axis=axes, keepdims=keepdims, name=name)
_log_hparams(
classname='ReduceMean',
layername=net.name,
axis=axes,
keepdims=keepdims,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/math_ops.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import inspect
import tensorflow as tf
from model.layers import _log_hparams
__all__ = ['batch_norm']
def batch_norm(
inputs,
decay=0.999,
epsilon=0.001,
scale=False,
center=True,
is_training=True,
data_format='NHWC',
param_initializers=None
):
"""Adds a Batch Normalization layer."""
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if param_initializers is not None:
for key, initializer in param_initializers.items():
if key not in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
raise ValueError("Unknown key received: `%s`" % key)
if inspect.isclass(initializer):
initializer = initializer()
param_initializers[key] = initializer
if initializer.__class__.__module__ != 'tensorflow.python.ops.init_ops':
raise ValueError("The object `%s` is not a Tensor initializer" % str(initializer))
input_shape = inputs.get_shape()
input_rank = input_shape.ndims
input_channels = input_shape[1]
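# Fused batch norm requires a 4-D input; rank-2 tensors are temporarily
# reshaped to [N, C, 1, 1] (NCHW) or [N, 1, 1, C] (NHWC) below and
# restored to [N, C] after the op.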
if input_rank == 2:
if data_format == 'NCHW':
new_shape = [-1, input_channels, 1, 1]
else:
new_shape = [-1, 1, 1, input_channels]
inputs = tf.reshape(inputs, new_shape)
net = tf.contrib.layers.batch_norm(
inputs,
decay=decay,
scale=scale,
epsilon=epsilon,
is_training=is_training,
trainable=is_training,
fused=True,
data_format=data_format,
center=center,
param_initializers=param_initializers
)
if input_rank == 2:
net = tf.reshape(net, [-1, input_channels])
_log_hparams(
classname='BatchNorm',
layername=net.name,
data_format=data_format,
is_training=is_training,
decay=decay,
epsilon=epsilon,
scale=scale,
center=center,
fused=True,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net | DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/normalization.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
from model.layers.utils import _log_hparams
__all__ = ['deconv2d']
def deconv2d(
inputs,
n_channels=8,
kernel_size=(3, 3),
padding='VALID',
data_format='NHWC',
use_bias=True,
kernel_initializer=tf.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer(),
trainable=True,
use_upscale_conv=True
):
padding = padding.upper() # Enforce capital letters for the padding mode
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if padding not in ['SAME', 'VALID']:
raise ValueError("Unknown padding: `%s` (accepted: ['SAME', 'VALID'])" % padding)
with tf.variable_scope("deconv2d"):
if use_upscale_conv:
layer = layers.upscale_2d(
inputs,
size=(2, 2),
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA]
align_corners=True,
is_scale=True,
data_format=data_format
)
layer = layers.conv2d(
layer,
n_channels=n_channels,
kernel_size=kernel_size,
strides=(1, 1),
padding=padding,
data_format=data_format,
use_bias=use_bias,
trainable=trainable,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer
)
else:
input_shape = inputs.get_shape()
layer = tf.layers.conv2d_transpose(
inputs=inputs,
filters=n_channels,
kernel_size=kernel_size,
strides=(2, 2),
padding=padding,
data_format='channels_first' if data_format == "NCHW" else "channels_last",
use_bias=use_bias,
trainable=trainable,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer
)
_log_hparams(
classname='Conv2DTranspose',
layername=layer.name,
n_channels=n_channels,
kernel_size=kernel_size,
strides=(2, 2),
padding=padding,
data_format=data_format,
use_bias=use_bias,
trainable=trainable,
input_shape=str(input_shape),
out_shape=str(layer.get_shape()),
out_dtype=layer.dtype
)
return layer
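# Usage sketch (illustrative; assumes the TF1.x graph mode this repository
# targets). With `use_upscale_conv=False`, the stride-2 transposed convolution
# doubles the spatial resolution of an NHWC feature map:
#
#   x = tf.ones([8, 16, 16, 64])
#   y = deconv2d(x, n_channels=32, kernel_size=(3, 3), padding='SAME',
#                data_format='NHWC', use_upscale_conv=False)
#   print(y.get_shape())  # (8, 32, 32, 32)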
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/deconv2d.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['pad']
def pad(inputs, paddings, mode='CONSTANT', name='padding', constant_values=0):
if mode.upper() not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
raise ValueError("Unknown padding mode: `%s` (accepted: ['CONSTANT', 'REFLECT', 'SYMMETRIC'])" % mode)
net = tf.pad(inputs, paddings=paddings, mode=mode, name=name, constant_values=constant_values)
_log_hparams(
classname='Padding',
layername=net.name,
paddings=paddings,
mode=mode,
constant_values=constant_values,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
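# Usage sketch (illustrative): reflect-pad an NHWC tensor by one pixel on each
# spatial border.
#
#   x = tf.ones([1, 4, 4, 3])
#   y = pad(x, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
#   print(y.get_shape())  # (1, 6, 6, 3)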
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Industrial/model/layers/padding.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Entry point of the application.
This file serves as entry point to the implementation of UNet3D for
medical image segmentation.
Example usage:
$ python main.py --exec_mode train --data_dir ./data --batch_size 2
--max_steps 1600 --amp
All arguments are listed under `python main.py -h`.
Full argument definition can be found in `arguments.py`.
"""
import os
import numpy as np
import horovod.tensorflow as hvd
from model.model_fn import unet_3d
from dataset.data_loader import Dataset, CLASSES
from runtime.hooks import get_hooks
from runtime.arguments import PARSER
from runtime.setup import build_estimator, set_flags, get_logger
def parse_evaluation_results(result, logger, step=()):
"""
Parse DICE scores from the evaluation results
:param result: Dictionary with metrics collected by the optimizer
:param logger: Logger object
    :param step: Step or tuple of steps used when logging
    :return: Dictionary with the parsed DICE scores
"""
data = {CLASSES[i]: float(result[CLASSES[i]]) for i in range(len(CLASSES))}
data['mean_dice'] = sum([result[CLASSES[i]] for i in range(len(CLASSES))]) / len(CLASSES)
data['whole_tumor'] = float(result['whole_tumor'])
if hvd.rank() == 0:
logger.log(step=step, data=data)
return data
def main():
""" Starting point of the application """
hvd.init()
set_flags()
params = PARSER.parse_args()
logger = get_logger(params)
dataset = Dataset(data_dir=params.data_dir,
batch_size=params.batch_size,
fold_idx=params.fold,
n_folds=params.num_folds,
input_shape=params.input_shape,
params=params)
estimator = build_estimator(params=params, model_fn=unet_3d)
hooks = get_hooks(params, logger)
if 'train' in params.exec_mode:
max_steps = params.max_steps // (1 if params.benchmark else hvd.size())
estimator.train(
input_fn=dataset.train_fn,
steps=max_steps,
hooks=hooks)
if 'evaluate' in params.exec_mode:
result = estimator.evaluate(input_fn=dataset.eval_fn, steps=dataset.eval_size)
_ = parse_evaluation_results(result, logger)
if params.exec_mode == 'predict':
if hvd.rank() == 0:
predictions = estimator.predict(
input_fn=dataset.test_fn, hooks=hooks)
for idx, pred in enumerate(predictions):
volume = pred['predictions']
if not params.benchmark:
np.save(os.path.join(params.model_dir, "vol_{}.npy".format(idx)), volume)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/main.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Preprocess dataset and prepare it for training
Example usage:
$ python preprocess_data.py --input_dir ./src --output_dir ./dst
--vol_per_file 2
All arguments are listed under `python preprocess_data.py -h`.
"""
import os
import argparse
from random import shuffle
import numpy as np
import nibabel as nib
import tensorflow as tf
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--input_dir', '-i',
type=str, help='path to the input directory with data')
PARSER.add_argument('--output_dir', '-o',
type=str, help='path to the output directory where tfrecord files will be stored')
PARSER.add_argument('--verbose', '-v', dest='verbose', action='store_true', default=False)
PARSER.add_argument('--vol_per_file', default=4, dest='vol_per_file',
type=int, help='how many volumes to pack into a single tfrecord file')
PARSER.add_argument('--single_data_dir', dest='single_data_dir', action='store_true', default=False)
def load_features(path):
""" Load features from Nifti
:param path: Path to dataset
:return: Loaded data
"""
data = np.zeros((240, 240, 155, 4), dtype=np.uint8)
name = os.path.basename(path)
for i, modality in enumerate(["_t1.nii.gz", "_t1ce.nii.gz", "_t2.nii.gz", "_flair.nii.gz"]):
vol = load_single_nifti(os.path.join(path, name + modality)).astype(np.float32)
vol[vol > 0.85 * vol.max()] = 0.85 * vol.max()
vol = 255 * vol / vol.max()
data[..., i] = vol.astype(np.uint8)
return data
def load_segmentation(path):
""" Load segmentations from Nifti
:param path: Path to dataset
:return: Loaded data
"""
path = os.path.join(path, os.path.basename(path)) + "_seg.nii.gz"
return load_single_nifti(path).astype(np.uint8)
def load_single_nifti(path):
""" Load Nifti file as numpy
:param path: Path to file
:return: Loaded data
"""
data = nib.load(path).get_fdata().astype(np.int16)
return np.transpose(data, (1, 0, 2))
def write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, # pylint: disable=R0913
count):
""" Dump numpy array to tfrecord
:param features_list: List of features
:param labels_list: List of labels
:param foreground_mean_list: List of means for each volume
:param foreground_std_list: List of std for each volume
:param output_dir: Directory where to write
:param count: Index of the record
:return:
"""
output_filename = os.path.join(output_dir, "volume-{}.tfrecord".format(count))
filelist = list(zip(np.array(features_list),
np.array(labels_list),
np.array(foreground_mean_list),
np.array(foreground_std_list)))
np_to_tfrecords(filelist, output_filename)
def np_to_tfrecords(filelist, output_filename):
""" Convert numpy array to tfrecord
:param filelist: List of files
    :param output_filename: Path of the destination tfrecord file
"""
writer = tf.io.TFRecordWriter(output_filename)
for file_item in filelist:
        sample = file_item[0].flatten().tobytes()
        label = file_item[1].flatten().tobytes()
mean = file_item[2].astype(np.float32).flatten()
stdev = file_item[3].astype(np.float32).flatten()
d_feature = {}
d_feature['X'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[sample]))
d_feature['Y'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[label]))
d_feature['mean'] = tf.train.Feature(float_list=tf.train.FloatList(value=mean))
d_feature['stdev'] = tf.train.Feature(float_list=tf.train.FloatList(value=stdev))
features = tf.train.Features(feature=d_feature)
example = tf.train.Example(features=features)
serialized = example.SerializeToString()
writer.write(serialized)
writer.close()
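# Usage sketch (illustrative, `record` stands for one serialized example): data
# written by np_to_tfrecords can be parsed back with the same feature schema
# the training data loader uses.
#
#   features = {
#       'X': tf.io.FixedLenFeature([], tf.string),
#       'Y': tf.io.FixedLenFeature([], tf.string),
#       'mean': tf.io.FixedLenFeature([4], tf.float32),
#       'stdev': tf.io.FixedLenFeature([4], tf.float32),
#   }
#   parsed = tf.io.parse_single_example(serialized=record, features=features)
#   sample = tf.reshape(tf.io.decode_raw(parsed['X'], tf.uint8), (240, 240, 155, 4))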
def main(): # pylint: disable=R0914
""" Starting point of the application"""
params = PARSER.parse_args()
input_dir = params.input_dir
output_dir = params.output_dir
os.makedirs(params.output_dir, exist_ok=True)
patient_list = []
if params.single_data_dir:
patient_list.extend([os.path.join(input_dir, folder) for folder in os.listdir(input_dir)])
else:
assert "HGG" in os.listdir(input_dir) and "LGG" in os.listdir(input_dir), \
"Data directory has to contain folders named HGG and LGG. " \
"If you have a single folder with patient's data please set --single_data_dir flag"
path_hgg = os.path.join(input_dir, "HGG")
path_lgg = os.path.join(input_dir, "LGG")
patient_list.extend([os.path.join(path_hgg, folder) for folder in os.listdir(path_hgg)])
patient_list.extend([os.path.join(path_lgg, folder) for folder in os.listdir(path_lgg)])
shuffle(patient_list)
features_list = []
labels_list = []
foreground_mean_list = []
foreground_std_list = []
count = 0
total_tfrecord_files = len(patient_list) // params.vol_per_file + (1 if len(patient_list) % params.vol_per_file
else 0)
for i, folder in enumerate(patient_list):
# Calculate mean and stdev only for foreground voxels
features = load_features(folder)
foreground = features > 0
fg_mean = np.array([(features[..., i][foreground[..., i]]).mean() for i in range(features.shape[-1])])
fg_std = np.array([(features[..., i][foreground[..., i]]).std() for i in range(features.shape[-1])])
# BraTS labels are 0,1,2,4 -> switching to 0,1,2,3
labels = load_segmentation(folder)
labels[labels == 4] = 3
features_list.append(features)
labels_list.append(labels)
foreground_mean_list.append(fg_mean)
foreground_std_list.append(fg_std)
if (i + 1) % params.vol_per_file == 0:
write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count)
# Clear lists
features_list = []
labels_list = []
foreground_mean_list = []
foreground_std_list = []
count += 1
if params.verbose:
print("{}/{} tfrecord files created".format(count, total_tfrecord_files))
# create one more file if there are any remaining unpacked volumes
if features_list:
write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count)
count += 1
if params.verbose:
print("{}/{} tfrecord files created".format(count, total_tfrecord_files))
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/dataset/preprocess_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Transforms for 3D data augmentation """
import tensorflow as tf
def apply_transforms(samples, labels, mean, stdev, transforms):
""" Apply a chain of transforms to a pair of samples and labels """
for _t in transforms:
if _t is not None:
samples, labels = _t(samples, labels, mean, stdev)
return samples, labels
def apply_test_transforms(samples, mean, stdev, transforms):
""" Apply a chain of transforms to a samples using during test """
for _t in transforms:
if _t is not None:
samples = _t(samples, labels=None, mean=mean, stdev=stdev)
return samples
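# Usage sketch (illustrative): a minimal training-time chain built from the
# transform classes defined below; `None` entries are skipped, which makes
# augmentations easy to toggle.
#
#   transforms = [
#       RandomCrop3D((128, 128, 128)),
#       Cast(dtype=tf.float32),
#       NormalizeImages(),
#       OneHotLabels(n_classes=4),
#   ]
#   samples, labels = apply_transforms(samples, labels, mean, stdev, transforms)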
class PadXYZ: # pylint: disable=R0903
""" Pad volume in three dimensiosn """
def __init__(self, shape=None):
""" Add padding
:param shape: Target shape
"""
self.shape = shape
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: Padded samples and labels
"""
paddings = tf.constant([[0, 0], [0, 0], [0, 5], [0, 0]])
samples = tf.pad(samples, paddings, "CONSTANT")
if labels is None:
return samples
labels = tf.pad(labels, paddings, "CONSTANT")
return samples, labels
class CenterCrop: # pylint: disable=R0903
""" Produce a central crop in 3D """
def __init__(self, shape):
""" Create op
:param shape: Target shape for crop
"""
self.shape = shape
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: Cropped samples and labels
"""
shape = samples.get_shape()
delta = [(shape[i].value - self.shape[i]) // 2 for i in range(len(self.shape))]
samples = samples[
delta[0]:delta[0] + self.shape[0],
delta[1]:delta[1] + self.shape[1],
delta[2]:delta[2] + self.shape[2]]
if labels is None:
return samples
labels = labels[
delta[0]:delta[0] + self.shape[0],
delta[1]:delta[1] + self.shape[1],
delta[2]:delta[2] + self.shape[2]]
return samples, labels
class RandomCrop3D: # pylint: disable=R0903
""" Produce a random 3D crop """
def __init__(self, shape, margins=(0, 0, 0)):
""" Create op
:param shape: Target shape
:param margins: Margins within to perform the crop
"""
self.shape = shape
self.margins = margins
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: Cropped samples and labels
"""
shape = samples.get_shape()
min_ = tf.constant(self.margins, dtype=tf.float32)
max_ = tf.constant([shape[0].value - self.shape[0] - self.margins[0],
shape[1].value - self.shape[1] - self.margins[1],
shape[2].value - self.shape[2] - self.margins[2]],
dtype=tf.float32)
center = tf.random_uniform((len(self.shape),), minval=min_, maxval=max_)
center = tf.cast(center, dtype=tf.int32)
samples = samples[center[0]:center[0] + self.shape[0],
center[1]:center[1] + self.shape[1],
center[2]:center[2] + self.shape[2]]
if labels is None:
return samples
labels = labels[center[0]:center[0] + self.shape[0],
center[1]:center[1] + self.shape[1],
center[2]:center[2] + self.shape[2]]
return samples, labels
class NormalizeImages: # pylint: disable=R0903
""" Run zscore normalization """
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean
:param stdev: Std
:return: Normalized samples and labels
"""
mask = tf.math.greater(samples, 0)
samples = tf.where(mask, (samples - tf.cast(mean, samples.dtype)) / (tf.cast(stdev + 1e-8, samples.dtype)),
samples)
if labels is None:
return samples
return samples, labels
class Cast: # pylint: disable=R0903
""" Cast samples and labels to different precision """
def __init__(self, dtype=tf.float32):
self._dtype = dtype
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
        :return: Cast samples and labels
"""
if labels is None:
return tf.cast(samples, dtype=self._dtype)
return tf.cast(samples, dtype=self._dtype), labels
class RandomHorizontalFlip: # pylint: disable=R0903
""" Randomly flip horizontally a pair of samples and labels"""
def __init__(self, threshold=0.5):
self._threshold = threshold
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: Flipped samples and labels
"""
h_flip = tf.random_uniform([]) > self._threshold
samples = tf.cond(h_flip, lambda: tf.reverse(samples, axis=[1]), lambda: samples)
labels = tf.cond(h_flip, lambda: tf.reverse(labels, axis=[1]), lambda: labels)
return samples, labels
class RandomVerticalFlip: # pylint: disable=R0903
""" Randomly flip vertically a pair of samples and labels"""
def __init__(self, threshold=0.5):
self._threshold = threshold
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: Flipped samples and labels
"""
h_flip = tf.random_uniform([]) > self._threshold
samples = tf.cond(h_flip, lambda: tf.reverse(samples, axis=[0]), lambda: samples)
labels = tf.cond(h_flip, lambda: tf.reverse(labels, axis=[0]), lambda: labels)
return samples, labels
class RandomGammaCorrection: # pylint: disable=R0903
""" Random gamma correction over samples """
def __init__(self, gamma_range=(0.8, 1.5), keep_stats=False, threshold=0.5, epsilon=1e-8):
self._gamma_range = gamma_range
self._keep_stats = keep_stats
self._eps = epsilon
self._threshold = threshold
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
        :return: Gamma-corrected samples and unchanged labels
"""
augment = tf.random_uniform([]) > self._threshold
gamma = tf.random_uniform([], minval=self._gamma_range[0], maxval=self._gamma_range[1])
x_min = tf.math.reduce_min(samples)
x_range = tf.math.reduce_max(samples) - x_min
samples = tf.cond(augment,
                          lambda: tf.math.pow((samples - x_min) / (x_range + self._eps),
                                              gamma) * x_range + x_min,
lambda: samples)
return samples, labels
class RandomBrightnessCorrection: # pylint: disable=R0903
""" Random brightness correction over samples """
def __init__(self, alpha=0.1, threshold=0.5, per_channel=True):
self._alpha_range = [1.0 - alpha, 1.0 + alpha]
self._threshold = threshold
self._per_channel = per_channel
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
        :return: Brightness-corrected samples and unchanged labels
"""
mask = tf.math.greater(samples, 0)
size = samples.get_shape()[-1].value if self._per_channel else 1
augment = tf.random_uniform([]) > self._threshold
correction = tf.random_uniform([size],
minval=self._alpha_range[0],
maxval=self._alpha_range[1],
dtype=samples.dtype)
samples = tf.cond(augment,
lambda: tf.where(mask, samples + correction, samples),
lambda: samples)
return samples, labels
class OneHotLabels: # pylint: disable=R0903
""" One hot encoding of labels """
def __init__(self, n_classes=1):
self._n_classes = n_classes
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays (unused)
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: One hot encoded labels
"""
return samples, tf.one_hot(labels, self._n_classes)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/dataset/transforms.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data loader """
import os
import numpy as np
import horovod.tensorflow as hvd
import tensorflow as tf
from dataset.transforms import NormalizeImages, OneHotLabels, apply_transforms, PadXYZ, RandomCrop3D, \
RandomHorizontalFlip, RandomBrightnessCorrection, CenterCrop, \
apply_test_transforms, Cast
CLASSES = {0: "tumor_core", 1: "peritumoral_edema", 2: "enhancing_tumor"}
def cross_validation(arr: np.ndarray, fold_idx: int, n_folds: int):
""" Split data into folds for training and evaluation
:param arr: Collection items to split
    :param fold_idx: Index of the cross-validation fold
:param n_folds: Total number of folds
:return: Train and Evaluation folds
"""
if fold_idx < 0 or fold_idx >= n_folds:
raise ValueError('Fold index has to be [0, n_folds). Received index {} for {} folds'.format(fold_idx, n_folds))
_folders = np.array_split(arr, n_folds)
return np.concatenate(_folders[:fold_idx] + _folders[fold_idx + 1:]), _folders[fold_idx]
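# Usage sketch (illustrative): split ten shards into five folds and keep
# fold 0 for evaluation.
#
#   shards = np.array(['volume-{}.tfrecord'.format(i) for i in range(10)])
#   train, val = cross_validation(shards, fold_idx=0, n_folds=5)
#   # len(train) == 8, len(val) == 2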
class Dataset: # pylint: disable=R0902
""" Class responsible for the data loading during training, inference and evaluation """
def __init__(self, data_dir, batch_size=2, input_shape=(128, 128, 128), # pylint: disable=R0913
fold_idx=0, n_folds=5, seed=0, params=None):
""" Creates and configures the dataset
:param data_dir: Directory where the data is stored
:param batch_size: Number of pairs to be provided by batch
:param input_shape: Dimension of the input to the model
        :param fold_idx: Fold index for cross-validation
        :param n_folds: Total number of folds in cross-validation
:param seed: Random seed
:param params: Dictionary with additional configuration parameters
"""
self._folders = np.array([os.path.join(data_dir, path) for path in os.listdir(data_dir)
                                  if path.endswith(".tfrecord")])  # matches volume-<i>.tfrecord written by preprocess_data.py
assert len(self._folders) > 0, "No matching data found at {}".format(data_dir)
self._train, self._eval = cross_validation(self._folders, fold_idx=fold_idx, n_folds=n_folds)
self._input_shape = input_shape
self._data_dir = data_dir
self.params = params
self._batch_size = batch_size
self._seed = seed
self._xshape = (240, 240, 155, 4)
self._yshape = (240, 240, 155)
def parse(self, serialized):
""" Parse TFRecord
:param serialized: Serialized record for a particular example
:return: sample, label, mean and std of intensities
"""
features = {
'X': tf.io.FixedLenFeature([], tf.string),
'Y': tf.io.FixedLenFeature([], tf.string),
'mean': tf.io.FixedLenFeature([4], tf.float32),
'stdev': tf.io.FixedLenFeature([4], tf.float32)
}
parsed_example = tf.io.parse_single_example(serialized=serialized,
features=features)
sample = tf.io.decode_raw(parsed_example['X'], tf.uint8)
sample = tf.cast(tf.reshape(sample, self._xshape), tf.uint8)
label = tf.io.decode_raw(parsed_example['Y'], tf.uint8)
label = tf.cast(tf.reshape(label, self._yshape), tf.uint8)
mean = parsed_example['mean']
stdev = parsed_example['stdev']
return sample, label, mean, stdev
def parse_x(self, serialized):
""" Parse only the sample in a TFRecord with sample and label
        :param serialized: Serialized record for a particular example
:return: sample, mean and std of intensities
"""
features = {'X': tf.io.FixedLenFeature([], tf.string),
'Y': tf.io.FixedLenFeature([], tf.string),
'mean': tf.io.FixedLenFeature([4], tf.float32),
'stdev': tf.io.FixedLenFeature([4], tf.float32)}
parsed_example = tf.io.parse_single_example(serialized=serialized,
features=features)
sample = tf.io.decode_raw(parsed_example['X'], tf.uint8)
sample = tf.cast(tf.reshape(sample, self._xshape), tf.uint8)
mean = parsed_example['mean']
stdev = parsed_example['stdev']
return sample, mean, stdev
def train_fn(self):
""" Create dataset for training """
if 'debug' in self.params.exec_mode:
return self.synth_train_fn()
assert len(self._train) > 0, "Training data not found."
dataset = tf.data.TFRecordDataset(filenames=self._train)
dataset = dataset.shard(hvd.size(), hvd.rank())
dataset = dataset.cache()
dataset = dataset.shuffle(buffer_size=self._batch_size * 8, seed=self._seed)
dataset = dataset.repeat()
dataset = dataset.map(self.parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
transforms = [
RandomCrop3D(self._input_shape),
RandomHorizontalFlip() if self.params.augment else None,
Cast(dtype=tf.float32),
NormalizeImages(),
RandomBrightnessCorrection() if self.params.augment else None,
OneHotLabels(n_classes=4),
]
dataset = dataset.map(
map_func=lambda x, y, mean, stdev: apply_transforms(x, y, mean, stdev, transforms=transforms),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size=self._batch_size,
drop_remainder=True)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
if self._batch_size == 1:
options = dataset.options()
options.experimental_optimization.map_and_batch_fusion = False
dataset = dataset.with_options(options)
return dataset
def eval_fn(self):
""" Create dataset for evaluation """
dataset = tf.data.TFRecordDataset(filenames=self._eval)
assert len(self._eval) > 0, "Evaluation data not found. Did you specify --fold flag?"
dataset = dataset.cache()
dataset = dataset.map(self.parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
transforms = [
CenterCrop((224, 224, 155)),
Cast(dtype=tf.float32),
NormalizeImages(),
OneHotLabels(n_classes=4),
PadXYZ()
]
dataset = dataset.map(
map_func=lambda x, y, mean, stdev: apply_transforms(x, y, mean, stdev, transforms=transforms),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size=self._batch_size,
drop_remainder=False)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def test_fn(self):
""" Create dataset for inference """
if 'debug' in self.params.exec_mode:
return self.synth_predict_fn()
count = 1 if not self.params.benchmark \
else 2 * self.params.warmup_steps * self.params.batch_size // self.test_size
dataset = tf.data.TFRecordDataset(filenames=self._eval)
assert len(self._eval) > 0, "Evaluation data not found. Did you specify --fold flag?"
dataset = dataset.repeat(count)
dataset = dataset.map(self.parse_x, num_parallel_calls=tf.data.experimental.AUTOTUNE)
transforms = [
CenterCrop((224, 224, 155)),
Cast(dtype=tf.float32),
NormalizeImages(),
PadXYZ((224, 224, 160))
]
dataset = dataset.map(
map_func=lambda x, mean, stdev: apply_test_transforms(x, mean, stdev, transforms=transforms),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size=self._batch_size,
drop_remainder=self.params.benchmark)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def export_fn(self):
""" Create dataset for calibrating and exporting """
dataset = tf.data.TFRecordDataset(filenames=self._eval)
assert len(self._eval) > 0, "Evaluation data not found. Did you specify --fold flag?"
dataset = dataset.repeat(1)
dataset = dataset.map(self.parse_x, num_parallel_calls=tf.data.experimental.AUTOTUNE)
transforms = [
CenterCrop((224, 224, 155)),
Cast(dtype=tf.float32),
NormalizeImages(),
PadXYZ((224, 224, 160))
]
dataset = dataset.map(
map_func=lambda x, mean, stdev: apply_test_transforms(x, mean, stdev, transforms=transforms),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size=self._batch_size,
drop_remainder=True)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def synth_train_fn(self):
""" Synthetic data function for training """
inputs = tf.random.uniform(self._xshape, dtype=tf.int32, minval=0, maxval=255, seed=self._seed,
name='synth_inputs')
masks = tf.random.uniform(self._yshape, dtype=tf.int32, minval=0, maxval=4, seed=self._seed,
name='synth_masks')
mean = tf.random.uniform((4,), dtype=tf.float32, minval=0, maxval=255, seed=self._seed)
stddev = tf.random.uniform((4,), dtype=tf.float32, minval=0, maxval=1, seed=self._seed)
dataset = tf.data.Dataset.from_tensors((inputs, masks))
dataset = dataset.repeat()
transforms = [
Cast(dtype=tf.uint8),
RandomCrop3D((128, 128, 128)),
RandomHorizontalFlip() if self.params.augment else None,
Cast(dtype=tf.float32),
NormalizeImages(),
RandomBrightnessCorrection() if self.params.augment else None,
OneHotLabels(n_classes=4),
]
dataset = dataset.map(map_func=lambda x, y: apply_transforms(x, y, mean, stddev, transforms),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def synth_predict_fn(self):
"""Synthetic data function for testing"""
inputs = tf.random.truncated_normal((224, 224, 160, 4), dtype=tf.float32, mean=0.0, stddev=1.0, seed=self._seed,
name='synth_inputs')
count = 2 * self.params.warmup_steps
dataset = tf.data.Dataset.from_tensors(inputs)
dataset = dataset.repeat(count)
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
@property
def train_size(self):
""" Number of pairs in the training set """
return len(self._train)
@property
def eval_size(self):
""" Number of pairs in the validation set """
return len(self._eval)
@property
def test_size(self):
""" Number of pairs in the test set """
return len(self._eval)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/dataset/data_loader.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Hooks for metric collection and benchmarking """
import time
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
def get_hooks(params, logger):
""" Get the appropriate set of hooks given the configuration
:param params: Dict with additional parameters
:param logger: Logger object
:return: Set of hooks
"""
hooks = []
if params.exec_mode == 'debug_train':
return get_debug_training_hooks(logger, params)
if params.exec_mode == 'debug_predict':
return get_debug_predict_hooks(logger, params)
if 'train' in params.exec_mode:
return get_training_hooks(logger, params)
if params.exec_mode == 'predict':
return get_predict_hooks(logger, params)
return hooks
def get_debug_predict_hooks(logger, params):
""" Return hooks for debugging prediction
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = []
if hvd.rank() == 0:
hooks += [ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=params.batch_size,
logger=logger,
mode='inference')]
return hooks
def get_debug_training_hooks(logger, params):
""" Return hooks for debugging training
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if hvd.rank() == 0:
hooks += [TrainingHook(log_every=params.log_every,
logger=logger,
tensor_names=['total_loss_ref:0']),
ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=hvd.size() * params.batch_size,
logger=logger,
mode='train')]
return hooks
def get_predict_hooks(logger, params):
""" Return hooks for prediction
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = []
if hvd.rank() == 0:
if params.benchmark:
hooks = [ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=params.batch_size,
logger=logger,
mode='test')]
return hooks
def get_training_hooks(logger, params):
""" Return hooks for training
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if hvd.rank() == 0:
hooks += [OomReportingHook()]
if params.benchmark:
hooks += [ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=hvd.size() * params.batch_size,
logger=logger,
mode='train')]
else:
hooks += [TrainingHook(log_every=params.log_every,
logger=logger,
tensor_names=['total_loss_ref:0'])]
return hooks
class ProfilingHook(tf.estimator.SessionRunHook):
""" Hook for profiling metrics """
def __init__(self, warmup_steps, global_batch_size, logger, mode):
""" Build hook
:param warmup_steps: Number of steps to skip initially
        :param global_batch_size: Number of samples per batch across all GPUs
:param logger: Logger object
:param mode: Estimator's execution mode
"""
self._warmup_steps = warmup_steps
self._global_batch_size = global_batch_size
self._step = 0
self._timestamps = []
self._logger = logger
self._mode = mode
def before_run(self, _):
""" Execute before run """
self._step += 1
if self._step >= self._warmup_steps:
self._timestamps.append(time.time())
def end(self, _):
""" Execute on completion """
        deltas = np.diff(self._timestamps)
        stats = process_performance_stats(deltas,
                                          self._global_batch_size,
                                          self._mode)
self._logger.log(step=(), data=stats)
self._logger.flush()
class TrainingHook(tf.estimator.SessionRunHook):
""" Hook for training metrics """
def __init__(self, log_every, logger, tensor_names):
""" Build hook for training
:param log_every: Logging frequency
:param logger: Logger object
:param tensor_names: Names of the tensors to log
"""
self._log_every = log_every
self._step = 0
self._logger = logger
self._tensor_names = tensor_names
def before_run(self, _):
""" Execute before run """
run_args = tf.compat.v1.train.SessionRunArgs(
fetches=self._tensor_names
)
return run_args
def after_run(self,
_,
run_values):
""" Execute after run
:param run_values: Values to capture
:return:
"""
if self._step % self._log_every == 0:
for i in range(len(self._tensor_names)):
self._logger.log(step=(self._step,), data={self._tensor_names[i]: str(run_values.results[i])})
self._step += 1
def end(self, _):
""" Execute on completion """
self._logger.flush()
class OomReportingHook(tf.estimator.SessionRunHook): # pylint: disable=R0903
""" Report for out of memory errors"""
def before_run(self, _): # pylint: disable=R0201
""" Execute before run """
return tf.estimator.SessionRunArgs(fetches=[], # no extra fetches
options=tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom=True))
def process_performance_stats(timestamps, batch_size, mode):
""" Get confidence intervals
:param timestamps: Collection of timestamps
:param batch_size: Number of samples per batch
:param mode: Estimator's execution mode
:return: Stats
"""
timestamps_ms = 1000 * timestamps
throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
stats = {f"throughput_{mode}": throughput_imgps,
f"latency_{mode}_mean": timestamps_ms.mean()}
for level in [90, 95, 99]:
stats.update({f"latency_{mode}_{level}": np.percentile(timestamps_ms, level)})
return stats
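# Usage sketch (illustrative): for 100 steps of 250 ms each with a global batch
# size of 2 volumes, the reported throughput is 8 volumes/s.
#
#   deltas = np.full(100, 0.25)
#   stats = process_performance_stats(deltas, batch_size=2, mode='train')
#   # stats['throughput_train'] == 8.0, stats['latency_train_mean'] == 250.0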
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/runtime/hooks.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Parsing of results"""
import os
import argparse
def parse_convergence_results(path, environment):
""" Parse convergence results utility
:param path: Path to results
:param environment: System environment
"""
whole_tumor = []
tumor_core = []
peritumoral_edema = []
enhancing_tumor = []
mean_dice = []
logfiles = [f for f in os.listdir(path) if "log" in f and environment in f]
if not logfiles:
raise FileNotFoundError("No logfile found at {}".format(path))
for logfile in logfiles:
with open(os.path.join(path, logfile), "r") as file_item:
content = file_item.readlines()
if "tumor_core" not in content[-1]:
print("Evaluation score not found. The file", logfile, "might be corrupted.")
continue
content = content[-1].split("()")[1]
whole_tumor.append(float([val for val in content.split(" ")
if "whole_tumor" in val][0].split()[-1]))
tumor_core.append(float([val for val in content.split(" ")
if "tumor_core" in val][0].split()[-1]))
peritumoral_edema.append(float([val for val in content.split(" ")
if "peritumoral_edema" in val][0].split()[-1]))
enhancing_tumor.append(float([val for val in content.split(" ")
if "enhancing_tumor" in val][0].split()[-1]))
mean_dice.append(float([val for val in content.split(" ")
if "mean_dice" in val][0].split()[-1]))
if whole_tumor:
print("Evaluation average dice score:", sum(mean_dice) / len(mean_dice))
print("Evaluation whole tumor dice score:", sum(whole_tumor) / len(whole_tumor))
print("Evaluation tumor core dice score:", sum(tumor_core) / len(tumor_core))
print("Evaluation peritumoral edema dice score:", sum(peritumoral_edema) / len(peritumoral_edema))
print("Evaluation enhancing tumor dice score:", sum(enhancing_tumor) / len(enhancing_tumor))
else:
print("All logfiles were corrupted, no loss was obtained.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir',
type=str,
required=True)
parser.add_argument('--env',
type=str,
required=True)
args = parser.parse_args()
parse_convergence_results(path=args.model_dir, environment=args.env)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/runtime/parse_results.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Command line argument parsing """
import argparse
PARSER = argparse.ArgumentParser(description="UNet-3D")
# Estimator flags
PARSER.add_argument('--model_dir', required=True, type=str)
PARSER.add_argument('--exec_mode', choices=['train', 'evaluate', 'train_and_evaluate',
'predict', 'debug_train', 'debug_predict'], type=str)
# Training flags
PARSER.add_argument('--benchmark', dest='benchmark', action='store_true', default=False)
PARSER.add_argument('--max_steps', default=16000, type=int)
PARSER.add_argument('--learning_rate', default=0.0002, type=float)
PARSER.add_argument('--log_every', default=100, type=int)
PARSER.add_argument('--log_dir', type=str)
PARSER.add_argument('--loss', choices=['dice', 'ce', 'dice+ce'], default='dice+ce', type=str)
PARSER.add_argument('--warmup_steps', default=40, type=int)
PARSER.add_argument('--normalization', choices=['instancenorm', 'batchnorm', 'groupnorm'],
default='instancenorm', type=str)
PARSER.add_argument('--include_background', dest='include_background', action='store_true', default=False)
PARSER.add_argument('--resume_training', dest='resume_training', action='store_true', default=False)
PARSER.add_argument('--seed', default=0, type=int)
# Augmentations
PARSER.add_argument('--augment', dest='augment', action='store_true', default=False)
# Dataset flags
PARSER.add_argument('--data_dir', required=True, type=str)
PARSER.add_argument('--input_shape', nargs='+', type=int, default=[128, 128, 128])
PARSER.add_argument('--batch_size', default=1, type=int)
PARSER.add_argument('--fold', default=0, type=int)
PARSER.add_argument('--num_folds', default=5, type=int)
# Tensorflow configuration flags
PARSER.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true', default=False)
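# Usage sketch (illustrative): a minimal training configuration; `--model_dir`
# and `--data_dir` are the only required flags.
#
#   params = PARSER.parse_args(['--model_dir', '/results', '--data_dir', '/data',
#                               '--exec_mode', 'train', '--amp'])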
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/runtime/arguments.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils for setting up different parts of the execution """
import os
import multiprocessing
import numpy as np
import dllogger as logger
from dllogger import StdOutBackend, Verbosity, JSONStreamBackend
import tensorflow as tf
import horovod.tensorflow as hvd
def set_flags():
""" Set necessary flags for execution """
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_CACHE_DISABLE'] = '1'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
def prepare_model_dir(params):
""" Prepare the directory where checkpoints are stored
:param params: Dict with additional parameters
:return: Path to model dir
"""
model_dir = os.path.join(params.model_dir, "model_checkpoint")
model_dir = model_dir if (hvd.rank() == 0 and not params.benchmark) else None
if model_dir is not None:
os.makedirs(model_dir, exist_ok=True)
if ('train' in params.exec_mode) and (not params.resume_training):
os.system('rm -rf {}/*'.format(model_dir))
return model_dir
def build_estimator(params, model_fn):
""" Build estimator
:param params: Dict with additional parameters
:param model_fn: Model graph
:return: Estimator
"""
np.random.seed(params.seed)
tf.compat.v1.random.set_random_seed(params.seed)
model_dir = prepare_model_dir(params)
config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(), allow_soft_placement=True)
if params.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.intra_op_parallelism_threads = 1
config.inter_op_parallelism_threads = max(2, (multiprocessing.cpu_count() // hvd.size()) - 2)
if params.use_amp:
config.graph_options.rewrite_options.auto_mixed_precision = 1
checkpoint_steps = (params.max_steps // hvd.size()) if hvd.rank() == 0 else None
checkpoint_steps = checkpoint_steps if not params.benchmark else None
run_config = tf.estimator.RunConfig(
save_summary_steps=params.max_steps,
tf_random_seed=params.seed,
session_config=config,
save_checkpoints_steps=checkpoint_steps,
keep_checkpoint_max=1)
return tf.estimator.Estimator(model_fn=model_fn,
model_dir=model_dir,
config=run_config,
params=params)
def get_logger(params):
""" Get logger object
:param params: Dict with additional parameters
:return: logger
"""
backends = []
if hvd.rank() == 0:
backends += [StdOutBackend(Verbosity.VERBOSE)]
if params.log_dir:
backends += [JSONStreamBackend(Verbosity.VERBOSE, params.log_dir)]
logger.init(backends=backends)
logger.metadata("whole_tumor", {"unit": None})
logger.metadata("throughput_test", {"unit": "volumes/s"})
logger.metadata("throughput_train", {"unit": "volumes/s"})
return logger
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/runtime/setup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" UNet3D model construction """
from model.layers import downsample_block, upsample_block, output_layer, input_block
class Builder: # pylint: disable=R0903
""" Model builder """
def __init__(self, n_classes, mode, normalization='none'):
""" Configure the unet3d builder
:param n_classes: Number of output channels
:param mode: Estimator's execution mode
:param normalization: Name of the normalization layer
"""
self._n_classes = n_classes
self._mode = mode
self._normalization = normalization
def __call__(self, features):
""" Build UNet3D
:param features: Input features
:return: Output of the graph
"""
skip_128 = input_block(inputs=features,
out_channels=32,
normalization=self._normalization,
mode=self._mode)
skip_64 = downsample_block(inputs=skip_128,
out_channels=64,
normalization=self._normalization,
mode=self._mode)
skip_32 = downsample_block(inputs=skip_64,
out_channels=128,
normalization=self._normalization,
mode=self._mode)
skip_16 = downsample_block(inputs=skip_32,
out_channels=256,
normalization=self._normalization,
mode=self._mode)
skip_8 = downsample_block(inputs=skip_16,
out_channels=320,
normalization=self._normalization,
mode=self._mode)
out = downsample_block(inputs=skip_8,
out_channels=320,
normalization=self._normalization,
mode=self._mode)
out = upsample_block(out, skip_8,
out_channels=320,
normalization=self._normalization,
mode=self._mode)
out = upsample_block(out, skip_16,
out_channels=256,
normalization=self._normalization,
mode=self._mode)
out = upsample_block(out, skip_32,
out_channels=128,
normalization=self._normalization,
mode=self._mode)
out = upsample_block(out, skip_64,
out_channels=64,
normalization=self._normalization,
mode=self._mode)
out = upsample_block(out, skip_128,
out_channels=32,
normalization=self._normalization,
mode=self._mode)
return output_layer(out,
out_channels=self._n_classes,
activation='softmax')
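# Usage sketch (illustrative; assumes the TF1.x graph mode this repository
# targets): a 128^3 input with four modalities yields softmax scores with one
# channel per class at the same spatial resolution.
#
#   import tensorflow as tf
#   features = tf.compat.v1.placeholder(tf.float32, shape=(1, 128, 128, 128, 4))
#   logits = Builder(n_classes=4, mode=tf.estimator.ModeKeys.PREDICT,
#                    normalization='instancenorm')(features)
#   # logits.get_shape() == (1, 128, 128, 128, 4)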
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/model/unet3d.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Model function in charge to collect metrics and feed them to the optimizer """
import horovod.tensorflow as hvd
import tensorflow as tf
from model.unet3d import Builder
from model.losses import make_loss, eval_dice, total_dice
from dataset.data_loader import CLASSES
def unet_3d(features, labels, mode, params):
""" Gather loss and feed it to the optimizer
:param features: Input features
:param labels: Input labels
:param mode: Estimator's execution mode
:param params: Dict with additional parameters
:return: Estimator spec
"""
# TODO: Find a better way to handle the empty params namespace
try:
normalization = params.normalization
    except AttributeError:
normalization = 'instancenorm'
input_node = tf.identity(features, name='input_node')
logits = Builder(n_classes=4, normalization=normalization, mode=mode)(input_node)
logits = tf.identity(logits, name='output_node')
if mode == tf.estimator.ModeKeys.PREDICT:
prediction = tf.argmax(input=logits, axis=-1, output_type=tf.dtypes.int32, name="predictions")
return tf.estimator.EstimatorSpec(mode=mode,
predictions={'predictions': tf.cast(prediction, tf.int8)})
labels = tf.cast(labels, tf.float32)
if mode == tf.estimator.ModeKeys.EVAL:
prediction = tf.argmax(input=logits, axis=-1, output_type=tf.dtypes.int32)
prediction = tf.one_hot(prediction, 4)
if not params.include_background:
labels = labels[..., 1:]
prediction = prediction[..., 1:]
prediction = tf.cast(prediction, tf.float32)
eval_acc = eval_dice(y_true=labels, y_pred=prediction)
total_eval_acc = total_dice(prediction, labels)
metrics = {CLASSES[i]: tf.compat.v1.metrics.mean(eval_acc[i]) for i in range(eval_acc.shape[-1])}
metrics['whole_tumor'] = tf.compat.v1.metrics.mean(total_eval_acc)
return tf.estimator.EstimatorSpec(mode=mode, loss=tf.reduce_mean(eval_acc),
eval_metric_ops=metrics)
if not params.include_background:
labels = labels[..., 1:]
logits = logits[..., 1:]
loss = make_loss(params, y_pred=logits, y_true=labels)
loss = tf.identity(loss, name="total_loss_ref")
global_step = tf.compat.v1.train.get_or_create_global_step()
    # Piecewise schedule: warm-up at lr/4 for the first quarter of training,
    # full lr until the halfway point, then progressively decayed values.
    boundaries = [params.max_steps // (4 * hvd.size()),
                  params.max_steps // (2 * hvd.size()),
                  3 * params.max_steps // (4 * hvd.size())]
lr = params.learning_rate
values = [lr / 4, lr, lr / 5, lr / 20]
learning_rate = tf.compat.v1.train.piecewise_constant(global_step, boundaries, values)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
if params.use_amp:
loss_scale = tf.train.experimental.DynamicLossScale()
optimizer = tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer(optimizer, loss_scale)
optimizer = hvd.DistributedOptimizer(optimizer)
with tf.control_dependencies(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/model/model_fn.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Different losses for UNet3D """
import tensorflow as tf
def make_loss(params, y_true, y_pred):
""" Factory method for loss functions
:param params: Dict with additional parameters
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: Loss
"""
if params.loss == 'dice':
return _dice(y_true, y_pred)
if params.loss == 'ce':
return _ce(y_true, y_pred)
if params.loss == 'dice+ce':
return tf.add(_ce(y_true, y_pred), _dice(y_true, y_pred), name="total_loss_ref")
raise ValueError('Unknown loss: {}'.format(params.loss))
def _ce(y_true, y_pred):
""" Crossentropy
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: loss
"""
return tf.reduce_sum(
tf.reduce_mean(tf.keras.backend.binary_crossentropy(tf.cast(y_true, tf.float32), y_pred), axis=[0, 1, 2, 3]),
name='crossentropy_loss_ref')
def _dice(y_true, y_pred):
""" Training dice
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: loss
"""
return tf.reduce_sum(dice_loss(predictions=y_pred, targets=y_true), name='dice_loss_ref')
def eval_dice(y_true, y_pred):
""" Evaluation dice
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: loss
"""
return 1 - dice_loss(predictions=y_pred, targets=y_true)
def dice_loss(predictions,
targets,
squared_pred=False,
smooth=1e-5,
top_smooth=0.0):
""" Dice
:param predictions: Predicted labels
:param targets: Ground truth labels
    :param squared_pred: Square the predictions and targets before reduction
:param smooth: Smooth term for denominator
:param top_smooth: Smooth term for numerator
:return: loss
"""
is_channels_first = False
n_len = len(predictions.get_shape())
reduce_axis = list(range(2, n_len)) if is_channels_first else list(range(1, n_len - 1))
intersection = tf.reduce_sum(targets * predictions, axis=reduce_axis)
if squared_pred:
targets = tf.square(targets)
predictions = tf.square(predictions)
y_true_o = tf.reduce_sum(targets, axis=reduce_axis)
y_pred_o = tf.reduce_sum(predictions, axis=reduce_axis)
denominator = y_true_o + y_pred_o
dice = (2.0 * intersection + top_smooth) / (denominator + smooth)
return 1 - tf.reduce_mean(dice, axis=0)
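# Sanity-check sketch (illustrative): a perfect one-hot prediction gives a
# per-class Dice of ~1, so dice_loss is ~0 for every channel.
#
#   import numpy as np
#   y = tf.constant(np.eye(4, dtype=np.float32).reshape(1, 2, 2, 1, 4))
#   print(dice_loss(predictions=y, targets=y))  # ~[0., 0., 0., 0.]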
def total_dice(predictions,
targets,
smooth=1e-5,
top_smooth=0.0):
""" Total Dice
:param predictions: Predicted labels
:param targets: Ground truth labels
:param smooth: Smooth term for denominator
:param top_smooth: Smooth term for numerator
:return: loss
"""
n_len = len(predictions.get_shape())
reduce_axis = list(range(1, n_len-1))
targets = tf.reduce_sum(targets, axis=-1)
predictions = tf.reduce_sum(predictions, axis=-1)
intersection = tf.reduce_sum(targets * predictions, axis=reduce_axis)
y_true_o = tf.reduce_sum(targets, axis=reduce_axis)
y_pred_o = tf.reduce_sum(predictions, axis=reduce_axis)
denominator = y_true_o + y_pred_o
return tf.reduce_mean((2.0 * intersection + top_smooth) / (denominator + smooth))
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/model/losses.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" High level definition of layers for model construction """
import tensorflow as tf
def _normalization(inputs, name, mode):
""" Choose a normalization layer
:param inputs: Input node from the graph
:param name: Name of layer
:param mode: Estimator's execution mode
:return: Normalized output
"""
training = mode == tf.estimator.ModeKeys.TRAIN
if name == 'instancenorm':
gamma_initializer = tf.constant_initializer(1.0)
return tf.contrib.layers.instance_norm(
inputs,
center=True,
scale=True,
epsilon=1e-6,
param_initializers={'gamma': gamma_initializer},
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format='NHWC',
scope=None)
if name == 'groupnorm':
return tf.contrib.layers.group_norm(inputs=inputs,
groups=16,
channels_axis=-1,
reduction_axes=(-4, -3, -2),
activation_fn=None,
trainable=True)
if name == 'batchnorm':
return tf.keras.layers.BatchNormalization(axis=-1,
trainable=True,
virtual_batch_size=None)(inputs, training=training)
if name == 'none':
return inputs
raise ValueError('Invalid normalization layer')
def _activation(out, activation):
""" Choose an activation layer
:param out: Input node from the graph
:param activation: Name of layer
:return: Activation output
"""
if activation == 'relu':
return tf.nn.relu(out)
if activation == 'leaky_relu':
return tf.nn.leaky_relu(out, alpha=0.01)
if activation == 'sigmoid':
return tf.nn.sigmoid(out)
if activation == 'softmax':
return tf.nn.softmax(out, axis=-1)
if activation == 'none':
return out
raise ValueError("Unknown activation {}".format(activation))
def convolution(inputs, # pylint: disable=R0913
out_channels,
kernel_size=3,
stride=1,
mode=tf.estimator.ModeKeys.TRAIN,
normalization='batchnorm',
activation='leaky_relu',
transpose=False):
""" Create a convolution layer
:param inputs: Input node from graph
:param out_channels: Output number of channels
:param kernel_size: Size of the kernel
:param stride: Stride of the kernel
:param mode: Estimator's execution mode
:param normalization: Name of the normalization layer
:param activation: Name of the activation layer
:param transpose: Select between regular and transposed convolution
:return: Convolution output
"""
if transpose:
conv = tf.keras.layers.Conv3DTranspose
else:
conv = tf.keras.layers.Conv3D
regularizer = None # tf.keras.regularizers.l2(1e-5)
use_bias = normalization == "none"
inputs = conv(filters=out_channels,
kernel_size=kernel_size,
strides=stride,
activation=None,
padding='same',
data_format='channels_last',
kernel_initializer=tf.compat.v1.glorot_uniform_initializer(),
kernel_regularizer=regularizer,
bias_initializer=tf.zeros_initializer(),
bias_regularizer=regularizer,
use_bias=use_bias)(inputs)
inputs = _normalization(inputs, normalization, mode)
return _activation(inputs, activation)
def upsample_block(inputs, skip_connection, out_channels, normalization, mode):
""" Create a block for upsampling
:param inputs: Input node from the graph
:param skip_connection: Choose whether or not to use skip connection
:param out_channels: Number of output channels
    :param normalization: Name of the normalization layer
:param mode: Estimator's execution mode
:return: Output from the upsample block
"""
inputs = convolution(inputs, kernel_size=2, out_channels=out_channels, stride=2,
normalization='none', activation='none', transpose=True)
inputs = tf.keras.layers.Concatenate(axis=-1)([inputs, skip_connection])
inputs = convolution(inputs, out_channels=out_channels, normalization=normalization, mode=mode)
inputs = convolution(inputs, out_channels=out_channels, normalization=normalization, mode=mode)
return inputs
def input_block(inputs, out_channels, normalization, mode):
""" Create the input block
:param inputs: Input node from the graph
:param out_channels: Number of output channels
:param normalization: Name of the normalization layer
:param mode: Estimator's execution mode
:return: Output from the input block
"""
inputs = convolution(inputs, out_channels=out_channels, normalization=normalization, mode=mode)
inputs = convolution(inputs, out_channels=out_channels, normalization=normalization, mode=mode)
return inputs
def downsample_block(inputs, out_channels, normalization, mode):
""" Create a downsample block
:param inputs: Input node from the graph
:param out_channels: Number of output channels
:param normalization: Name of the normalization layer
:param mode: Estimator's execution mode
:return: Output from the downsample block
"""
inputs = convolution(inputs, out_channels=out_channels, normalization=normalization, mode=mode, stride=2)
return convolution(inputs, out_channels=out_channels, normalization=normalization, mode=mode)
def output_layer(inputs, out_channels, activation):
""" Create the output layer
:param inputs: Input node from the graph
:param out_channels: Number of output channels
:param activation: Name of the activation layer
:return: Output from the output block
"""
return convolution(inputs, out_channels=out_channels, kernel_size=3, normalization='none', activation=activation)
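# Illustrative sketch (not in the original file): wiring the blocks above into a
# minimal encoder-decoder. Assumes a TF1.x graph runtime; the input shape and
# channel counts are arbitrary placeholders chosen for the example.
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, shape=[1, 32, 32, 32, 4])
    mode = tf.estimator.ModeKeys.PREDICT
    skip = input_block(x, out_channels=8, normalization='none', mode=mode)
    down = downsample_block(skip, out_channels=16, normalization='none', mode=mode)
    up = upsample_block(down, skip_connection=skip, out_channels=8,
                        normalization='none', mode=mode)
    out = output_layer(up, out_channels=3, activation='softmax')
    print(out.shape)  # (1, 32, 32, 32, 3)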
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_3D_Medical/model/layers.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import tarfile
from google_drive_downloader import GoogleDriveDownloader as gdd
PARSER = argparse.ArgumentParser(description="V-Net medical")
PARSER.add_argument('--data_dir',
type=str,
default='./data',
help="""Directory where to download the dataset""")
PARSER.add_argument('--dataset',
type=str,
default='hippocampus',
help="""Dataset to download""")
def main():
FLAGS = PARSER.parse_args()
if not os.path.exists(FLAGS.data_dir):
os.makedirs(FLAGS.data_dir)
filename = ''
if FLAGS.dataset == 'hippocampus':
filename = 'Task04_Hippocampus.tar'
gdd.download_file_from_google_drive(file_id='1RzPB1_bqzQhlWvU-YGvZzhx2omcDh38C',
dest_path=os.path.join(FLAGS.data_dir, filename),
unzip=False)
print('Unpacking...')
    with tarfile.open(os.path.join(FLAGS.data_dir, filename)) as tar:
        tar.extractall(path=FLAGS.data_dir)
print('Cleaning up...')
os.remove(os.path.join(FLAGS.data_dir, filename))
print("Finished downloading files for V-Net medical to {}".format(FLAGS.data_dir))
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/download_dataset.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import tensorflow as tf
from utils.data_loader import MSDDataset
from utils.model_fn import vnet_v2
from utils.tf_export import to_savedmodel, to_tf_trt, to_onnx
PARSER = argparse.ArgumentParser(description="V-Net")
PARSER.add_argument('--to', dest='to', choices=['savedmodel', 'tftrt', 'onnx'], required=True)
PARSER.add_argument('--use_amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--use_xla', dest='use_xla', action='store_true', default=False)
PARSER.add_argument('--compress', dest='compress', action='store_true', default=False)
PARSER.add_argument('--input_shape',
nargs='+',
type=int,
help="""Model's input shape""")
PARSER.add_argument('--data_dir',
type=str,
help="""Directory where the dataset is located""")
PARSER.add_argument('--checkpoint_dir',
type=str,
help="""Directory where the checkpoint is located""")
PARSER.add_argument('--savedmodel_dir',
type=str,
help="""Directory where the savedModel is located""")
PARSER.add_argument('--precision',
type=str,
choices=['FP32', 'FP16', 'INT8'],
help="""Precision for the model""")
def main():
"""
Starting point of the application
"""
flags = PARSER.parse_args()
if flags.to == 'savedmodel':
params = {
'labels': ['0', '1', '2'],
'batch_size': 1,
'input_shape': flags.input_shape,
'convolution_size': 3,
'downscale_blocks': [3, 3, 3],
'upscale_blocks': [3, 3],
'upsampling': 'transposed_conv',
'pooling': 'conv_pool',
'normalization_layer': 'batchnorm',
'activation': 'relu'
}
to_savedmodel(input_shape=flags.input_shape,
model_fn=vnet_v2,
checkpoint_dir=flags.checkpoint_dir,
output_dir='./saved_model',
input_names=['IteratorGetNext'],
output_names=['vnet/loss/total_loss_ref'],
use_amp=flags.use_amp,
use_xla=flags.use_xla,
compress=flags.compress,
params=argparse.Namespace(**params))
if flags.to == 'tftrt':
ds = MSDDataset(json_path=flags.data_dir + "/dataset.json",
interpolator='linear')
iterator = ds.test_fn(count=1).make_one_shot_iterator()
features = iterator.get_next()
sess = tf.Session()
def input_data():
return {'input_tensor:0': sess.run(features)}
to_tf_trt(savedmodel_dir=flags.savedmodel_dir,
output_dir='./tf_trt_model',
precision=flags.precision,
feed_dict_fn=input_data,
num_runs=1,
output_tensor_names=['vnet/Softmax:0'],
compress=flags.compress)
if flags.to == 'onnx':
raise NotImplementedError('Currently ONNX not supported for 3D models')
if __name__ == '__main__':
main()
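# Hypothetical example invocations (paths are illustrative placeholders, not
# taken from the repository docs):
#   python export.py --to savedmodel --input_shape 1 32 32 32 1 --checkpoint_dir ./results
#   python export.py --to tftrt --savedmodel_dir ./saved_model --precision FP16 --data_dir ./data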
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/export.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import pickle
import shutil
import horovod.tensorflow as hvd
import tensorflow as tf
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from hooks.profiling_hook import ProfilingHook
from hooks.train_hook import TrainHook
from utils.cmd_util import PARSER
from utils.data_loader import MSDDataset
from utils.model_fn import vnet_v2
def main(_):
tf.get_logger().setLevel(logging.ERROR)
hvd.init()
FLAGS = PARSER.parse_args()
backends = []
if hvd.rank() == 0:
backends += [StdOutBackend(Verbosity.DEFAULT)]
if FLAGS.log_dir:
backends += [JSONStreamBackend(Verbosity.DEFAULT, FLAGS.log_dir)]
DLLogger.init(backends=backends)
for key in vars(FLAGS):
DLLogger.log(step="PARAMETER", data={str(key): vars(FLAGS)[key]})
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['TF_DISABLE_NVTX_RANGES'] = '1'
dataset = MSDDataset(json_path=os.path.join(FLAGS.data_dir, 'dataset.json'),
dst_size=FLAGS.input_shape,
seed=FLAGS.seed,
interpolator=FLAGS.resize_interpolator,
data_normalization=FLAGS.data_normalization,
batch_size=FLAGS.batch_size,
train_split=FLAGS.train_split,
split_seed=FLAGS.split_seed)
FLAGS.labels = dataset.labels
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
if FLAGS.use_amp:
config.graph_options.rewrite_options.auto_mixed_precision = 1
run_config = tf.estimator.RunConfig(
save_summary_steps=None,
save_checkpoints_steps=None if FLAGS.benchmark else dataset.train_steps * FLAGS.train_epochs,
save_checkpoints_secs=None,
tf_random_seed=None,
session_config=config,
keep_checkpoint_max=1)
estimator = tf.estimator.Estimator(
model_fn=vnet_v2,
model_dir=FLAGS.model_dir if hvd.rank() == 0 else None,
config=run_config,
params=FLAGS)
train_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if 'train' in FLAGS.exec_mode:
steps = dataset.train_steps * FLAGS.train_epochs
if FLAGS.benchmark:
steps = FLAGS.warmup_steps * 2
if hvd.rank() == 0:
train_hooks += [ProfilingHook(FLAGS.warmup_steps, FLAGS.batch_size * hvd.size(), DLLogger)]
else:
if hvd.rank() == 0:
train_hooks += [TrainHook(FLAGS.log_every, DLLogger)]
estimator.train(
input_fn=lambda: dataset.train_fn(FLAGS.augment),
steps=steps,
hooks=train_hooks)
if 'evaluate' in FLAGS.exec_mode:
if hvd.rank() == 0:
if FLAGS.train_split >= 1.0:
raise ValueError("Missing argument: --train_split < 1.0")
result = estimator.evaluate(
input_fn=dataset.eval_fn,
steps=dataset.eval_steps,
hooks=[])
DLLogger.log(step=tuple(), data={'background_dice': str(result['background dice']),
'anterior_dice': str(result['Anterior dice']),
'posterior_dice': str(result['Posterior dice'])})
if 'predict' in FLAGS.exec_mode:
count = 1
hooks = []
if hvd.rank() == 0:
if FLAGS.benchmark:
count = math.ceil((FLAGS.warmup_steps * 2) / dataset.test_steps)
hooks += [ProfilingHook(FLAGS.warmup_steps, FLAGS.batch_size * hvd.size(), DLLogger, training=False)]
predictions = estimator.predict(input_fn=lambda: dataset.test_fn(count=count),
hooks=hooks)
pred = [p['prediction'] for p in predictions]
predict_path = os.path.join(FLAGS.model_dir, 'predictions')
if os.path.exists(predict_path):
shutil.rmtree(predict_path)
os.makedirs(predict_path)
pickle.dump(pred, open(os.path.join(predict_path, 'predictions.pkl'), 'wb'))
if __name__ == '__main__':
tf.compat.v1.app.run()
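# Hypothetical single-GPU invocation (flags are defined in utils/cmd_util.py;
# paths are illustrative placeholders):
#   python main.py --exec_mode train --data_dir ./data/Task04_Hippocampus \
#       --model_dir ./results --batch_size 8 --train_epochs 40 --augment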
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/main.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
PARSER = argparse.ArgumentParser(description="VNet")
PARSER.add_argument('--exec_mode',
choices=['train', 'predict', 'train_and_predict', 'train_and_evaluate'],
required=True,
type=str)
PARSER.add_argument('--data_normalization',
choices=['zscore'],
default='zscore',
type=str)
PARSER.add_argument('--activation',
choices=['relu'],
default='relu',
type=str)
PARSER.add_argument('--resize_interpolator',
choices=['linear'],
default='linear',
type=str)
PARSER.add_argument('--loss',
choices=['dice'],
default='dice',
type=str)
PARSER.add_argument('--normalization_layer',
choices=['batchnorm'],
default='batchnorm',
type=str)
PARSER.add_argument('--pooling',
choices=['conv_pool'],
default='conv_pool',
type=str)
PARSER.add_argument('--upsampling',
choices=['transposed_conv'],
default='transposed_conv',
type=str)
PARSER.add_argument('--seed',
default=0,
type=int)
PARSER.add_argument('--input_shape', nargs='+', type=int, default=[32, 32, 32])
PARSER.add_argument('--upscale_blocks', nargs='+', type=int, default=[3, 3])
PARSER.add_argument('--downscale_blocks', nargs='+', type=int, default=[3, 3, 3])
PARSER.add_argument('--convolution_size',
choices=[3, 5],
default=3,
type=int)
PARSER.add_argument('--batch_size',
required=True,
type=int)
PARSER.add_argument('--log_every',
default=10,
type=int)
PARSER.add_argument('--warmup_steps',
default=200,
type=int)
PARSER.add_argument('--train_epochs',
default=1,
type=int)
PARSER.add_argument('--optimizer',
choices=['rmsprop'],
default='rmsprop',
type=str)
PARSER.add_argument('--gradient_clipping',
choices=['global_norm'],
default='global_norm',
type=str)
PARSER.add_argument('--base_lr',
default=0.0001,
type=float)
PARSER.add_argument('--momentum',
default=0.0,
type=float)
PARSER.add_argument('--train_split',
default=1.0,
type=float)
PARSER.add_argument('--split_seed',
default=0,
type=int)
PARSER.add_argument('--model_dir',
required=True,
type=str)
PARSER.add_argument('--log_dir',
default=None,
type=str)
PARSER.add_argument('--data_dir',
required=True,
type=str)
PARSER.add_argument('--benchmark', dest='benchmark', action='store_true', default=False)
PARSER.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true', default=False)
PARSER.add_argument('--augment', dest='augment', action='store_true', default=False)
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/utils/cmd_util.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import multiprocessing
import os
import SimpleITK as sitk
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
from scipy import stats
def parse_nifti(path, dtype, dst_size, interpolator, normalization=None, modality=None):
sitk_image = load_image(path)
sitk_image = resize_image(sitk_image,
dst_size=dst_size,
interpolator=interpolator)
image = sitk_to_np(sitk_image)
if modality and 'CT' not in modality:
if normalization:
image = stats.zscore(image, axis=None)
elif modality:
raise NotImplementedError
return image
def make_ref_image(img_path, dst_size, interpolator):
ref_image = load_image(img_path)
ref_image = resize_image(ref_image, dst_size=dst_size,
interpolator=interpolator)
    ref_array = sitk_to_np(ref_image)
    return ref_array / np.max(ref_array) * 255
def make_interpolator(interpolator):
if interpolator == 'linear':
return sitk.sitkLinear
else:
raise ValueError("Unknown interpolator type")
def load_image(img_path):
image = sitk.ReadImage(img_path)
if image.GetDimension() == 4:
image = sitk.GetImageFromArray(sitk.GetArrayFromImage(image)[-1, :, :, :])
if image.GetPixelID() != sitk.sitkFloat32:
return sitk.Cast(image, sitk.sitkFloat32)
return image
def sitk_to_np(sitk_img):
return np.transpose(sitk.GetArrayFromImage(sitk_img), [2, 1, 0])
def resize_image(sitk_img,
dst_size=(128, 128, 64),
interpolator=sitk.sitkNearestNeighbor):
reference_image = sitk.Image(dst_size, sitk_img.GetPixelIDValue())
reference_image.SetOrigin(sitk_img.GetOrigin())
reference_image.SetDirection(sitk_img.GetDirection())
reference_image.SetSpacing(
[sz * spc / nsz for nsz, sz, spc in zip(dst_size, sitk_img.GetSize(), sitk_img.GetSpacing())])
return sitk.Resample(sitk_img, reference_image, sitk.Transform(3, sitk.sitkIdentity), interpolator)
class MSDJsonParser:
def __init__(self, json_path):
with open(json_path) as f:
data = json.load(f)
self._labels = data.get('labels')
self._x_train = [os.path.join(os.path.dirname(json_path), p['image']) for p in data.get('training')]
self._y_train = [os.path.join(os.path.dirname(json_path), p['label']) for p in data.get('training')]
self._x_test = [os.path.join(os.path.dirname(json_path), p) for p in data.get('test')]
self._modality = [data.get('modality')[k] for k in data.get('modality').keys()]
@property
def labels(self):
return self._labels
@property
def x_train(self):
return self._x_train
@property
def y_train(self):
return self._y_train
@property
def x_test(self):
return self._x_test
@property
def modality(self):
return self._modality
def make_split(json_parser, train_split, split_seed=0):
np.random.seed(split_seed)
train_size = int(len(json_parser.x_train) * train_split)
return np.array(json_parser.x_train)[:train_size], np.array(json_parser.y_train)[:train_size], \
np.array(json_parser.x_train)[train_size:], np.array(json_parser.y_train)[train_size:]
class MSDDataset(object):
def __init__(self, json_path,
dst_size=[128, 128, 64],
seed=None,
interpolator=None,
data_normalization=None,
batch_size=1,
train_split=1.0,
split_seed=0):
self._json_parser = MSDJsonParser(json_path)
self._interpolator = make_interpolator(interpolator)
self._ref_image = make_ref_image(img_path=self._json_parser.x_test[0],
dst_size=dst_size,
interpolator=self._interpolator)
np.random.seed(split_seed)
self._train_img, self._train_label, \
self._eval_img, self._eval_label = make_split(self._json_parser, train_split)
self._test_img = np.array(self._json_parser.x_test)
self._dst_size = dst_size
self._seed = seed
self._batch_size = batch_size
self._train_split = train_split
self._data_normalization = data_normalization
np.random.seed(self._seed)
@property
def labels(self):
return self._json_parser.labels
@property
def train_steps(self):
global_batch_size = hvd.size() * self._batch_size
return math.ceil(
len(self._train_img) / global_batch_size)
@property
def eval_steps(self):
return math.ceil(len(self._eval_img) / self._batch_size)
@property
def test_steps(self):
return math.ceil(len(self._test_img) / self._batch_size)
def _parse_image(self, img):
return parse_nifti(path=img,
dst_size=self._dst_size,
dtype=tf.float32,
interpolator=self._interpolator,
normalization=self._data_normalization,
modality=self._json_parser.modality)
def _parse_label(self, label):
return parse_nifti(path=label,
dst_size=self._dst_size,
dtype=tf.int32,
interpolator=sitk.sitkNearestNeighbor)
def _augment(self, x, y):
# Horizontal flip
h_flip = tf.random_uniform([]) > 0.5
x = tf.cond(h_flip, lambda: tf.image.flip_left_right(x), lambda: x)
y = tf.cond(h_flip, lambda: tf.image.flip_left_right(y), lambda: y)
# Vertical flip
v_flip = tf.random_uniform([]) > 0.5
x = tf.cond(v_flip, lambda: tf.image.flip_up_down(x), lambda: x)
y = tf.cond(v_flip, lambda: tf.image.flip_up_down(y), lambda: y)
return x, y
def _img_generator(self, collection):
for element in collection:
yield self._parse_image(element)
def _label_generator(self, collection):
for element in collection:
yield self._parse_label(element)
def train_fn(self, augment):
images = tf.data.Dataset.from_generator(generator=lambda: self._img_generator(self._train_img),
output_types=tf.float32,
output_shapes=(32, 32, 32))
labels = tf.data.Dataset.from_generator(generator=lambda: self._label_generator(self._train_label),
output_types=tf.int32,
output_shapes=(32, 32, 32))
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size=self._batch_size * 2,
reshuffle_each_iteration=True,
seed=self._seed)
dataset = dataset.shard(hvd.size(), hvd.rank())
if augment:
dataset = dataset.apply(
tf.data.experimental.map_and_batch(map_func=self._augment,
batch_size=self._batch_size,
drop_remainder=True,
num_parallel_calls=multiprocessing.cpu_count()))
else:
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def eval_fn(self):
images = tf.data.Dataset.from_generator(generator=lambda: self._img_generator(self._eval_img),
output_types=tf.float32,
output_shapes=(32, 32, 32))
labels = tf.data.Dataset.from_generator(generator=lambda: self._label_generator(self._eval_label),
output_types=tf.int32,
output_shapes=(32, 32, 32))
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.cache()
dataset = dataset.batch(self._batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def test_fn(self, count=1):
dataset = tf.data.Dataset.from_generator(generator=lambda: self._img_generator(self._test_img),
output_types=tf.float32,
output_shapes=(32, 32, 32))
dataset = dataset.cache()
dataset = dataset.repeat(count=count)
dataset = dataset.batch(self._batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
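# Illustrative usage sketch (not part of the original file): constructing the
# dataset wrapper and pulling one training batch. Assumes a TF1.x session, an
# initialized Horovod runtime (train_fn shards by hvd.rank()), and a local copy
# of the MSD hippocampus data; the json path below is a placeholder.
if __name__ == '__main__':
    hvd.init()
    ds = MSDDataset(json_path='./data/Task04_Hippocampus/dataset.json',
                    dst_size=[32, 32, 32],
                    interpolator='linear',
                    data_normalization='zscore',
                    batch_size=2)
    images, labels = ds.train_fn(augment=False).make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        x, y = sess.run([images, labels])
        print(x.shape, y.shape)  # (2, 32, 32, 32) (2, 32, 32, 32)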
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/utils/data_loader.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import horovod.tensorflow as hvd
import tensorflow as tf
from model.vnet import Builder
from utils.var_storage import model_variable_scope
def dice_coef(predict, target, dice_type, axis=1, eps=1e-6):
intersection = tf.reduce_sum(predict * target, axis=axis)
if dice_type == 'sorensen':
union = tf.reduce_sum(predict + target, axis=axis)
else:
raise ValueError("dice_type must be either sorensen")
dice = (2 * intersection + eps) / (union + eps)
return tf.reduce_mean(dice, axis=0) # average over batch
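# Worked example (illustrative, not part of the original file): for one-hot
# tensors of shape [batch, voxels, classes], dice_coef with axis=1 yields one
# Sorensen dice value per class. E.g. predict == target == [[[1., 0.], [0., 1.]]]
# gives intersection = [1, 1] and union = [2, 2] per class, so
# dice = (2 * 1 + eps) / (2 + eps) ~= 1.0 for both classes.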
def vnet_v2(features, labels, mode, params):
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_eval = (mode == tf.estimator.ModeKeys.EVAL)
is_predict = (mode == tf.estimator.ModeKeys.PREDICT)
num_classes = len(params.labels)
channel_axis = -1
with model_variable_scope(
'vnet',
reuse=tf.AUTO_REUSE,
dtype=tf.float16,
debug_mode=False
):
features = tf.reshape(features,
[params.batch_size] + params.input_shape + [1])
if labels is not None:
labels = tf.reshape(labels,
[params.batch_size] + params.input_shape + [1])
logits = Builder(kernel_size=params.convolution_size,
n_classes=num_classes,
downscale_blocks=params.downscale_blocks,
upscale_blocks=params.upscale_blocks,
upsampling=params.upsampling,
pooling=params.pooling,
normalization=params.normalization_layer,
activation=params.activation,
mode=mode)(features)
softmax = tf.nn.softmax(logits=logits, axis=channel_axis)
if is_predict:
prediction = tf.argmax(input=softmax, axis=channel_axis)
predictions = {'prediction': prediction}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Flattened logits and softmax - in FP32
flattened_softmax = tf.reshape(softmax, [tf.shape(logits)[0], -1, num_classes])
flattened_softmax = tf.cast(flattened_softmax, tf.float32)
# One hot encoding
flattened_labels = tf.layers.flatten(labels)
one_hot_labels = tf.one_hot(indices=flattened_labels,
depth=num_classes,
dtype=tf.float32)
with tf.name_scope("loss"):
if params.loss == 'dice':
loss = dice_coef(predict=tf.cast(flattened_softmax, tf.float32),
target=one_hot_labels,
dice_type='sorensen')
total_loss = tf.identity(tf.reduce_sum(1. - loss),
name='total_loss_ref')
else:
raise NotImplementedError
train_op = None
if is_training:
global_step = tf.train.get_or_create_global_step()
with tf.name_scope("optimizer"):
if params.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=params.base_lr,
momentum=params.momentum,
centered=True)
else:
raise NotImplementedError
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
gradients, variables = zip(*optimizer.compute_gradients(total_loss))
if params.gradient_clipping == 'global_norm':
gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
tf.logging.info('clipping: global_norm')
else:
                    raise NotImplementedError
optimizer = hvd.DistributedOptimizer(optimizer)
try:
amp_envar_enabled = (int(os.environ['TF_ENABLE_AUTO_MIXED_PRECISION']) == 1)
except KeyError:
amp_envar_enabled = False
if params.use_amp and not amp_envar_enabled:
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer,
loss_scale='dynamic'
)
train_op = optimizer.minimize(total_loss, global_step=global_step)
eval_metric_ops = None
if is_eval:
dice_loss = dice_coef(predict=tf.cast(flattened_softmax, tf.float32),
target=one_hot_labels,
dice_type='sorensen')
eval_loss = tf.identity(dice_loss, name='eval_loss_ref')
eval_metric_ops = {}
for i in range(num_classes):
eval_metric_ops['%s dice' % params.labels[str(i)]] = tf.metrics.mean(eval_loss[i])
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op,
eval_metric_ops=eval_metric_ops)
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/utils/model_fn.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import inspect
import os
import shutil
import subprocess
from argparse import Namespace
from typing import List, Callable
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def _compress(src_path: str, dst_path: str):
"""
Compress source path into destination path
:param src_path: (str) Source path
:param dst_path: (str) Destination path
"""
print('[*] Compressing...')
shutil.make_archive(dst_path, 'zip', src_path)
print('[*] Compressed the contents in: {}.zip'.format(dst_path))
def _print_input(func: Callable):
"""
Decorator printing function name and args
:param func: (Callable) Decorated function
:return: Wrapped call
"""
def wrapper(*args, **kwargs):
"""
Print the name and arguments of a function
:param args: Named arguments
:param kwargs: Keyword arguments
:return: Original function call
"""
tf.logging.set_verbosity(tf.logging.ERROR)
func_args = inspect.signature(func).bind(*args, **kwargs).arguments
func_args_str = ''.join('\t{} = {!r}\n'.format(*item) for item in func_args.items())
print('[*] Running \'{}\' with arguments:'.format(func.__qualname__))
print(func_args_str[:-1])
return func(*args, **kwargs)
return wrapper
def _parse_placeholder_types(values: str):
"""
    Extract placeholder types from a comma-separated list.
:param values: (str) Placeholder types
:return: (List) Placeholder types
"""
values = [int(value) for value in values.split(",")]
return values if len(values) > 1 else values[0]
def _optimize_checkpoint_for_inference(graph_path: str,
input_names: List[str],
output_names: List[str]):
"""
Removes Horovod and training related information from the graph
:param graph_path: (str) Path to the graph.pbtxt file
:param input_names: (str) Input node names
:param output_names: (str) Output node names
"""
print('[*] Optimizing graph for inference ...')
input_graph_def = graph_pb2.GraphDef()
with gfile.Open(graph_path, "rb") as f:
data = f.read()
text_format.Merge(data.decode("utf-8"), input_graph_def)
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
input_graph_def,
input_names,
output_names,
_parse_placeholder_types(str(dtypes.float32.as_datatype_enum)),
False)
print('[*] Saving original graph in: {}'.format(graph_path + '.old'))
shutil.move(graph_path, graph_path + '.old')
print('[*] Writing down optimized graph ...')
graph_io.write_graph(output_graph_def,
os.path.dirname(graph_path),
os.path.basename(graph_path))
@_print_input
def to_savedmodel(input_shape: str,
model_fn: Callable,
checkpoint_dir: str,
output_dir: str,
input_names: List[str],
output_names: List[str],
use_amp: bool,
use_xla: bool,
compress: bool,
params: Namespace):
"""
Export checkpoint to Tensorflow savedModel
:param input_shape: (str) Input shape to the model in format [batch, height, width, channels]
:param model_fn: (Callable) Estimator's model_fn
:param checkpoint_dir: (str) Directory where checkpoints are stored
:param output_dir: (str) Output directory for storage of the generated savedModel
:param input_names: (List[str]) Input node names
:param output_names: (List[str]) Output node names
    :param use_amp: (bool) Enable TF-AMP
:param use_xla: (bool) Enable XLA
:param compress: (bool) Compress output
:param params: (Namespace) Namespace to be passed to model_fn
"""
assert os.path.exists(checkpoint_dir), 'Path not found: {}'.format(checkpoint_dir)
assert input_shape is not None, 'Input shape must be provided'
_optimize_checkpoint_for_inference(os.path.join(checkpoint_dir, 'graph.pbtxt'), input_names, output_names)
try:
ckpt_path = os.path.splitext([p for p in glob.iglob(os.path.join(checkpoint_dir, '*.index'))][0])[0]
except IndexError:
raise ValueError('Could not find checkpoint in directory: {}'.format(checkpoint_dir))
config_proto = tf.compat.v1.ConfigProto()
config_proto.allow_soft_placement = True
config_proto.log_device_placement = False
config_proto.gpu_options.allow_growth = True
config_proto.gpu_options.force_gpu_compatible = True
if use_amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
if use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
run_config = tf.estimator.RunConfig(
model_dir=None,
tf_random_seed=None,
save_summary_steps=1e9, # disabled
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=config_proto,
keep_checkpoint_max=None,
keep_checkpoint_every_n_hours=1e9, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None
)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=ckpt_path,
config=run_config,
params=params
)
print('[*] Exporting the model ...')
input_type = tf.float16 if use_amp else tf.float32
def get_serving_input_receiver_fn():
def serving_input_receiver_fn():
features = tf.placeholder(dtype=input_type, shape=input_shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features)
return serving_input_receiver_fn
export_path = estimator.export_saved_model(
export_dir_base=output_dir,
serving_input_receiver_fn=get_serving_input_receiver_fn(),
checkpoint_path=ckpt_path
)
print('[*] Done! path: `%s`' % export_path.decode())
if compress:
_compress(export_path.decode(), os.path.join(output_dir, 'saved_model'))
@_print_input
def to_tf_trt(savedmodel_dir: str,
output_dir: str,
precision: str,
feed_dict_fn: Callable,
num_runs: int,
output_tensor_names: List[str],
compress: bool):
"""
Export Tensorflow savedModel to TF-TRT
:param savedmodel_dir: (str) Input directory containing a Tensorflow savedModel
:param output_dir: (str) Output directory for storage of the generated TF-TRT exported model
:param precision: (str) Desired precision of the network (FP32, FP16 or INT8)
:param feed_dict_fn: (Callable) Input tensors for INT8 calibration. Model specific.
:param num_runs: (int) Number of calibration runs.
:param output_tensor_names: (List) Name of the output tensor for graph conversion. Model specific.
:param compress: (bool) Compress output
"""
if savedmodel_dir is None or not os.path.exists(savedmodel_dir):
raise FileNotFoundError('savedmodel_dir not found: {}'.format(savedmodel_dir))
if os.path.exists(output_dir):
print('[*] Output dir \'{}\' is not empty. Cleaning up ...'.format(output_dir))
shutil.rmtree(output_dir)
print('[*] Converting model...')
converter = trt.TrtGraphConverter(input_saved_model_dir=savedmodel_dir,
precision_mode=precision)
converter.convert()
if precision == 'INT8':
print('[*] Running INT8 calibration ...')
converter.calibrate(fetch_names=output_tensor_names, num_runs=num_runs, feed_dict_fn=feed_dict_fn)
converter.save(output_dir)
print('[*] Done! TF-TRT saved_model stored in: `%s`' % output_dir)
if compress:
        _compress(output_dir, 'tftrt_saved_model')
@_print_input
def to_onnx(input_dir: str, output_dir: str, compress: bool):
"""
Convert Tensorflow savedModel to ONNX with tf2onnx
:param input_dir: (str) Input directory with a Tensorflow savedModel
:param output_dir: (str) Output directory where to store the ONNX version of the model
:param compress: (bool) Compress output
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
file_name = os.path.join(output_dir, 'model.onnx')
print('[*] Converting model...')
ret = subprocess.call(['python', '-m', 'tf2onnx.convert',
'--saved-model', input_dir,
'--output', file_name],
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
if ret > 0:
raise RuntimeError('tf2onnx.convert has failed with error: {}'.format(ret))
print('[*] Done! ONNX file stored in: %s' % file_name)
if compress:
_compress(output_dir, 'onnx_model')
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/utils/tf_export.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
__all__ = ['model_variable_scope']
def model_variable_scope(name, reuse=False, dtype=tf.float32, debug_mode=False, *args, **kwargs):
"""Returns a variable scope that the model should be created under.
    If `dtype` is a castable type, model variables will be created in fp32
    then cast to `dtype` before being used.
Returns:
A variable scope for the model.
"""
def _custom_dtype_getter(getter, name, shape=None, dtype=None, trainable=True, regularizer=None, *args, **kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
storage_dtype = tf.float32 if dtype in [tf.float32, tf.float16] else dtype
variable = getter(
name,
shape,
dtype=storage_dtype,
trainable=trainable,
regularizer=(
regularizer if
(trainable and not any(l_name.lower() in name.lower()
for l_name in ['batchnorm', 'batch_norm'])) else None
),
*args,
**kwargs
)
if dtype != tf.float32:
cast_name = name + '/fp16_cast'
try:
cast_variable = tf.get_default_graph().get_tensor_by_name(cast_name + ':0')
except KeyError:
cast_variable = tf.cast(variable, dtype, name=cast_name)
cast_variable._ref = variable._ref
variable = cast_variable
return variable
return tf.variable_scope(name, reuse=reuse, dtype=dtype, custom_getter=_custom_dtype_getter, *args, **kwargs)
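# Illustrative usage sketch (not part of the original file), assuming a TF1.x
# runtime: a variable requested under the fp16 scope is stored in fp32 and the
# tensor handed back is the fp16 cast.
if __name__ == '__main__':
    with model_variable_scope('demo', dtype=tf.float16):
        w = tf.get_variable('w', shape=[4], initializer=tf.zeros_initializer())
    print(w.dtype)  # float16 (the underlying storage variable stays float32)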
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/utils/var_storage.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import dllogger as DLLogger
class TrainHook(tf.estimator.SessionRunHook):
def __init__(self, log_every, logger):
self._log_every = log_every
self._step = 0
self._logger = logger
def before_run(self, run_context):
run_args = tf.train.SessionRunArgs(
fetches=[
'vnet/loss/total_loss_ref:0',
]
)
return run_args
def after_run(self,
run_context,
run_values):
if self._step % self._log_every == 0:
self._logger.log(step=(self._step,), data={'total_loss': str(run_values.results[0])})
self._step += 1
def end(self, session):
self._logger.flush()
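# Usage sketch (illustrative, not part of the original file): main.py attaches
# this hook on rank 0, e.g.
#   estimator.train(input_fn=..., hooks=[TrainHook(FLAGS.log_every, DLLogger)])
# which fetches 'vnet/loss/total_loss_ref:0' and logs it every `log_every` steps.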
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/hooks/train_hook.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import tensorflow as tf
import dllogger as DLLogger
class ProfilingHook(tf.estimator.SessionRunHook):
def __init__(self, warmup_steps, global_batch_size, logger, training=True):
self._warmup_steps = warmup_steps
self._global_batch_size = global_batch_size
self._step = 0
self._timestamps = []
self._logger = logger
self._training = training
def before_run(self, run_context):
self._step += 1
if self._step >= self._warmup_steps:
self._timestamps.append(time.time())
def end(self, session):
deltas = np.array([self._timestamps[i + 1] - self._timestamps[i] for i in range(len(self._timestamps) - 1)])
        stats = process_performance_stats(deltas, self._global_batch_size)
self._logger.log(step=(), data={metric: value for (metric, value) in stats})
self._logger.flush()
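# Worked example for process_performance_stats below (illustrative, not part of
# the original file): given per-step deltas in seconds and the global batch
# size, it reports mean throughput (img/s), mean latency (ms) and upper
# confidence bounds on the latency.
#   deltas = np.array([0.10, 0.11, 0.09])  # seconds per step
#   process_performance_stats(deltas, batch_size=8)
#   # -> Throughput Avg ~= 80.5 img/s, Latency Avg ~= 100 ms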
def process_performance_stats(timestamps, batch_size):
timestamps_ms = 1000 * timestamps
latency_ms = timestamps_ms.mean()
std = timestamps_ms.std()
n = np.sqrt(len(timestamps_ms))
throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
stats = [("Throughput Avg", str(throughput_imgps)),
('Latency Avg:', str(latency_ms))]
for ci, lvl in zip(["90%:", "95%:", "99%:"],
[1.645, 1.960, 2.576]):
stats.append(("Latency_"+ci, str(latency_ms + lvl * std / n)))
    return stats
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/hooks/profiling_hook.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from os.path import dirname
PARSER = argparse.ArgumentParser(description="vnet_benchmark")
PARSER.add_argument('--data_dir',
required=True,
type=str)
PARSER.add_argument('--model_dir',
required=True,
type=str)
PARSER.add_argument('--mode',
choices=['train', 'predict'],
required=True,
type=str)
PARSER.add_argument('--gpus',
choices=[1, 8],
required=True,
type=int)
PARSER.add_argument('--batch_size',
required=True,
type=int)
PARSER.add_argument('--amp', dest='use_amp', action='store_true', default=False)
def build_horovod_prefix(gpus):
return 'mpirun -np {} -H localhost:{} -bind-to none -map-by slot -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH -mca ' \
'pml ob1 -mca btl ^openib --allow-run-as-root '.format(gpus, gpus)
def build_command(FLAGS, path_to_main, use_amp):
return 'python {} --data_dir {} --model_dir {} --exec_mode {} --batch_size {} {} --augment --benchmark'.format(
path_to_main,
FLAGS.data_dir,
FLAGS.model_dir,
FLAGS.mode,
FLAGS.batch_size,
use_amp)
def main():
FLAGS = PARSER.parse_args()
use_amp = '--amp' if FLAGS.use_amp else ''
path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), 'main.py')
cmd = build_command(FLAGS, path_to_main, use_amp)
if FLAGS.gpus > 1:
assert FLAGS.mode != 'predict', 'Prediction can only be benchmarked on 1 GPU'
cmd = build_horovod_prefix(FLAGS.gpus) + cmd
print('Command to be executed:')
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/examples/vnet_benchmark.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from os.path import dirname
PARSER = argparse.ArgumentParser(description="vnet_train_and_evaluate")
PARSER.add_argument('--data_dir',
required=True,
type=str,
help='Directory where the dataset is stored')
PARSER.add_argument('--model_dir',
required=True,
type=str,
help='Directory where model information (including checkpoints) is stored')
PARSER.add_argument('--gpus',
choices=[1, 8],
required=True,
type=int,
help='Number of GPUs')
PARSER.add_argument('--batch_size',
default=1,
type=int,
help='Batch size for training')
PARSER.add_argument('--epochs',
default=40,
type=int,
help='Number of epochs for training')
PARSER.add_argument('--amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--base_lr',
default=0.0001,
type=float,
help='Initial learning rate for RMSProp')
def build_horovod_prefix(gpus):
return 'mpirun -np {} -H localhost:{} -bind-to none -map-by slot -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH -mca ' \
'pml ob1 -mca btl ^openib --allow-run-as-root '.format(gpus, gpus)
def build_command(FLAGS, path_to_main, use_amp):
return 'python {} --data_dir {} --model_dir {} --exec_mode train_and_evaluate --batch_size {} {} --augment --train_epochs {} --train_split 0.9 --split_seed 42 --base_lr {}'.format(
path_to_main,
FLAGS.data_dir,
FLAGS.model_dir,
FLAGS.batch_size,
use_amp,
FLAGS.epochs,
FLAGS.base_lr)
def main():
FLAGS = PARSER.parse_args()
use_amp = '--amp' if FLAGS.use_amp else ''
path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), 'main.py')
cmd = build_command(FLAGS, path_to_main, use_amp)
if FLAGS.gpus > 1:
cmd = build_horovod_prefix(FLAGS.gpus) + cmd
print('Command to be executed:')
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/examples/vnet_train_and_evaluate.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from os.path import dirname
PARSER = argparse.ArgumentParser(description="vnet_predict")
PARSER.add_argument('--data_dir',
required=True,
type=str)
PARSER.add_argument('--model_dir',
required=True,
type=str)
PARSER.add_argument('--batch_size',
required=True,
type=int)
PARSER.add_argument('--amp', dest='use_amp', action='store_true', default=False)
def build_command(FLAGS, path_to_main, use_amp):
return 'python {} --data_dir {} --model_dir {} --exec_mode predict --batch_size {} {}'.format(
path_to_main,
FLAGS.data_dir,
FLAGS.model_dir,
FLAGS.batch_size,
use_amp)
def main():
FLAGS = PARSER.parse_args()
use_amp = '--amp' if FLAGS.use_amp else ''
path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), 'main.py')
cmd = build_command(FLAGS, path_to_main, use_amp)
print('Command to be executed:')
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/examples/vnet_predict.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from os.path import dirname
PARSER = argparse.ArgumentParser(description="vnet_train")
PARSER.add_argument('--data_dir',
required=True,
type=str,
help='Directory where the dataset is stored')
PARSER.add_argument('--model_dir',
required=True,
type=str,
help='Directory where model information (including checkpoints) is stored')
PARSER.add_argument('--gpus',
choices=[1, 8],
required=True,
type=int,
help='Number of GPUs')
PARSER.add_argument('--batch_size',
default=1,
type=int,
help='Batch size for training')
PARSER.add_argument('--epochs',
default=40,
type=int,
help='Number of epochs for training')
PARSER.add_argument('--amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--base_lr',
default=0.0001,
type=float,
help='Initial learning rate for RMSProp')
def build_horovod_prefix(gpus):
return 'mpirun -np {} -H localhost:{} -bind-to none -map-by slot -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH -mca ' \
'pml ob1 -mca btl ^openib --allow-run-as-root '.format(gpus, gpus)
def build_command(FLAGS, path_to_main, use_amp):
return 'python {} --data_dir {} --model_dir {} --exec_mode train --batch_size {} {} --augment --train_epochs {} --base_lr {}'.format(
path_to_main,
FLAGS.data_dir,
FLAGS.model_dir,
FLAGS.batch_size,
use_amp,
FLAGS.epochs,
FLAGS.base_lr)
def main():
FLAGS = PARSER.parse_args()
use_amp = '--amp' if FLAGS.use_amp else ''
path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), 'main.py')
cmd = build_command(FLAGS, path_to_main, use_amp)
if FLAGS.gpus > 1:
cmd = build_horovod_prefix(FLAGS.gpus) + cmd
print('Command to be executed:')
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/examples/vnet_train.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.layers import input_block, downsample_block, upsample_block, output_block
class Builder():
def __init__(self, kernel_size, n_classes, upscale_blocks, downscale_blocks, upsampling, pooling, normalization,
activation, mode):
self._kernel_size = kernel_size
self._pooling = pooling
self._upsampling = upsampling
self._normalization = normalization
self._activation = activation
self._mode = mode
self._n_classes = n_classes
self._downscale_blocks = downscale_blocks
self._upscale_blocks = upscale_blocks
def __call__(self, features):
x = input_block(inputs=features,
filters=16,
kernel_size=self._kernel_size,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
skip_connections = [x]
for depth in self._downscale_blocks:
x = downsample_block(inputs=x,
depth=depth,
kernel_size=self._kernel_size,
pooling=self._pooling,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
skip_connections.append(x)
del skip_connections[-1]
for depth in self._upscale_blocks:
x = upsample_block(inputs=x,
residual_inputs=skip_connections.pop(),
depth=depth,
upsampling=self._upsampling,
kernel_size=self._kernel_size,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
return output_block(inputs=x,
residual_inputs=skip_connections.pop(),
kernel_size=self._kernel_size,
n_classes=self._n_classes,
upsampling=self._upsampling,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
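# Illustrative usage sketch (not part of the original file): building the graph
# with the defaults from utils/cmd_util.py. Assumes a TF1.x runtime; the input
# placeholder shape is an arbitrary example.
if __name__ == '__main__':
    import tensorflow as tf
    features = tf.placeholder(tf.float32, shape=[1, 32, 32, 32, 1])
    logits = Builder(kernel_size=3,
                     n_classes=3,
                     downscale_blocks=[3, 3, 3],
                     upscale_blocks=[3, 3],
                     upsampling='transposed_conv',
                     pooling='conv_pool',
                     normalization='batchnorm',
                     activation='relu',
                     mode=tf.estimator.ModeKeys.PREDICT)(features)
    print(logits.shape)  # (1, 32, 32, 32, 3)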
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/model/vnet.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def normalization_layer(inputs, name, mode):
if name == 'batchnorm':
return tf.layers.batch_normalization(inputs=inputs,
axis=-1,
training=(mode == tf.estimator.ModeKeys.TRAIN),
trainable=True,
virtual_batch_size=None)
elif name == 'none':
return inputs
else:
raise ValueError('Invalid normalization layer')
def activation_layer(x, activation):
if activation == 'relu':
return tf.nn.relu(x)
elif activation == 'none':
return x
else:
raise ValueError("Unkown activation {}".format(activation))
def convolution_layer(inputs, filters, kernel_size, stride, normalization, activation, mode):
x = tf.layers.conv3d(inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=stride,
activation=None,
padding='same',
data_format='channels_last',
use_bias=True,
kernel_initializer=tf.glorot_uniform_initializer(),
bias_initializer=tf.zeros_initializer(),
bias_regularizer=None)
x = normalization_layer(x, normalization, mode)
return activation_layer(x, activation)
def downsample_layer(inputs, pooling, normalization, activation, mode):
if pooling == 'conv_pool':
return convolution_layer(inputs=inputs,
filters=inputs.get_shape()[-1] * 2,
kernel_size=2,
stride=2,
normalization=normalization,
activation=activation,
mode=mode)
else:
raise ValueError('Invalid downsampling method: {}'.format(pooling))
def upsample_layer(inputs, filters, upsampling, normalization, activation, mode):
if upsampling == 'transposed_conv':
x = tf.layers.conv3d_transpose(inputs=inputs,
filters=filters,
kernel_size=2,
strides=2,
activation=None,
padding='same',
data_format='channels_last',
use_bias=True,
kernel_initializer=tf.glorot_uniform_initializer(),
bias_initializer=tf.zeros_initializer(),
bias_regularizer=None)
x = normalization_layer(x, normalization, mode)
return activation_layer(x, activation)
else:
raise ValueError('Unsupported upsampling: {}'.format(upsampling))
def residual_block(input_0, input_1, kernel_size, depth, normalization, activation, mode):
with tf.name_scope('residual_block'):
x = input_0
if input_1 is not None:
x = tf.concat([input_0, input_1], axis=-1)
inputs = x
n_input_channels = inputs.get_shape()[-1]
for i in range(depth):
x = convolution_layer(inputs=x,
filters=n_input_channels,
kernel_size=kernel_size,
stride=1,
normalization=normalization,
activation=activation,
mode=mode)
return x + inputs
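# Channel bookkeeping for the block above (a worked example with illustrative
# shapes): if input_0 and input_1 are both [N, D, H, W, 16], the concat yields
# 32 channels, every convolution in the loop keeps those 32 channels, and the
# final residual addition `x + inputs` is therefore well defined.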
def input_block(inputs, filters, kernel_size, normalization, activation, mode):
with tf.name_scope('conversion_block'):
x = inputs
return convolution_layer(inputs=inputs,
filters=filters,
kernel_size=kernel_size,
stride=1,
normalization=normalization,
activation=activation,
mode=mode) + x
def downsample_block(inputs, depth, kernel_size, pooling, normalization, activation, mode):
with tf.name_scope('downsample_block'):
x = downsample_layer(inputs,
pooling=pooling,
normalization=normalization,
activation=activation,
mode=mode)
return residual_block(input_0=x,
input_1=None,
depth=depth,
kernel_size=kernel_size,
normalization=normalization,
activation=activation,
mode=mode)
def upsample_block(inputs, residual_inputs, depth, kernel_size, upsampling, normalization, activation, mode):
with tf.name_scope('upsample_block'):
x = upsample_layer(inputs,
filters=residual_inputs.get_shape()[-1],
upsampling=upsampling,
normalization=normalization,
activation=activation,
mode=mode)
return residual_block(input_0=x,
input_1=residual_inputs,
depth=depth,
kernel_size=kernel_size,
normalization=normalization,
activation=activation,
mode=mode)
def output_block(inputs, residual_inputs, n_classes, kernel_size, upsampling, normalization, activation, mode):
with tf.name_scope('output_block'):
x = upsample_layer(inputs,
filters=residual_inputs.get_shape()[-1],
upsampling=upsampling,
normalization=normalization,
activation=activation,
mode=mode)
return convolution_layer(inputs=x,
filters=n_classes,
kernel_size=kernel_size,
stride=1,
mode=mode,
activation='none',
normalization='none')
| DeepLearningExamples-master | TensorFlow/Segmentation/VNet/model/layers.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
PARSER = argparse.ArgumentParser(description="U-Net medical")
PARSER.add_argument('--data_dir',
type=str,
default='./data',
help="""Directory where to download the dataset""")
def main():
FLAGS = PARSER.parse_args()
if not os.path.exists(FLAGS.data_dir):
os.makedirs(FLAGS.data_dir)
os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-volume.tif -P {}'.format(FLAGS.data_dir))
os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-labels.tif -P {}'.format(FLAGS.data_dir))
os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/test-volume.tif -P {}'.format(FLAGS.data_dir))
print("Finished downloading files for U-Net medical to {}".format(FLAGS.data_dir))
if __name__ == '__main__':
main() | DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/download_dataset.py |
import argparse
import tensorflow as tf
from dlexport.tensorflow import to_savedmodel, to_onnx, to_tensorrt
from utils.data_loader import Dataset
from utils.model_fn import unet_fn
PARSER = argparse.ArgumentParser(description="U-Net medical")
PARSER.add_argument('--to', dest='to', choices=['savedmodel', 'tensorrt', 'onnx'], required=True)
PARSER.add_argument('--use_amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--use_xla', dest='use_xla', action='store_true', default=False)
PARSER.add_argument('--compress', dest='compress', action='store_true', default=False)
PARSER.add_argument('--input_shape',
                    nargs='+',
                    type=int,
                    help="""Input shape of the exported model, e.g. batch height width channels""")
PARSER.add_argument('--data_dir',
                    type=str,
                    help="""Directory containing the dataset""")
PARSER.add_argument('--checkpoint_dir',
                    type=str,
                    help="""Directory containing the checkpoint to export""")
PARSER.add_argument('--savedmodel_dir',
                    type=str,
                    help="""Directory containing the SavedModel to convert""")
PARSER.add_argument('--precision',
                    type=str,
                    choices=['FP32', 'FP16', 'INT8'],
                    help="""Precision of the exported model""")
def main():
"""
Starting point of the application
"""
flags = PARSER.parse_args()
if flags.to == 'savedmodel':
to_savedmodel(input_shape=flags.input_shape,
model_fn=unet_fn,
src_dir=flags.checkpoint_dir,
dst_dir='./saved_model',
input_names=['IteratorGetNext'],
output_names=['total_loss_ref'],
use_amp=flags.use_amp,
use_xla=flags.use_xla,
compress=flags.compress)
if flags.to == 'tensorrt':
ds = Dataset(data_dir=flags.data_dir,
batch_size=1,
augment=False,
gpu_id=0,
num_gpus=1,
seed=42)
iterator = ds.test_fn(count=1).make_one_shot_iterator()
features = iterator.get_next()
sess = tf.Session()
def input_data():
return {'input_tensor:0': sess.run(features)}
to_tensorrt(src_dir=flags.savedmodel_dir,
dst_dir='./tf_trt_model',
precision=flags.precision,
feed_dict_fn=input_data,
num_runs=1,
output_tensor_names=['Softmax:0'],
compress=flags.compress)
if flags.to == 'onnx':
to_onnx(src_dir=flags.savedmodel_dir,
dst_dir='./onnx_model',
compress=flags.compress)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/export.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point of the application.
This file serves as entry point to the training of UNet for segmentation of neuronal processes.
Example:
Training can be adjusted by modifying the arguments specified below::
$ python main.py --exec_mode train --model_dir /datasets ...
"""
import os
import horovod.tensorflow as hvd
import math
import numpy as np
import tensorflow as tf
from PIL import Image
from utils.setup import prepare_model_dir, get_logger, build_estimator, set_flags
from utils.cmd_util import PARSER, parse_args
from utils.data_loader import Dataset
from utils.hooks.profiling_hook import ProfilingHook
from utils.hooks.training_hook import TrainingHook
def main(_):
"""
Starting point of the application
"""
hvd.init()
set_flags()
params = parse_args(PARSER.parse_args())
model_dir = prepare_model_dir(params)
logger = get_logger(params)
estimator = build_estimator(params, model_dir)
dataset = Dataset(data_dir=params.data_dir,
batch_size=params.batch_size,
fold=params.crossvalidation_idx,
augment=params.augment,
gpu_id=hvd.rank(),
num_gpus=hvd.size(),
seed=params.seed)
if 'train' in params.exec_mode:
max_steps = params.max_steps // (1 if params.benchmark else hvd.size())
hooks = [hvd.BroadcastGlobalVariablesHook(0),
TrainingHook(logger,
max_steps=max_steps,
log_every=params.log_every)]
if params.benchmark and hvd.rank() == 0:
hooks.append(ProfilingHook(logger,
batch_size=params.batch_size,
log_every=params.log_every,
warmup_steps=params.warmup_steps,
mode='train'))
estimator.train(
input_fn=dataset.train_fn,
steps=max_steps,
hooks=hooks)
if 'evaluate' in params.exec_mode:
if hvd.rank() == 0:
results = estimator.evaluate(input_fn=dataset.eval_fn, steps=dataset.eval_size)
logger.log(step=(),
data={"eval_ce_loss": float(results["eval_ce_loss"]),
"eval_dice_loss": float(results["eval_dice_loss"]),
"eval_total_loss": float(results["eval_total_loss"]),
"eval_dice_score": float(results["eval_dice_score"])})
if 'predict' in params.exec_mode:
if hvd.rank() == 0:
predict_steps = dataset.test_size
hooks = None
if params.benchmark:
hooks = [ProfilingHook(logger,
batch_size=params.batch_size,
log_every=params.log_every,
warmup_steps=params.warmup_steps,
mode="test")]
predict_steps = params.warmup_steps * 2 * params.batch_size
predictions = estimator.predict(
input_fn=lambda: dataset.test_fn(count=math.ceil(predict_steps / dataset.test_size)),
hooks=hooks)
binary_masks = [np.argmax(p['logits'], axis=-1).astype(np.uint8) * 255 for p in predictions]
if not params.benchmark:
multipage_tif = [Image.fromarray(mask).resize(size=(512, 512), resample=Image.BILINEAR)
for mask in binary_masks]
output_dir = os.path.join(params.model_dir, 'pred')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
multipage_tif[0].save(os.path.join(output_dir, 'test-masks.tif'),
compression="tiff_deflate",
save_all=True,
append_images=multipage_tif[1:])
if __name__ == '__main__':
tf.compat.v1.app.run()
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/main.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Runner class encapsulating the training
This module provides the functionality to initialize a run with hyper-parameters
which can be later used for training and inference.
Example:
Runner can be created with a parameter dictionary, and those parameters
are reused for training and inference::
params = {...}
runner = Runner(params)
runner.train()
runner.predict()
"""
import time
import os
import pickle
from PIL import Image
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from dllogger.logger import LOGGER
from model.unet import unet_v1
from utils.data_loader import Dataset
from utils.hooks.profiler_hook import ProfilerHook
from utils.var_storage import model_variable_scope
# Class Dice coefficient averaged over batch
def dice_coef(predict, target, axis=1, eps=1e-6):
intersection = tf.reduce_sum(predict * target, axis=axis)
union = tf.reduce_sum(predict * predict + target * target, axis=axis)
dice = (2. * intersection + eps) / (union + eps)
return tf.reduce_mean(dice, axis=0) # average over batch
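# Worked example (illustrative values, not from the source): for a single
# sample with predict = [1., 0., 1., 0.] and target = [1., 0., 0., 0.],
# intersection = 1 and union = 2 + 1 = 3, so
# dice = (2 * 1 + eps) / (3 + eps) ~= 0.667.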
def regularization_l2loss(weight_decay):
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN"""
return all([
tensor_name not in name.lower()
for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]
])
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), weight_decay)
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
return l2_loss
def is_using_hvd():
env_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
if all([var in os.environ for var in env_vars]):
return True
else:
return False
def _model_fn(features, labels, mode, params):
""" Model function for tf.Estimator
Controls how the training is performed by specifying how the
total_loss is computed and applied in the backward pass.
Args:
features (tf.Tensor): Tensor samples
labels (tf.Tensor): Tensor labels
mode (tf.estimator.ModeKeys): Indicates if we train, evaluate or predict
params (dict): Additional parameters supplied to the estimator
Returns:
Appropriate tf.estimator.EstimatorSpec for the current mode
"""
dtype = params['dtype']
max_steps = params['max_steps']
lr_init = params['learning_rate']
momentum = params['momentum']
device = '/gpu:0'
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(lr_init, global_step,
decay_steps=max_steps,
decay_rate=0.96)
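    # With the decay configured above, the effective learning rate after
    # `global_step` steps is lr_init * 0.96 ** (global_step / max_steps),
    # i.e. it decays smoothly to 0.96 * lr_init by the final step.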
with tf.device(device):
features = tf.cast(features, dtype)
with model_variable_scope(
'UNet',
reuse=tf.AUTO_REUSE,
dtype=tf.float16,
debug_mode=False
):
output_map = unet_v1(features, mode)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': tf.nn.softmax(output_map, axis=-1)}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
n_classes = output_map.shape[-1].value
flat_logits = tf.reshape(tf.cast(output_map, tf.float32),
[tf.shape(output_map)[0], -1, n_classes])
flat_labels = tf.reshape(labels,
[tf.shape(output_map)[0], -1, n_classes])
crossentropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=flat_logits,
labels=flat_labels),
name='cross_loss_ref')
dice_loss = tf.reduce_mean(1 - dice_coef(flat_logits, flat_labels), name='dice_loss_ref')
total_loss = tf.add(crossentropy_loss, dice_loss, name="total_loss_ref")
opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum)
if is_using_hvd():
opt = hvd.DistributedOptimizer(opt, device_dense='/gpu:0')
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
deterministic = True
gate_gradients = (
tf.train.Optimizer.GATE_OP
if deterministic
else tf.train.Optimizer.GATE_NONE)
train_op = opt.minimize(total_loss, gate_gradients=gate_gradients, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op,
eval_metric_ops={})
class Runner():
""" Runner class for encapsulating hyperparameters
This class is constructed with a set of hyper-parameters
which are later reused for training and prediction
Args:
params (dict): Provides the parametrization for training and prediction
Attributes:
_max_steps (int): Number of steps for training
_classifier (tf.estimator.Estimator): Estimator used for training and validation
_dataset (tf.data.Dataset): Source of sample and label pairs
_training_hooks (tf.train.SessionRunHook): Parallel training, and benchmarking utils
"""
def __init__(self, params):
hvd.init()
LOGGER.log(str(params))
data_dir = params['data_dir']
batch_size = params['batch_size']
augment = params['augment']
benchmark = params['benchmark']
seed = params['seed']
self._model_dir = params['model_dir']
self._max_steps = params['max_steps']
self._classifier = tf.estimator.Estimator(
model_fn=_model_fn,
model_dir=self._model_dir,
params=params,
config=tf.estimator.RunConfig(
tf_random_seed=None,
session_config=self._get_session_config(),
save_checkpoints_steps=self._max_steps if hvd.rank() == 0 else None,
keep_checkpoint_max=1))
self._dataset = Dataset(data_dir=data_dir,
batch_size=batch_size,
augment=augment,
gpu_id=hvd.rank(),
num_gpus=hvd.size(),
seed=seed)
self._training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if benchmark and hvd.rank() == 0:
self._training_hooks.append(ProfilerHook(self._model_dir, batch_size, log_every=params['log_every'],
warmup_steps=params['warmup_steps']))
def _get_session_config(self):
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.force_gpu_compatible = True
config.intra_op_parallelism_threads = 1
config.inter_op_parallelism_threads = max(2, 40 // hvd.size() - 2)
return config
def train(self):
"""Perform training with the runner's classifier"""
LOGGER.log("Begin training...")
try:
self._classifier.train(
input_fn=self._dataset.train_fn,
steps=self._max_steps,
hooks=self._training_hooks)
except KeyboardInterrupt:
print("Keyboard interrupt")
LOGGER.log("Training finished")
def predict(self):
"""Perform prediction with the runner's classifier """
if hvd.rank() == 0:
LOGGER.log("Begin predict...")
begin = time.time()
pred = self._classifier.predict(input_fn=self._dataset.test_fn)
predictions = [p['logits'] for p in pred]
print('Inference took: {} sec'.format(time.time() - begin))
binary_masks = [np.argmax(p, axis=-1).astype(np.uint8) * 255 for p in predictions]
multipage_tif = [Image.fromarray(mask).resize(size=(512, 512), resample=Image.BILINEAR)
for mask in binary_masks]
output_dir = os.path.join(self._model_dir, 'pred')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
multipage_tif[0].save(os.path.join(output_dir, 'test-masks.tif'),
compression="tiff_deflate",
save_all=True,
append_images=multipage_tif[1:])
pickle.dump(predictions, open(os.path.join(output_dir, 'predictions.pkl'), 'wb'))
LOGGER.log("Predict finished")
def benchmark(self):
if hvd.rank() == 0:
self._classifier.evaluate(input_fn=self._dataset.synth_fn,
steps=self._max_steps,
hooks=[self._training_hooks[-1]])
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import argparse
def process_performance_stats(timestamps, batch_size, mode):
""" Get confidence intervals
:param timestamps: Collection of timestamps
:param batch_size: Number of samples per batch
:param mode: Estimator's execution mode
:return: Stats
"""
timestamps_ms = 1000 * timestamps
throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
stats = {f"throughput_{mode}": throughput_imgps,
f"latency_{mode}_mean": timestamps_ms.mean()}
for level in [90, 95, 99]:
stats.update({f"latency_{mode}_{level}": np.percentile(timestamps_ms, level)})
return stats
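# A minimal usage sketch (the numbers are made up for illustration):
#
#   import numpy as np
#   timestamps = np.array([0.050, 0.051, 0.049])  # seconds per batch
#   stats = process_performance_stats(timestamps, batch_size=8, mode='train')
#   # stats['throughput_train'] ~= 8 / 0.050 = 160 images/s and
#   # stats['latency_train_mean'] ~= 50 ms, plus 90/95/99th percentiles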
def parse_convergence_results(path, environment):
dice_scores = []
ce_scores = []
logfiles = [f for f in os.listdir(path) if "log" in f and environment in f]
if not logfiles:
raise FileNotFoundError("No logfile found at {}".format(path))
for logfile in logfiles:
with open(os.path.join(path, logfile), "r") as f:
content = f.readlines()[-1]
if "eval_dice_score" not in content:
print("Evaluation score not found. The file", logfile, "might be corrupted.")
continue
dice_scores.append(float([val for val in content.split(" ")
if "eval_dice_score" in val][0].split()[-1]))
ce_scores.append(float([val for val in content.split(" ")
if "eval_ce_loss" in val][0].split()[-1]))
if dice_scores:
print("Evaluation dice score:", sum(dice_scores) / len(dice_scores))
print("Evaluation cross-entropy loss:", sum(ce_scores) / len(ce_scores))
else:
print("All logfiles were corrupted, no loss was obtained.")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="UNet-medical-utils")
parser.add_argument('--exec_mode',
choices=['convergence', 'benchmark'],
type=str,
help="""Which execution mode to run the model into""")
parser.add_argument('--model_dir',
type=str,
required=True)
parser.add_argument('--env',
choices=['FP32_1GPU', 'FP32_8GPU', 'TF-AMP_1GPU', 'TF-AMP_8GPU'],
type=str,
required=True)
args = parser.parse_args()
if args.exec_mode == 'convergence':
parse_convergence_results(path=args.model_dir, environment=args.env)
elif args.exec_mode == 'benchmark':
pass
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/parse_results.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line argument parsing"""
import argparse
from munch import Munch
PARSER = argparse.ArgumentParser(description="UNet-medical")
PARSER.add_argument('--exec_mode',
choices=['train', 'train_and_predict', 'predict', 'evaluate', 'train_and_evaluate'],
type=str,
default='train_and_evaluate',
help="""Execution mode of running the model""")
PARSER.add_argument('--model_dir',
type=str,
default='./results',
help="""Output directory for information related to the model""")
PARSER.add_argument('--data_dir',
type=str,
required=True,
help="""Input directory containing the dataset for training the model""")
PARSER.add_argument('--log_dir',
type=str,
default=None,
help="""Output directory for training logs""")
PARSER.add_argument('--batch_size',
type=int,
default=1,
help="""Size of each minibatch per GPU""")
PARSER.add_argument('--learning_rate',
type=float,
default=0.0001,
help="""Learning rate coefficient for AdamOptimizer""")
PARSER.add_argument('--crossvalidation_idx',
type=int,
default=None,
help="""Chosen fold for cross-validation. Use None to disable cross-validation""")
PARSER.add_argument('--max_steps',
type=int,
default=1000,
help="""Maximum number of steps (batches) used for training""")
PARSER.add_argument('--weight_decay',
type=float,
default=0.0005,
help="""Weight decay coefficient""")
PARSER.add_argument('--log_every',
type=int,
default=100,
help="""Log performance every n steps""")
PARSER.add_argument('--warmup_steps',
type=int,
default=200,
help="""Number of warmup steps""")
PARSER.add_argument('--seed',
type=int,
default=0,
help="""Random seed""")
PARSER.add_argument('--augment', dest='augment', action='store_true',
help="""Perform data augmentation during training""")
PARSER.add_argument('--benchmark', dest='benchmark', action='store_true',
help="""Collect performance metrics during training""")
PARSER.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true',
help="""Train using TF-AMP""")
PARSER.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true',
help="""Train using XLA""")
PARSER.add_argument('--use_trt', dest='use_trt', action='store_true',
help="""Use TF-TRT""")
PARSER.add_argument('--resume_training', dest='resume_training', action='store_true',
help="""Resume training from a checkpoint""")
def parse_args(flags):
return Munch({
'exec_mode': flags.exec_mode,
'model_dir': flags.model_dir,
'data_dir': flags.data_dir,
'log_dir': flags.log_dir,
'batch_size': flags.batch_size,
'learning_rate': flags.learning_rate,
'crossvalidation_idx': flags.crossvalidation_idx,
'max_steps': flags.max_steps,
'weight_decay': flags.weight_decay,
'log_every': flags.log_every,
'warmup_steps': flags.warmup_steps,
'augment': flags.augment,
'benchmark': flags.benchmark,
'seed': flags.seed,
'use_amp': flags.use_amp,
'use_trt': flags.use_trt,
'use_xla': flags.use_xla,
'resume_training': flags.resume_training,
})
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/cmd_util.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Dataset class encapsulates the data loading"""
import multiprocessing
import os
from collections import deque
import numpy as np
import tensorflow as tf
from PIL import Image, ImageSequence
class Dataset():
"""Load, separate and prepare the data for training and prediction"""
def __init__(self, data_dir, batch_size, fold=1, augment=False, gpu_id=0, num_gpus=1, seed=0):
if not os.path.exists(data_dir):
raise FileNotFoundError('Cannot find data dir: {}'.format(data_dir))
self._data_dir = data_dir
self._batch_size = batch_size
self._augment = augment
self._seed = seed
images = self._load_multipage_tiff(os.path.join(self._data_dir, 'train-volume.tif'))
masks = self._load_multipage_tiff(os.path.join(self._data_dir, 'train-labels.tif'))
self._test_images = \
self._load_multipage_tiff(os.path.join(self._data_dir, 'test-volume.tif'))
train_indices, val_indices = self._get_val_train_indices(len(images), fold)
self._train_images = images[train_indices]
self._train_masks = masks[train_indices]
self._val_images = images[val_indices]
self._val_masks = masks[val_indices]
self._num_gpus = num_gpus
self._gpu_id = gpu_id
@property
def train_size(self):
return len(self._train_images)
@property
def eval_size(self):
return len(self._val_images)
@property
def test_size(self):
return len(self._test_images)
def _load_multipage_tiff(self, path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def _get_val_train_indices(self, length, fold, ratio=0.8):
assert 0 < ratio <= 1, "Train/total data ratio must be in range (0.0, 1.0]"
np.random.seed(self._seed)
        indices = np.arange(0, length, 1, dtype=int)
np.random.shuffle(indices)
if fold is not None:
indices = deque(indices)
indices.rotate(fold * int((1.0 - ratio) * length))
indices = np.array(indices)
train_indices = indices[:int(ratio * len(indices))]
val_indices = indices[int(ratio * len(indices)):]
else:
train_indices = indices
val_indices = []
return train_indices, val_indices
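    # A worked example of the fold rotation above (illustrative numbers): with
    # length=10, ratio=0.8 and fold=1, the shuffled indices are rotated by
    # fold * int(0.2 * 10) = 2 positions, so each fold moves a different 20%
    # slice into the validation split while keeping the 80/20 ratio.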
def _normalize_inputs(self, inputs):
"""Normalize inputs"""
inputs = tf.expand_dims(tf.cast(inputs, tf.float32), -1)
# Center around zero
inputs = tf.divide(inputs, 127.5) - 1
inputs = tf.image.resize_images(inputs, (388, 388))
return tf.image.resize_image_with_crop_or_pad(inputs, 572, 572)
def _normalize_labels(self, labels):
"""Normalize labels"""
labels = tf.expand_dims(tf.cast(labels, tf.float32), -1)
labels = tf.divide(labels, 255)
labels = tf.image.resize_images(labels, (388, 388))
labels = tf.image.resize_image_with_crop_or_pad(labels, 572, 572)
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(labels)))
labels = tf.where(cond, tf.zeros(tf.shape(labels)), tf.ones(tf.shape(labels)))
return tf.one_hot(tf.squeeze(tf.cast(labels, tf.int32)), 2)
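    # Illustrative effect of the label pipeline above: grey levels are rescaled
    # to [0, 1], thresholded at 0.5 into {0, 1}, and expanded to a two-channel
    # one-hot map, e.g. a pixel value of 200/255 ~= 0.78 becomes [0, 1].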
def _preproc_samples(self, inputs, labels, augment=True):
"""Preprocess samples and perform random augmentations"""
inputs = self._normalize_inputs(inputs)
labels = self._normalize_labels(labels)
if self._augment and augment:
# Horizontal flip
h_flip = tf.random_uniform([]) > 0.5
inputs = tf.cond(h_flip, lambda: tf.image.flip_left_right(inputs), lambda: inputs)
labels = tf.cond(h_flip, lambda: tf.image.flip_left_right(labels), lambda: labels)
# Vertical flip
v_flip = tf.random_uniform([]) > 0.5
inputs = tf.cond(v_flip, lambda: tf.image.flip_up_down(inputs), lambda: inputs)
labels = tf.cond(v_flip, lambda: tf.image.flip_up_down(labels), lambda: labels)
# Prepare for batched transforms
inputs = tf.expand_dims(inputs, 0)
labels = tf.expand_dims(labels, 0)
# Random crop and resize
left = tf.random_uniform([]) * 0.3
right = 1 - tf.random_uniform([]) * 0.3
top = tf.random_uniform([]) * 0.3
bottom = 1 - tf.random_uniform([]) * 0.3
inputs = tf.image.crop_and_resize(inputs, [[top, left, bottom, right]], [0], (572, 572))
labels = tf.image.crop_and_resize(labels, [[top, left, bottom, right]], [0], (572, 572))
# Gray value variations
# Adjust brightness and keep values in range
inputs = tf.image.random_brightness(inputs, max_delta=0.2)
inputs = tf.clip_by_value(inputs, clip_value_min=-1, clip_value_max=1)
inputs = tf.squeeze(inputs, 0)
labels = tf.squeeze(labels, 0)
# Bring back labels to network's output size and remove interpolation artifacts
labels = tf.image.resize_image_with_crop_or_pad(labels, target_width=388, target_height=388)
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(labels)))
labels = tf.where(cond, tf.zeros(tf.shape(labels)), tf.ones(tf.shape(labels)))
return (inputs, labels)
def _preproc_eval_samples(self, inputs, labels):
"""Preprocess samples and perform random augmentations"""
inputs = self._normalize_inputs(inputs)
labels = self._normalize_labels(labels)
# Bring back labels to network's output size and remove interpolation artifacts
labels = tf.image.resize_image_with_crop_or_pad(labels, target_width=388, target_height=388)
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(labels)))
labels = tf.where(cond, tf.zeros(tf.shape(labels)), tf.ones(tf.shape(labels)))
return (inputs, labels)
def train_fn(self, drop_remainder=False):
"""Input function for training"""
dataset = tf.data.Dataset.from_tensor_slices(
(self._train_images, self._train_masks))
dataset = dataset.shard(self._num_gpus, self._gpu_id)
dataset = dataset.repeat()
dataset = dataset.shuffle(self._batch_size * 3)
dataset = dataset.map(self._preproc_samples,
num_parallel_calls=multiprocessing.cpu_count() // self._num_gpus)
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder)
dataset = dataset.prefetch(self._batch_size)
return dataset
def eval_fn(self, count=1):
"""Input function for validation"""
dataset = tf.data.Dataset.from_tensor_slices(
(self._val_images, self._val_masks))
dataset = dataset.repeat(count=count)
dataset = dataset.map(self._preproc_eval_samples)
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(self._batch_size)
return dataset
def test_fn(self, count):
"""Input function for testing"""
dataset = tf.data.Dataset.from_tensor_slices(
self._test_images)
dataset = dataset.repeat(count=count)
dataset = dataset.map(self._normalize_inputs)
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(self._batch_size)
return dataset
def synth_fn(self):
"""Synthetic data function for testing"""
inputs = tf.truncated_normal((572, 572, 1), dtype=tf.float32, mean=127.5, stddev=1, seed=self._seed,
name='synth_inputs')
masks = tf.truncated_normal((388, 388, 2), dtype=tf.float32, mean=0.01, stddev=0.1, seed=self._seed,
name='synth_masks')
dataset = tf.data.Dataset.from_tensors((inputs, masks))
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
return dataset
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/data_loader.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import dllogger as logger
import tensorflow as tf
import horovod.tensorflow as hvd
import numpy as np
from dllogger import StdOutBackend, Verbosity, JSONStreamBackend
from utils.model_fn import unet_fn
def set_flags():
os.environ['CUDA_CACHE_DISABLE'] = '1'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
def prepare_model_dir(params):
model_dir = os.path.join(params.model_dir, "model_checkpoint")
model_dir = model_dir if (hvd.rank() == 0 and not params.benchmark) else None
if model_dir is not None:
os.makedirs(model_dir, exist_ok=True)
if ('train' in params.exec_mode) and (not params.resume_training):
os.system('rm -rf {}/*'.format(model_dir))
return model_dir
def build_estimator(params, model_dir):
if params.use_amp:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
else:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
np.random.seed(params.seed)
tf.compat.v1.random.set_random_seed(params.seed)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
gpu_options = tf.compat.v1.GPUOptions()
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
if params.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
run_config = tf.estimator.RunConfig(
save_summary_steps=1,
tf_random_seed=params.seed,
session_config=config,
save_checkpoints_steps=(params.max_steps // hvd.size()) if hvd.rank() == 0 else None,
keep_checkpoint_max=1)
estimator = tf.estimator.Estimator(
model_fn=unet_fn,
model_dir=model_dir,
config=run_config,
params=params)
return estimator
def get_logger(params):
backends = []
if hvd.rank() == 0:
backends += [StdOutBackend(Verbosity.VERBOSE)]
if params.log_dir:
backends += [JSONStreamBackend(Verbosity.VERBOSE, params.log_dir)]
logger.init(backends=backends)
logger.metadata("eval_dice_score", {"unit": None})
logger.metadata("throughput_test", {"unit": "images/s"})
logger.metadata("throughput_train", {"unit": "images/s"})
return logger
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/setup.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
import tensorflow as tf
from model.unet import unet_v1
# Class Dice coefficient averaged over batch
def dice_coef(predict, target, axis=1, eps=1e-6):
intersection = tf.reduce_sum(predict * target, axis=axis)
union = tf.reduce_sum(predict * predict + target * target, axis=axis)
dice = (2. * intersection + eps) / (union + eps)
return tf.reduce_mean(dice, axis=0) # average over batch
def regularization_l2loss(weight_decay):
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN"""
return all([
tensor_name not in name.lower()
for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]
])
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), weight_decay)
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
return l2_loss
def unet_fn(features, labels, mode, params):
""" Model function for tf.Estimator
Controls how the training is performed by specifying how the
total_loss is computed and applied in the backward pass.
Args:
features (tf.Tensor): Tensor samples
labels (tf.Tensor): Tensor labels
mode (tf.estimator.ModeKeys): Indicates if we train, evaluate or predict
params (dict): Additional parameters supplied to the estimator
Returns:
Appropriate tf.estimator.EstimatorSpec for the current mode
"""
dtype = tf.float32
device = '/gpu:0'
global_step = tf.compat.v1.train.get_global_step()
with tf.device(device):
features = tf.cast(features, dtype)
output_map = unet_v1(features=features, mode=mode)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': tf.nn.softmax(output_map, axis=-1)}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
n_classes = output_map.shape[-1].value
flat_logits = tf.reshape(tf.cast(output_map, tf.float32),
[tf.shape(output_map)[0], -1, n_classes])
flat_labels = tf.reshape(labels,
[tf.shape(output_map)[0], -1, n_classes])
crossentropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=flat_logits,
labels=flat_labels), name='cross_loss_ref')
dice_loss = tf.reduce_mean(1 - dice_coef(tf.keras.activations.softmax(flat_logits, axis=-1),
flat_labels), name='dice_loss_ref')
total_loss = tf.add(crossentropy_loss, dice_loss, name="total_loss_ref")
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {"eval_ce_loss": tf.compat.v1.metrics.mean(crossentropy_loss),
"eval_dice_loss": tf.compat.v1.metrics.mean(dice_loss),
"eval_total_loss": tf.compat.v1.metrics.mean(total_loss),
"eval_dice_score": tf.compat.v1.metrics.mean(1.0 - dice_loss)}
return tf.estimator.EstimatorSpec(mode=mode, loss=dice_loss, eval_metric_ops=eval_metric_ops)
opt = tf.compat.v1.train.AdamOptimizer(learning_rate=params.learning_rate)
opt = hvd.DistributedOptimizer(opt, device_dense='/gpu:0')
with tf.control_dependencies(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
deterministic = True
gate_gradients = (
tf.compat.v1.train.Optimizer.GATE_OP
if deterministic
else tf.compat.v1.train.Optimizer.GATE_NONE)
train_op = opt.minimize(total_loss, gate_gradients=gate_gradients, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op,
eval_metric_ops={})
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/model_fn.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import tensorflow as tf
import horovod.tensorflow as hvd
from dllogger.autologging import log_hardware
from dllogger.logger import LOGGER
import dllogger.logger as dllg
from dllogger import tags
class ProfilerHook(tf.train.SessionRunHook):
def __init__(self, out_dir, global_batch_size, log_every=10, warmup_steps=20):
LOGGER.set_model_name('UNet_TF')
LOGGER.set_backends([
            dllg.JsonBackend(log_file=os.path.join(out_dir, 'dllogger_out.json'),
logging_scope=dllg.Scope.TRAIN_ITER, iteration_interval=1),
dllg.StdOutBackend(log_file=None,
logging_scope=dllg.Scope.TRAIN_ITER, iteration_interval=log_every)
])
self._perf = dllg.AverageMeter()
LOGGER.register_metric('loss', meter=dllg.AverageMeter(), metric_scope=dllg.Scope.TRAIN_ITER)
LOGGER.register_metric('dice_loss', meter=dllg.AverageMeter(), metric_scope=dllg.Scope.TRAIN_ITER)
LOGGER.register_metric('total_loss', meter=dllg.AverageMeter(), metric_scope=dllg.Scope.TRAIN_ITER)
self._warmup_steps = warmup_steps
self._global_batch_size = global_batch_size
self._current_step = 0
def before_run(self, run_context):
LOGGER.iteration_start()
run_args = tf.train.SessionRunArgs(
fetches=[
'UNet/cross_loss_ref:0',
'UNet/dice_loss_ref:0',
'UNet/total_loss_ref:0']
)
self._t0 = time.time()
return run_args
def after_run(self,
run_context,
run_values):
cross_loss, dice_loss, total_loss = run_values.results
batch_time = time.time() - self._t0
ips = self._global_batch_size / batch_time
ips *= hvd.size()
if self._current_step >= self._warmup_steps:
LOGGER.log("iteration", int(self._current_step))
LOGGER.log("loss", float(cross_loss))
LOGGER.log("dice_loss", float(dice_loss))
LOGGER.log("total_loss", float(total_loss))
self._perf.record(ips)
LOGGER.iteration_stop()
self._current_step += 1
def begin(self):
log_hardware(LOGGER)
LOGGER.log(tags.RUN_INIT)
def end(self, session):
LOGGER.log(tags.RUN_FINAL)
LOGGER.finish()
LOGGER.log("average_images_per_second", self._perf.get_value())
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/hooks/profiler_hook.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import horovod.tensorflow as hvd
class TrainingHook(tf.estimator.SessionRunHook):
def __init__(self, logger, max_steps, log_every=1):
self._log_every = log_every
self._iter_idx = 0
self.logger = logger
self.max_steps = max_steps
def before_run(self, run_context):
run_args = tf.estimator.SessionRunArgs(
fetches=[
'cross_loss_ref:0',
'dice_loss_ref:0',
'total_loss_ref:0',
]
)
return run_args
def after_run(self,
run_context,
run_values):
cross_loss, dice_loss, total_loss = run_values.results
if (self._iter_idx % self._log_every == 0) and (hvd.rank() == 0):
self.logger.log(step=(self._iter_idx, self.max_steps),
data={'train_ce_loss': float(cross_loss),
'train_dice_loss': float(dice_loss),
'train_total_loss': float(total_loss)})
self._iter_idx += 1
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/hooks/training_hook.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from utils.parse_results import process_performance_stats
class ProfilingHook(tf.estimator.SessionRunHook):
def __init__(self, logger, batch_size, log_every, warmup_steps, mode):
self._log_every = log_every
self._warmup_steps = warmup_steps
self._current_step = 0
self._global_batch_size = batch_size * hvd.size()
self._t0 = 0
self._timestamps = []
self.logger = logger
self.mode = mode
def before_run(self, run_context):
if self._current_step > self._warmup_steps:
self._t0 = time.time()
def after_run(self,
run_context,
run_values):
if self._current_step > self._warmup_steps:
self._timestamps.append(time.time() - self._t0)
self._current_step += 1
def begin(self):
pass
def end(self, session):
if hvd.rank() == 0:
stats = process_performance_stats(np.array(self._timestamps),
self._global_batch_size,
self.mode)
self.logger.log(step=(), data=stats)
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/utils/hooks/profiling_hook.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Model construction utils
This module provides a convenient way to create different topologies
based around UNet.
"""
import tensorflow as tf
from model.layers import output_block, upsample_block, bottleneck, downsample_block, input_block
def unet_v1(features, mode):
""" U-Net: Convolutional Networks for Biomedical Image Segmentation
Source:
https://arxiv.org/pdf/1505.04597
"""
skip_connections = []
out, skip = input_block(features, filters=64)
skip_connections.append(skip)
for idx, filters in enumerate([128, 256, 512]):
out, skip = downsample_block(out, filters=filters, idx=idx)
skip_connections.append(skip)
out = bottleneck(out, filters=1024, mode=mode)
for idx, filters in enumerate([512, 256, 128]):
out = upsample_block(out,
residual_input=skip_connections.pop(),
filters=filters,
idx=idx)
return output_block(out, residual_input=skip_connections.pop(), filters=64, n_classes=2)
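# Spatial-size bookkeeping for the stack above (standard unpadded U-Net
# arithmetic, shown here for illustration with the 572x572 inputs produced by
# the data loader): each 3x3 unpadded convolution shrinks every side by 2 and
# each pooling halves it, giving 572 -> 568 -> 284 -> ... -> 28 at the
# bottleneck, after which the stride-2 transposed convolutions of the decoder
# bring the feature map back up to 388x388 logits.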
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/model/unet.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
""" Contains a set of utilities that allow building the UNet model
"""
import tensorflow as tf
def _crop_and_concat(inputs, residual_input):
""" Perform a central crop of ``residual_input`` and concatenate to ``inputs``
Args:
inputs (tf.Tensor): Tensor with input
residual_input (tf.Tensor): Residual input
Return:
Concatenated tf.Tensor with the size of ``inputs``
"""
factor = inputs.shape[1].value / residual_input.shape[1].value
return tf.concat([inputs, tf.image.central_crop(residual_input, factor)], axis=-1)
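# Worked example (illustrative shapes): if ``inputs`` is [N, 56, 56, C1] and
# ``residual_input`` is [N, 64, 64, C2], then factor = 56 / 64 = 0.875, the
# central crop reduces the skip connection to [N, 56, 56, C2], and the
# concatenation yields [N, 56, 56, C1 + C2].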
def downsample_block(inputs, filters, idx):
""" UNet downsample block
Perform 2 unpadded convolutions with a specified number of filters and downsample
through max-pooling
Args:
inputs (tf.Tensor): Tensor with inputs
filters (int): Number of filters in convolution
Return:
Tuple of convolved ``inputs`` after and before downsampling
"""
out = inputs
with tf.name_scope('downsample_block_{}'.format(idx)):
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
return tf.layers.max_pooling2d(inputs=out, pool_size=(2, 2), strides=2), out
def upsample_block(inputs, residual_input, filters, idx):
""" UNet upsample block
Perform 2 unpadded convolutions with a specified number of filters and upsample
Args:
inputs (tf.Tensor): Tensor with inputs
residual_input (tf.Tensor): Residual input
filters (int): Number of filters in convolution
Return:
Convolved ``inputs`` after upsampling
"""
out = _crop_and_concat(inputs, residual_input)
with tf.name_scope('upsample_block_{}'.format(idx)):
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
out = tf.layers.conv2d(inputs=out,
filters=int(filters),
kernel_size=(3, 3),
activation=tf.nn.relu)
return tf.layers.conv2d_transpose(inputs=out,
filters=int(filters // 2),
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
activation=tf.nn.relu)
def bottleneck(inputs, filters, mode):
""" UNet central block
    Perform 2 unpadded convolutions with a specified number of filters, apply
    dropout before upsampling during training, and upsample
Args:
inputs (tf.Tensor): Tensor with inputs
filters (int): Number of filters in convolution
Return:
Convolved ``inputs`` after upsampling
"""
out = inputs
with tf.name_scope('bottleneck'):
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
training = (mode == tf.estimator.ModeKeys.TRAIN)
out = tf.layers.dropout(out, rate=0.5, training=training)
return tf.layers.conv2d_transpose(inputs=out,
filters=filters // 2,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
activation=tf.nn.relu)
def output_block(inputs, residual_input, filters, n_classes):
""" UNet output
Perform 3 unpadded convolutions, the last one with the same number
of channels as classes we want to classify
Args:
inputs (tf.Tensor): Tensor with inputs
residual_input (tf.Tensor): Residual input
filters (int): Number of filters in convolution
n_classes (int): Number of output classes
Return:
Convolved ``inputs`` with as many channels as classes
"""
out = _crop_and_concat(inputs, residual_input)
with tf.name_scope('output'):
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
return tf.layers.conv2d(inputs=out,
filters=n_classes,
kernel_size=(1, 1),
activation=None)
def input_block(inputs, filters):
""" UNet input block
Perform 2 unpadded convolutions with a specified number of filters and downsample
    through max-pooling. This is the first block of the network.
Args:
inputs (tf.Tensor): Tensor with inputs
filters (int): Number of filters in convolution
Return:
Tuple of convolved ``inputs`` after and before downsampling
"""
out = inputs
with tf.name_scope('input'):
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
out = tf.layers.conv2d(inputs=out,
filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
return tf.layers.max_pooling2d(inputs=out, pool_size=(2, 2), strides=2), out
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/model/layers.py |
import glob
import inspect
import os
import shutil
import subprocess
from typing import List, Callable
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def _compress(src_path: str, dst_path: str):
"""
Compress source path into destination path
:param src_path: (str) Source path
:param dst_path: (str) Destination path
"""
print('[*] Compressing...')
shutil.make_archive(dst_path, 'zip', src_path)
print('[*] Compressed the contents in: {}.zip'.format(dst_path))
def _print_input(func: Callable):
"""
Decorator printing function name and args
:param func: (Callable) Decorated function
:return: Wrapped call
"""
def wrapper(*args, **kwargs):
"""
Print the name and arguments of a function
        :param args: Positional arguments
:param kwargs: Keyword arguments
:return: Original function call
"""
tf.logging.set_verbosity(tf.logging.ERROR)
func_args = inspect.signature(func).bind(*args, **kwargs).arguments
func_args_str = ''.join('\t{} = {!r}\n'.format(*item) for item in func_args.items())
print('[*] Running \'{}\' with arguments:'.format(func.__qualname__))
print(func_args_str[:-1])
return func(*args, **kwargs)
return wrapper
def _parse_placeholder_types(values: str):
"""
    Extracts placeholder types from a comma-separated list.
:param values: (str) Placeholder types
:return: (List) Placeholder types
"""
values = [int(value) for value in values.split(",")]
return values if len(values) > 1 else values[0]
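# Illustrative behaviour: _parse_placeholder_types("1,3") returns [1, 3],
# while _parse_placeholder_types("1") returns the scalar 1.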
def _optimize_checkpoint_for_inference(graph_path: str,
input_names: List[str],
output_names: List[str]):
"""
Removes Horovod and training related information from the graph
:param graph_path: (str) Path to the graph.pbtxt file
:param input_names: (str) Input node names
:param output_names: (str) Output node names
"""
print('[*] Optimizing graph for inference ...')
input_graph_def = graph_pb2.GraphDef()
with gfile.Open(graph_path, "rb") as f:
data = f.read()
text_format.Merge(data.decode("utf-8"), input_graph_def)
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
input_graph_def,
input_names,
output_names,
_parse_placeholder_types(str(dtypes.float32.as_datatype_enum)),
False)
print('[*] Saving original graph in: {}'.format(graph_path + '.old'))
shutil.move(graph_path, graph_path + '.old')
print('[*] Writing down optimized graph ...')
graph_io.write_graph(output_graph_def,
os.path.dirname(graph_path),
os.path.basename(graph_path))
@_print_input
def to_savedmodel(input_shape: str,
model_fn: Callable,
checkpoint_dir: str,
output_dir: str,
input_names: List[str],
output_names: List[str],
use_amp: bool,
use_xla: bool,
compress: bool):
"""
Export checkpoint to Tensorflow savedModel
:param input_shape: (str) Input shape to the model in format [batch, height, width, channels]
:param model_fn: (Callable) Estimator's model_fn
:param checkpoint_dir: (str) Directory where checkpoints are stored
:param output_dir: (str) Output directory for storage of the generated savedModel
:param input_names: (List[str]) Input node names
:param output_names: (List[str]) Output node names
:param use_amp: (bool )Enable TF-AMP
:param use_xla: (bool) Enable XLA
:param compress: (bool) Compress output
"""
assert os.path.exists(checkpoint_dir), 'Path not found: {}'.format(checkpoint_dir)
assert input_shape is not None, 'Input shape must be provided'
_optimize_checkpoint_for_inference(os.path.join(checkpoint_dir, 'graph.pbtxt'), input_names, output_names)
try:
ckpt_path = os.path.splitext([p for p in glob.iglob(os.path.join(checkpoint_dir, '*.index'))][0])[0]
except IndexError:
raise ValueError('Could not find checkpoint in directory: {}'.format(checkpoint_dir))
config_proto = tf.compat.v1.ConfigProto()
config_proto.allow_soft_placement = True
config_proto.log_device_placement = False
config_proto.gpu_options.allow_growth = True
config_proto.gpu_options.force_gpu_compatible = True
if use_amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
if use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
run_config = tf.estimator.RunConfig(
model_dir=None,
tf_random_seed=None,
save_summary_steps=1e9, # disabled
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=config_proto,
keep_checkpoint_max=None,
keep_checkpoint_every_n_hours=1e9, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None
)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=ckpt_path,
config=run_config,
params={'dtype': tf.float16 if use_amp else tf.float32}
)
print('[*] Exporting the model ...')
input_type = tf.float16 if use_amp else tf.float32
def get_serving_input_receiver_fn():
def serving_input_receiver_fn():
features = tf.placeholder(dtype=input_type, shape=input_shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features)
return serving_input_receiver_fn
export_path = estimator.export_saved_model(
export_dir_base=output_dir,
serving_input_receiver_fn=get_serving_input_receiver_fn(),
checkpoint_path=ckpt_path
)
print('[*] Done! path: `%s`' % export_path.decode())
if compress:
_compress(export_path.decode(), os.path.join(output_dir, 'saved_model'))
@_print_input
def to_tf_trt(savedmodel_dir: str,
output_dir: str,
precision: str,
feed_dict_fn: Callable,
num_runs: int,
output_tensor_names: List[str],
compress: bool):
"""
Export Tensorflow savedModel to TF-TRT
:param savedmodel_dir: (str) Input directory containing a Tensorflow savedModel
:param output_dir: (str) Output directory for storage of the generated TF-TRT exported model
:param precision: (str) Desired precision of the network (FP32, FP16 or INT8)
:param feed_dict_fn: (Callable) Input tensors for INT8 calibration. Model specific.
:param num_runs: (int) Number of calibration runs.
:param output_tensor_names: (List) Name of the output tensor for graph conversion. Model specific.
:param compress: (bool) Compress output
"""
if savedmodel_dir is None or not os.path.exists(savedmodel_dir):
raise FileNotFoundError('savedmodel_dir not found: {}'.format(savedmodel_dir))
if os.path.exists(output_dir):
print('[*] Output dir \'{}\' is not empty. Cleaning up ...'.format(output_dir))
shutil.rmtree(output_dir)
print('[*] Converting model...')
converter = trt.TrtGraphConverter(input_saved_model_dir=savedmodel_dir,
precision_mode=precision)
converter.convert()
if precision == 'INT8':
print('[*] Running INT8 calibration ...')
converter.calibrate(fetch_names=output_tensor_names, num_runs=num_runs, feed_dict_fn=feed_dict_fn)
converter.save(output_dir)
print('[*] Done! TF-TRT saved_model stored in: `%s`' % output_dir)
if compress:
_compress('tftrt_saved_model', output_dir)
@_print_input
def to_onnx(input_dir: str, output_dir: str, compress: bool):
"""
Convert Tensorflow savedModel to ONNX with tf2onnx
:param input_dir: (str) Input directory with a Tensorflow savedModel
:param output_dir: (str) Output directory where to store the ONNX version of the model
:param compress: (bool) Compress output
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
file_name = os.path.join(output_dir, 'model.onnx')
print('[*] Converting model...')
ret = subprocess.call(['python', '-m', 'tf2onnx.convert',
'--saved-model', input_dir,
'--output', file_name],
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
if ret > 0:
raise RuntimeError('tf2onnx.convert has failed with error: {}'.format(ret))
print('[*] Done! ONNX file stored in: %s' % file_name)
if compress:
_compress(output_dir, 'onnx_model')
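# Minimal end-to-end usage sketch (illustrative only: the paths, node names and
# shape below are assumptions, `model_fn` must be supplied by the caller, and
# this helper is not part of the original export pipeline). Note that although
# the annotation on to_savedmodel says str, the value is used directly as the
# placeholder shape, so a list of ints is passed here.
def _example_export_pipeline(model_fn: Callable):
    to_savedmodel(input_shape=[1, 572, 572, 1],
                  model_fn=model_fn,
                  checkpoint_dir='/results/checkpoints',
                  output_dir='/results/savedmodel',
                  input_names=['input_tensor'],
                  output_names=['output_tensor'],
                  use_amp=False,
                  use_xla=False,
                  compress=False)
    to_tf_trt(savedmodel_dir='/results/savedmodel',
              output_dir='/results/tftrt',
              precision='FP16',
              feed_dict_fn=None,  # only consulted for INT8 calibration
              num_runs=1,
              output_tensor_names=['output_tensor:0'],
              compress=False)
    to_onnx(input_dir='/results/savedmodel',
            output_dir='/results/onnx',
            compress=False)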
| DeepLearningExamples-master | TensorFlow/Segmentation/UNet_Medical/tf_exports/tf_export.py |
#!/usr/bin/env python
# coding=utf-8
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import zipfile
from io import open
if os.path.exists('train.txt'):
print('Tokenized text8 already exists - skipping processing')
sys.exit()
zipfile.ZipFile('text8.zip').extractall()
data = open('text8', 'r', encoding='utf-8').read()
print('Length of text8: {}'.format(len(data)))
num_test_chars = 5000000
train_data = data[: -2 * num_test_chars]
valid_data = data[-2 * num_test_chars: -num_test_chars]
test_data = data[-num_test_chars:]
for fn, part in [('train.txt', train_data), ('valid.txt', valid_data), ('test.txt', test_data)]:
print('{} will have {} bytes'.format(fn, len(part)))
print('- Tokenizing...')
# Change space ' ' to underscore '_'
part_str = ' '.join(['_' if c == ' ' else c for c in part.strip()])
print('- Writing...')
    open(fn, 'w', encoding='utf-8').write(part_str)
    open(fn + '.raw', 'w', encoding='utf-8').write(part)
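# Round-trip sanity check (an illustrative addition, not in the original
# script): the space-separated character tokens can be detokenized by joining
# them and mapping '_' back to ' '.
def detokenize(path='train.txt'):
    tokens = open(path, 'r', encoding='utf-8').read().split(' ')
    return ''.join(' ' if t == '_' else t for t in tokens)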
| DeepLearningExamples-master | TensorFlow/LanguageModeling/Transformer-XL/prep_text8.py |
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
class LAMBOptimizer(optimizer.Optimizer):
  def __init__(self, learning_rate=0.001, wd=0.01, beta1=0.9, beta2=0.999, epsilon=1e-6,
               use_locking=False, name="LAMB"):
super(LAMBOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._wd = wd
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
self._wd_t = None
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots(self, var_list):
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=self._beta1,
name="beta1_power",
colocate_with=first_var)
self._create_non_slot_variable(initial_value=self._beta2,
name="beta2_power",
colocate_with=first_var)
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
beta1 = self._call_if_callable(self._beta1)
beta2 = self._call_if_callable(self._beta2)
epsilon = self._call_if_callable(self._epsilon)
wd = self._call_if_callable(self._wd)
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
self._wd_t = ops.convert_to_tensor(wd, name="wd")
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
eps = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
wd_lambda = math_ops.cast(self._wd_t, var.dtype.base_dtype)
v = self.get_slot(var, "v")
v_t = v.assign(beta2_t * v + (1. - beta2_t) * grad**2)
m = self.get_slot(var, "m")
m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
    # LAMB trust ratio: r1 is the L2 norm of the weights and r2 the L2 norm of
    # the Adam-style step (with decoupled weight decay). Scaling the update by
    # min(r1, 10) / r2 keeps the effective step size proportional to the layer,
    # falling back to 1.0 when either norm is zero.
    r1 = tf.sqrt(tf.reduce_sum(tf.square(var)))
    step = m_t / (tf.sqrt(v_t) + eps) + wd_lambda * var
    r2 = tf.sqrt(tf.reduce_sum(tf.square(step)))
    ratio = array_ops.where(math_ops.greater(r1, 0), array_ops.where(
        math_ops.greater(r2, 0), tf.minimum(r1, 10) / r2, 1.0), 1.0)
var_update = state_ops.assign_sub(var, lr_t * ratio * step)
return control_flow_ops.group(*[var_update, v_t, m_t])
  def _resource_apply_dense(self, grad, var):
    # Resource variables are not supported; only reference variables are
    # handled via _apply_dense / _apply_sparse.
    return None
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
step = m_t / (v_sqrt + epsilon_t)
w_norm = linalg_ops.norm(var, ord=2)
g_norm = linalg_ops.norm(step, ord=2)
ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(
math_ops.greater(g_norm, 0), tf.minimum(w_norm, 10) / g_norm, 1.0), 1.0)
var_update = state_ops.assign_sub(
var, ratio * lr_t * step, use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values,
var,
grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x,
i,
v,
use_locking=self._use_locking))
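# Toy usage sketch (an illustrative assumption, not part of the original file):
# minimize a quadratic with LAMB in TF1-style graph mode. Reference variables
# are forced because _resource_apply_dense is not implemented.
def _example_lamb_step():
  w = tf.compat.v1.get_variable(
      'toy_w', initializer=tf.constant([5.0, -3.0]), use_resource=False)
  loss = tf.reduce_sum(tf.square(w))
  train_op = LAMBOptimizer(learning_rate=0.1, wd=0.01).minimize(loss)
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for _ in range(100):
      sess.run(train_op)
    return sess.run(loss)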
| DeepLearningExamples-master | TensorFlow/LanguageModeling/Transformer-XL/tf/lamb.py |
import tensorflow as tf
def positional_embedding(pos_seq, inv_freq, bsz=None):
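  # Shape note: inv_freq has length d_model // 2, so sinusoid_inp is
  # [len, d_model // 2] and the result is [len, 1, d_model] (or
  # [len, bsz, d_model] when bsz is given), matching the [len, batch, dim]
  # layout used throughout this file.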
sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
if bsz is not None:
return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
else:
return pos_emb[:, None, :]
def positionwise_FF(inp, d_model, d_inner, dropout, kernel_initializer,
scope='ff', is_training=True):
output = inp
with tf.variable_scope(scope):
output = tf.layers.dense(inp, d_inner, activation=tf.nn.relu,
kernel_initializer=kernel_initializer,
name='layer_1')
output = tf.layers.dropout(output, dropout, training=is_training,
name='drop_1')
output = tf.layers.dense(output, d_model,
kernel_initializer=kernel_initializer,
name='layer_2')
output = tf.layers.dropout(output, dropout, training=is_training,
name='drop_2')
output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1)
return output
def rel_shift(x):
x_size = tf.shape(x)
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
x = tf.reshape(x, [x_size[0], x_size[1], x_size[3] + 1, x_size[2]])
x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
x = tf.reshape(x, x_size)
return x
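# Illustrative NumPy re-implementation of rel_shift above (an addition, not in
# the original file): the pad/reshape/slice trick shifts row i of a
# [..., qlen, klen] score matrix left by (qlen - 1 - i), turning
# absolute-position columns into relative-distance columns; the spilled-over
# entries are removed later by the attention mask.
def _rel_shift_numpy_demo():
  import numpy as np
  x = np.arange(12, dtype=np.float32).reshape(1, 1, 3, 4)
  padded = np.pad(x, [(0, 0), (0, 0), (0, 0), (1, 0)], mode='constant')
  # resulting rows: [2, 3, 0, 4], [5, 6, 7, 0], [8, 9, 10, 11]
  return padded.reshape(1, 1, 5, 3)[:, :, 1:, :].reshape(1, 1, 3, 4)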
def rel_multihead_attn(w, r, r_w_bias, r_r_bias, attn_mask, mems, d_model,
n_head, d_head, dropout, dropatt, is_training,
kernel_initializer, scope='rel_attn'):
scale = 1 / (d_head ** 0.5)
with tf.variable_scope(scope):
qlen = tf.shape(w)[0]
rlen = tf.shape(r)[0]
bsz = tf.shape(w)[1]
    cat = (tf.concat([mems, w], 0)
           if mems is not None and mems.shape.ndims > 1 else w)
w_heads = tf.layers.dense(cat, 3 * n_head * d_head, use_bias=False,
kernel_initializer=kernel_initializer, name='qkv')
r_head_k = tf.layers.dense(r, n_head * d_head, use_bias=False,
kernel_initializer=kernel_initializer, name='r')
w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, -1)
w_head_q = w_head_q[-qlen:]
klen = tf.shape(w_head_k)[0]
w_head_q = tf.reshape(w_head_q, [qlen, bsz, n_head, d_head])
w_head_k = tf.reshape(w_head_k, [klen, bsz, n_head, d_head])
w_head_v = tf.reshape(w_head_v, [klen, bsz, n_head, d_head])
r_head_k = tf.reshape(r_head_k, [rlen, n_head, d_head])
rw_head_q = w_head_q + r_w_bias
rr_head_q = w_head_q + r_r_bias
AC = tf.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k)
BD = tf.einsum('ibnd,jnd->bnij', rr_head_q, r_head_k)
BD = rel_shift(BD)
attn_score = (AC + BD) * scale
attn_mask_t = attn_mask[None, None, :, :]
attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t
attn_prob = tf.nn.softmax(attn_score, 3)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v)
size_t = tf.shape(attn_vec)
attn_vec = tf.reshape(attn_vec, [size_t[0], size_t[1], n_head * d_head])
attn_out = tf.layers.dense(attn_vec, d_model, use_bias=False,
kernel_initializer=kernel_initializer, name='o')
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
output = tf.contrib.layers.layer_norm(attn_out + w, begin_norm_axis=-1)
return output
def embedding_lookup(lookup_table, x, use_tpu=True):
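  # The one-hot einsum path is used because a dense matmul is typically faster
  # than gather on TPUs; on GPU the tf.nn.embedding_lookup branch is preferred.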
if use_tpu:
n_token = tf.shape(lookup_table)[0]
one_hot_idx = tf.one_hot(x, n_token)
if one_hot_idx.shape.ndims == 2:
return tf.einsum('nd,in->id', lookup_table, one_hot_idx)
else:
return tf.einsum('nd,ibn->ibd', lookup_table, one_hot_idx)
else:
return tf.nn.embedding_lookup(lookup_table, x)
def mask_adaptive_embedding_lookup(x, n_token, d_embed, d_proj, cutoffs, initializer,
proj_initializer, div_val=1,
proj_same_dim=True,
scope='adaptive_embed', **kwargs):
emb_scale = d_proj ** 0.5
with tf.variable_scope(scope):
if div_val == 1:
lookup_table = tf.get_variable('lookup_table', [n_token, d_embed],
initializer=initializer)
y = embedding_lookup(lookup_table, x, use_tpu=False)
if d_proj != d_embed:
proj_W = tf.get_variable('proj_W', [d_embed, d_proj],
initializer=proj_initializer)
y = tf.einsum('ibe,ed->ibd', y, proj_W)
else:
proj_W = None
ret_params = [lookup_table, proj_W]
else:
tables, projs = [], []
cutoff_ends = [0] + cutoffs + [n_token]
x_size = tf.shape(x)
y = tf.zeros([x_size[0], x_size[1], d_proj])
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
mask = (x >= l_idx) & (x < r_idx)
cur_x = tf.boolean_mask(x, mask) - l_idx
cur_d_embed = d_embed // (div_val ** i)
lookup_table = tf.get_variable('lookup_table',
[r_idx - l_idx, cur_d_embed],
initializer=initializer)
cur_y = embedding_lookup(lookup_table, cur_x, use_tpu=False)
if d_proj == cur_d_embed and not proj_same_dim:
proj_W = None
else:
proj_W = tf.get_variable('proj_W', [cur_d_embed, d_proj],
initializer=proj_initializer)
cur_y = tf.einsum('id,de->ie', cur_y, proj_W)
mask_idx = tf.to_int64(tf.where(mask))
y += tf.scatter_nd(mask_idx, cur_y, tf.to_int64(tf.shape(y)))
tables.append(lookup_table)
projs.append(proj_W)
ret_params = [tables, projs]
y *= emb_scale
return y, ret_params
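# Illustrative helper (an addition, not in the original file): shows how
# div_val shrinks the embedding width of the rarer-token bins defined by the
# cutoffs in the adaptive embedding above.
def _adaptive_bin_dims_demo(n_token=10000, d_embed=512, div_val=4,
                            cutoffs=(2000, 6000)):
  cutoff_ends = [0] + list(cutoffs) + [n_token]
  # returns [(0, 2000, 512), (2000, 6000, 128), (6000, 10000, 32)]
  return [(cutoff_ends[i], cutoff_ends[i + 1], d_embed // (div_val ** i))
          for i in range(len(cutoff_ends) - 1)]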
def mul_adaptive_embedding_lookup(x, n_token, d_embed, d_proj, cutoffs, initializer,
proj_initializer, div_val=1, perms=None,
proj_same_dim=True,
scope='adaptive_embed'):
"""
perms: If None, first compute W = W1 x W2 (projection for each bin),
and then compute X x W (embedding lookup). If not None,
use bin-based embedding lookup with max_bin_size defined by
the shape of perms.
"""
emb_scale = d_proj ** 0.5
with tf.variable_scope(scope):
if div_val == 1:
lookup_table = tf.get_variable('lookup_table', [n_token, d_embed],
initializer=initializer)
y = embedding_lookup(lookup_table, x)
if d_proj != d_embed:
proj_W = tf.get_variable('proj_W', [d_embed, d_proj],
initializer=proj_initializer)
y = tf.einsum('ibe,ed->ibd', y, proj_W)
else:
proj_W = None
ret_params = [lookup_table, proj_W]
else:
tables, projs = [], []
cutoff_ends = [0] + cutoffs + [n_token]
x_size = tf.shape(x)
if perms is None:
cat_lookup = []
else:
cat_lookup = tf.zeros([x_size[0], x_size[1], d_proj])
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
cur_d_embed = d_embed // (div_val ** i)
lookup_table = tf.get_variable('lookup_table',
[r_idx - l_idx, cur_d_embed],
initializer=initializer)
if cur_d_embed == d_proj and not proj_same_dim:
proj_W = None
else:
proj_W = tf.get_variable('proj_W', [cur_d_embed, d_proj],
initializer=proj_initializer)
if perms is None:
cat_lookup.append(tf.einsum('ie,ed->id', lookup_table, proj_W))
else:
            # speed up the computation of the first bin
            # and also save some memory
if i == 0:
cur_y = embedding_lookup(lookup_table, tf.minimum(x, r_idx - 1))
if proj_W is not None:
cur_y = tf.einsum('ibe,ed->ibd', cur_y, proj_W)
cur_y *= perms[i][:, :, None]
cat_lookup += cur_y
else:
cur_x = tf.einsum('ib,ibk->k', tf.to_float(x - l_idx), perms[i])
cur_x = tf.to_int32(cur_x)
cur_y = embedding_lookup(lookup_table, cur_x)
if proj_W is not None:
cur_y = tf.einsum('ke,ed->kd', cur_y, proj_W)
cat_lookup += tf.einsum('kd,ibk->ibd', cur_y, perms[i])
tables.append(lookup_table)
projs.append(proj_W)
if perms is None:
cat_lookup = tf.concat(cat_lookup, 0)
y = embedding_lookup(cat_lookup, x)
else:
y = cat_lookup
ret_params = [tables, projs]
y *= emb_scale
return y, ret_params
def mask_adaptive_logsoftmax(hidden, target, n_token, d_embed, d_proj, cutoffs,
params, tie_projs,
initializer=None, proj_initializer=None,
div_val=1, scope='adaptive_softmax',
proj_same_dim=True,
return_mean=True, **kwargs):
def _logit(x, W, b, proj):
y = x
if proj is not None:
y = tf.einsum('ibd,ed->ibe', y, proj)
return tf.einsum('ibd,nd->ibn', y, W) + b
params_W, params_projs = params[0], params[1]
def _gather_logprob(logprob, target):
lp_size = tf.shape(logprob)
r = tf.range(lp_size[0])
idx = tf.stack([r, target], 1)
return tf.gather_nd(logprob, idx)
with tf.variable_scope(scope):
if len(cutoffs) == 0:
softmax_b = tf.get_variable('bias', [n_token],
initializer=tf.zeros_initializer())
output = _logit(hidden, params_W, softmax_b, params_projs)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target,
logits=output)
else:
cutoff_ends = [0] + cutoffs + [n_token]
nll = tf.zeros_like(target, dtype=tf.float32)
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
mask = (target >= l_idx) & (target < r_idx)
mask_idx = tf.where(mask)
cur_target = tf.boolean_mask(target, mask) - l_idx
cur_d_embed = d_embed // (div_val ** i)
if div_val == 1:
cur_W = params_W[l_idx: r_idx]
else:
cur_W = params_W[i]
cur_b = tf.get_variable('b', [r_idx - l_idx],
initializer=tf.zeros_initializer())
if tie_projs[i]:
if div_val == 1:
cur_proj = params_projs
else:
cur_proj = params_projs[i]
else:
if (div_val == 1 or not proj_same_dim) and d_proj == cur_d_embed:
cur_proj = None
else:
cur_proj = tf.get_variable('proj', [cur_d_embed, d_proj],
initializer=proj_initializer)
if i == 0:
cluster_W = tf.get_variable('cluster_W', [len(cutoffs), d_embed],
initializer=tf.zeros_initializer())
cluster_b = tf.get_variable('cluster_b', [len(cutoffs)],
initializer=tf.zeros_initializer())
cur_W = tf.concat([cur_W, cluster_W], 0)
cur_b = tf.concat([cur_b, cluster_b], 0)
head_logit = _logit(hidden, cur_W, cur_b, cur_proj)
head_logprob = tf.nn.log_softmax(head_logit)
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_logprob = _gather_logprob(cur_head_logprob, cur_target)
else:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_hidden = tf.boolean_mask(hidden, mask)
tail_logit = tf.squeeze(_logit(
cur_hidden[None], cur_W, cur_b, cur_proj), 0)
tail_logprob = tf.nn.log_softmax(tail_logit)
cur_logprob = (cur_head_logprob[:, cutoff_ends[1] + i - 1] +
_gather_logprob(tail_logprob, cur_target))
nll += tf.scatter_nd(mask_idx, -cur_logprob,
tf.to_int64(tf.shape(nll)))
if return_mean:
nll = tf.reduce_mean(nll)
return nll
def mul_adaptive_logsoftmax(hidden, target, n_token, d_embed, d_proj, cutoffs,
params, tie_projs,
initializer=None, proj_initializer=None,
div_val=1, perms=None, proj_same_dim=True,
scope='adaptive_softmax',
**kwargs):
def _logit(x, W, b, proj):
y = x
if x.shape.ndims == 3:
if proj is not None:
y = tf.einsum('ibd,ed->ibe', y, proj)
return tf.einsum('ibd,nd->ibn', y, W) + b
else:
if proj is not None:
y = tf.einsum('id,ed->ie', y, proj)
return tf.einsum('id,nd->in', y, W) + b
params_W, params_projs = params[0], params[1]
with tf.variable_scope(scope):
if len(cutoffs) == 0:
softmax_b = tf.get_variable('bias', [n_token],
initializer=tf.zeros_initializer())
output = _logit(hidden, params_W, softmax_b, params_projs)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target,
logits=output)
nll = tf.reduce_mean(nll)
else:
total_loss, total_cnt = 0, 0
cutoff_ends = [0] + cutoffs + [n_token]
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
cur_d_embed = d_embed // (div_val ** i)
if div_val == 1:
cur_W = params_W[l_idx: r_idx]
else:
cur_W = params_W[i]
cur_b = tf.get_variable('b', [r_idx - l_idx],
initializer=tf.zeros_initializer())
if tie_projs[i]:
if div_val == 1:
cur_proj = params_projs
else:
cur_proj = params_projs[i]
else:
if (div_val == 1 or not proj_same_dim) and d_proj == cur_d_embed:
cur_proj = None
else:
cur_proj = tf.get_variable('proj', [cur_d_embed, d_proj],
initializer=proj_initializer)
if i == 0:
cluster_W = tf.get_variable('cluster_W', [len(cutoffs), d_embed],
initializer=tf.zeros_initializer())
cluster_b = tf.get_variable('cluster_b', [len(cutoffs)],
initializer=tf.zeros_initializer())
cur_W = tf.concat([cur_W, cluster_W], 0)
cur_b = tf.concat([cur_b, cluster_b], 0)
head_logit = _logit(hidden, cur_W, cur_b, cur_proj)
head_target = kwargs.get("head_target")
head_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=head_target,
logits=head_logit)
masked_loss = head_nll * perms[i]
total_loss += tf.reduce_sum(masked_loss)
total_cnt += tf.reduce_sum(perms[i])
else:
cur_head_nll = tf.einsum('ib,ibk->k', head_nll, perms[i])
cur_hidden = tf.einsum('ibd,ibk->kd', hidden, perms[i])
tail_logit = _logit(cur_hidden, cur_W, cur_b, cur_proj)
tail_target = tf.einsum('ib,ibk->k', tf.to_float(target - l_idx),
perms[i])
tail_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.to_int32(tail_target),
logits=tail_logit)
sum_nll = cur_head_nll + tail_nll
mask = tf.reduce_sum(perms[i], [0, 1])
masked_loss = sum_nll * mask
total_loss += tf.reduce_sum(masked_loss)
total_cnt += tf.reduce_sum(mask)
nll = total_loss / total_cnt
return nll
def _create_mask(qlen, mlen, same_length=False):
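  # Example: _create_mask(3, 2) returns (1 = masked)
  #   [[0. 0. 0. 1. 1.]
  #    [0. 0. 0. 0. 1.]
  #    [0. 0. 0. 0. 0.]]
  # so query i sees all mlen memory slots plus segment positions <= i;
  # same_length=True additionally gives every query the same attention span.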
attn_mask = tf.ones([qlen, qlen])
mask_u = tf.matrix_band_part(attn_mask, 0, -1)
mask_dia = tf.matrix_band_part(attn_mask, 0, 0)
attn_mask_pad = tf.zeros([qlen, mlen])
ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
if same_length:
mask_l = tf.matrix_band_part(attn_mask, -1, 0)
ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
return ret
def _cache_mem(curr_out, prev_mem, mem_len=None):
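  # Keep the last mem_len hidden states of [previous memory; current output]
  # and cut the gradient at the segment boundary, implementing the
  # Transformer-XL recurrence without backpropagating across segments.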
if mem_len is None or prev_mem is None:
new_mem = curr_out
elif mem_len == 0:
return prev_mem
else:
new_mem = tf.concat([prev_mem, curr_out], 0)[- mem_len:]
return tf.stop_gradient(new_mem)
def transformer(dec_inp, target, mems, n_token, n_layer, d_model, d_embed,
n_head, d_head, d_inner, dropout, dropatt,
initializer, is_training, proj_initializer=None,
mem_len=None, cutoffs=[], div_val=1, tie_projs=[],
same_length=False, clamp_len=-1, use_tpu=False,
input_perms=None, target_perms=None, head_target=None,
untie_r=False, proj_same_dim=True,
scope='transformer'):
"""
cutoffs: a list of python int. Cutoffs for adaptive softmax.
tie_projs: a list of python bools. Whether to tie the projections.
use_tpu: if True, use one_hot in embedding lookup and bin-based implementation
of adaptive softmax.
  perms: a list of tensors. Each tensor should be of size [len, bsz, bin_size].
Only used in the adaptive setting.
"""
new_mems = []
with tf.variable_scope(scope):
if untie_r:
r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head],
initializer=initializer)
r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head],
initializer=initializer)
else:
r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head],
initializer=initializer)
r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head],
initializer=initializer)
qlen = tf.shape(dec_inp)[0]
mlen = tf.shape(mems[0])[0] if mems is not None else 0
klen = mlen + qlen
if proj_initializer is None:
proj_initializer = initializer
lookup_fn = (mul_adaptive_embedding_lookup if use_tpu else
mask_adaptive_embedding_lookup)
embeddings, shared_params = lookup_fn(
x=dec_inp,
n_token=n_token,
d_embed=d_embed,
d_proj=d_model,
cutoffs=cutoffs,
initializer=initializer,
proj_initializer=proj_initializer,
        div_val=div_val,
perms=input_perms,
proj_same_dim=proj_same_dim)
attn_mask = _create_mask(qlen, mlen, same_length)
pos_seq = tf.range(klen - 1, -1, -1.0)
if clamp_len > 0:
pos_seq = tf.minimum(pos_seq, clamp_len)
inv_freq = 1 / (10000 ** (tf.range(0, d_model, 2.0) / d_model))
pos_emb = positional_embedding(pos_seq, inv_freq)
output = tf.layers.dropout(embeddings, dropout, training=is_training)
pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training)
if mems is None:
mems = [None] * n_layer
for i in range(n_layer):
# cache new mems
new_mems.append(_cache_mem(output, mems[i], mem_len))
with tf.variable_scope('layer_{}'.format(i)):
output = rel_multihead_attn(
w=output,
r=pos_emb,
r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
attn_mask=attn_mask,
mems=mems[i],
d_model=d_model,
n_head=n_head,
d_head=d_head,
dropout=dropout,
dropatt=dropatt,
is_training=is_training,
kernel_initializer=initializer)
output = positionwise_FF(
inp=output,
d_model=d_model,
d_inner=d_inner,
dropout=dropout,
kernel_initializer=initializer,
is_training=is_training)
output = tf.layers.dropout(output, dropout, training=is_training)
logsoftmax_fn = (mul_adaptive_logsoftmax if use_tpu else
mask_adaptive_logsoftmax)
loss = logsoftmax_fn(
hidden=output,
target=target,
n_token=n_token,
d_embed=d_embed,
d_proj=d_model,
cutoffs=cutoffs,
params=shared_params,
tie_projs=tie_projs,
initializer=initializer,
proj_initializer=proj_initializer,
div_val=div_val,
perms=target_perms,
head_target=head_target,
proj_same_dim=proj_same_dim)
return loss, new_mems
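# Minimal graph-construction sketch (an illustration under assumed
# hyper-parameters; TF 1.x graph mode). With the default empty cutoffs the
# plain softmax path is taken and no adaptive projections are created.
def _example_build_transformer(qlen=16, bsz=4, mem_len=32):
  dec_inp = tf.compat.v1.placeholder(tf.int32, [qlen, bsz])
  target = tf.compat.v1.placeholder(tf.int32, [qlen, bsz])
  mems = [tf.compat.v1.placeholder(tf.float32, [mem_len, bsz, 128])
          for _ in range(2)]
  loss, new_mems = transformer(
      dec_inp=dec_inp, target=target, mems=mems, n_token=1000,
      n_layer=2, d_model=128, d_embed=128, n_head=4, d_head=32,
      d_inner=256, dropout=0.1, dropatt=0.1,
      initializer=tf.random_normal_initializer(stddev=0.02),
      is_training=True, mem_len=mem_len)
  return loss, new_mems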
| DeepLearningExamples-master | TensorFlow/LanguageModeling/Transformer-XL/tf/model.py |