seed (string, lengths 25–2.89k) | seed_api (string, lengths 14–102) | index (int64, 0–14.8k) |
---|---|---|
import tensorflow as tf
with tf.variable_scope(name) as scope:
bn = tf.contrib.layers.batch_norm(
|
tensorflow.contrib.layers.batch_norm
| 9,600 |
from tensorflow.python.framework import ops as _ops
_ops.RegisterShape("KernelLabel")(None)
_old_outputs = [""]
|
tensorflow.python.framework.ops.RegisterShape
| 9,601 |
import tensorflow as tf
with tf.variable_scope("Model_Encoder_Layer"):
inputs = tf.concat(attention_outputs, axis = -1)
self.enc = [conv(inputs, d, name = "input_projection")]
for i in range(3):
if i % 2 == 0: # dropout every 2 blocks
self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout)
self.enc.append(
residual_block(self.enc[i],
num_blocks = 7,
num_conv_layers = 2,
kernel_size = 5,
|
tensorflow.nn.dropout
| 9,602 |
import tensorflow as tf
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
|
tensorflow.global_variables_initializer
| 9,603 |
import tensorflow as tf
action_dtype = self._parse_dtype(self._batch_env.action_space)
print(action_dtype)
with tf.variable_scope('env_temporary'):
self._observ = tf.Variable(
lambda: tf.zeros(batch_dims + observ_shape, observ_dtype),
name='observ', trainable=False)
self._action = tf.Variable(
lambda: tf.zeros(batch_dims + action_shape, action_dtype),
name='action', trainable=False)
self._reward = tf.Variable(
lambda: tf.zeros(batch_dims, tf.float32),
name='reward', trainable=False)
self._done = tf.Variable(
lambda: tf.cast(tf.ones(batch_dims), tf.bool),
name='done', trainable=False)
def __getattr__(self, name):
"""Forward unimplemented attributes to one of the original environments.
Args:
|
tensorflow.zeros
| 9,604 |
import tensorflow as tf
_validate_input_parameters(is_tensor=True, shape=shape)
num_dims = shape.size
tt_rank = np.ones(num_dims + 1)
tt_cores = num_dims * [None]
with tf.name_scope(name):
for i in range(num_dims):
curr_core_shape = (1, shape[i], 1)
tt_cores[i] = tf.zeros(curr_core_shape, dtype=dtype)
return TensorTrain(tt_cores, shape, tt_rank)
def eye(shape, dtype=tf.float32, name='t3f_eye'):
"""Creates an identity TT-matrix.
|
tensorflow.zeros
| 9,605 |
import tensorflow as tf
Args:
y: matrix of true probabilities same size as probs
probs: matrix of probabilities for the minibatch
eps: value to clip the probabilities at
class_weights: vector of relative weights to be assigned to each class
sumd: dimensions along which to sum the x-ent matrix
Returns:
cross entropy loss for each example in the minibatch
"""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
xent_mat = -y * tf.log(adjusted_probs)
if class_weights is not None:
xent_mat *= class_weights
return tf.reduce_sum(xent_mat, sumd)
def _SafeNegEntropy(probs, batch_size, eps=0.0001):
"""Computes negative entropy in a way that will not overflow."""
|
tensorflow.clip_by_value
| 9,606 |
import tensorflow as tf
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
|
tensorflow.layers.dense
| 9,607 |
import tensorflow as tf
>>> samples.dtype
dtype('float32')
"""
mu, var = self.build_posterior_mean_var(X, Y, test_points, True)
jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
V = tf.random_normal(V_shape, dtype=L.dtype)
samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
return tf.transpose(samples)
#samples = []
#for i in range(self.num_latent_functions):
# L = tf.cholesky(var[:, :, i] + jitter)
# V = tf.random_normal([tf.shape(L)[0], num_samples], dtype=L.dtype)
# samples.append(mu[:, i:i + 1] + tf.matmul(L, V)) # broadcast
#return tf.transpose(tf.pack(samples))
|
tensorflow.batch_matmul
| 9,608 |
import tensorflow as tf
{
"step": global_step,
"loss": loss,
"chars": tf.shape(features["chars"]),
"source": tf.shape(features["source"]),
#"bert": tf.shape(features["bert"]),
|
tensorflow.shape
| 9,609 |
import tensorflow as tf
tf.expand_dims(tf.nn.softmax(logits_reversed), 4))
for output in sorted(outputs_to_predictions):
predictions = outputs_to_predictions[output]
# Compute average prediction across different scales and flipped images.
predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
outputs_to_predictions[output] = tf.argmax(predictions, 3)
return outputs_to_predictions
def predict_labels(images, model_options, image_pyramid=None):
|
tensorflow.argmax
| 9,610 |
import tensorflow as tf
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select an action given observation.
See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
|
tensorflow.placeholder
| 9,611 |
from tensorflow.python.ops import nn
self._num_label_columns(),
weight_collections=[self._dnn_weight_collection],
bias_collections=[self._dnn_weight_collection],
name="dnn_logit")
self._add_hidden_layer_summary(logit, "dnn_logit")
return logit
def _add_hidden_layer_summary(self, value, tag):
# TODO(zakaria): Move this code to tf.learn and add test.
logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag,
nn.zero_fraction(value))
logging_ops.histogram_summary("%s:activation" % tag, value)
def _linear_logits(self, features):
logits, _, _ = layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._get_linear_feature_columns(),
num_outputs=self._num_label_columns(),
weight_collections=[self._linear_weight_collection],
name="linear")
|
tensorflow.python.ops.nn.zero_fraction
| 9,612 |
import tensorflow as tf
self.r = tf.placeholder(tf.float32, (None,1))
self.ac = tf.placeholder(tf.float32, (None, self.act_space))
|
tensorflow.placeholder
| 9,613 |
import tensorflow as tf
for i in range(num_enc_timesteps)]
dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)]
targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
for i in range(num_dec_timesteps)]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with tf.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope_name)
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
|
tensorflow.variable_scope
| 9,614 |
import tensorflow as tf
class Model(object):
def __init__(self, config, batch, word_mat=None, char_mat=None, trainable=True, opt=True, demo = False, graph = None):
self.config = config
self.demo = demo
self.graph = graph if graph is not None else tf.Graph()
with self.graph.as_default():
self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
self.dropout = tf.placeholder_with_default(0.0, (), name="dropout")
if self.demo:
self.c = tf.placeholder(tf.int32, [None, config.test_para_limit],"context")
self.q = tf.placeholder(tf.int32, [None, config.test_ques_limit],"question")
self.ch = tf.placeholder(tf.int32, [None, config.test_para_limit, config.char_limit],"context_char")
self.qh = tf.placeholder(tf.int32, [None, config.test_ques_limit, config.char_limit],"question_char")
self.y1 = tf.placeholder(tf.int32, [None, config.test_para_limit],"answer_index1")
self.y2 = tf.placeholder(tf.int32, [None, config.test_para_limit],"answer_index2")
else:
self.c, self.q, self.ch, self.qh, self.y1, self.y2, self.qa_id = batch.get_next()
# self.word_unk = tf.get_variable("word_unk", shape = [config.glove_dim], initializer=initializer())
self.word_mat = tf.get_variable("word_mat", initializer=tf.constant(
word_mat, dtype=tf.float32), trainable=False)
self.char_mat = tf.get_variable(
"char_mat", initializer=tf.constant(char_mat, dtype=tf.float32))
self.c_mask = tf.cast(self.c, tf.bool)
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
|
tensorflow.placeholder
| 9,615 |
import tensorflow as tf
vec row, should agree with vecs in shape[0]
Output:
A tensor of shape (vec_dim)
"""
if reduction_mode == 'max':
print('USING MAX POOLING FOR REDUCTION!')
vecs_reduced = tf.segment_max(vecs, segment_inds)
elif reduction_mode == 'mean':
print('USING AVG POOLING FOR REDUCTION!')
vecs_reduced = tf.segment_mean(vecs, segment_inds)
vecs_reduced.set_shape([num_segments, vecs.get_shape()[1]])
return vecs_reduced
|
tensorflow.segment_mean
| 9,616 |
import tensorflow as tf
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope_name)
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
_, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(tf.global_variables_initializer())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {v.name.split("/", 1)[-1]: v
for v in variables_fp_false}
matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(tf.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
|
tensorflow.global_variables_initializer
| 9,617 |
import tensorflow as tf
util.export_state_tuples(self._initial_state, self._initial_state_name)
util.export_state_tuples(self._final_state, self._final_state_name)
def import_ops(self):
"""Imports ops from collections."""
if self._is_training:
self._train_op = tf.get_collection_ref("train_op")[0]
self._lr = tf.get_collection_ref("lr")[0]
self._new_lr = tf.get_collection_ref("new_lr")[0]
self._lr_update = tf.get_collection_ref("lr_update")[0]
rnn_params = tf.get_collection_ref("rnn_params")
if self._cell and rnn_params:
params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
self._cell,
self._cell.params_to_canonical,
self._cell.canonical_to_params,
rnn_params,
base_variable_scope="Model/RNN")
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0]
num_replicas = FLAGS.num_gpus if self._name == "Train" else 1
self._initial_state = util.import_state_tuples(
self._initial_state, self._initial_state_name, num_replicas)
self._final_state = util.import_state_tuples(
|
tensorflow.contrib.cudnn_rnn.RNNParamsSaveable
| 9,618 |
from tensorflow.python.framework import ops
# Gradients registration.
@ops.RegisterGradient("SparseGather")
def _sparse_gather_grad(op, grad):
# x is shaped like full tensor [NHWC]
|
tensorflow.python.framework.ops.RegisterGradient
| 9,619 |
import tensorflow as tf
q = tf.transpose(q, [0, 2, 1, 3])
k = tf.transpose(k, [0, 2, 1, 3])
v = tf.transpose(v, [0, 2, 1, 3])
if attention_mask is not None:
|
tensorflow.transpose
| 9,620 |
import tensorflow as tf
return tf.constant(0, dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0, dtype=x.dtype)
def _transpose(self, x, perm):
sample_batch_ndims = tf.rank(x) - self.rightmost_transposed_ndims
perm = tf.concat([
tf.range(sample_batch_ndims),
sample_batch_ndims + perm,
], axis=0)
return tf.transpose(a=x, perm=perm)
def _maybe_validate_rightmost_transposed_ndims(
rightmost_transposed_ndims, validate_args, name=None):
|
tensorflow.range
| 9,621 |
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
gradient_multipliers=(
dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access
|
tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined._extract_embedding_lr_multipliers
| 9,622 |
from tensorflow.python.framework import ops
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fp), name=name)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
# TODO(ptucker): Validate range of values in labels?
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
ignore_mask=None,
weights=None,
|
tensorflow.python.framework.ops.add_to_collections
| 9,623 |
import tensorflow as tf
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
|
tensorflow.layers.dense
| 9,624 |
import tensorflow as tf
return sampled
with argscope([Conv2D, FullyConnected], nl=tf.nn.relu):
with tf.variable_scope('STN1'):
sampled1 = get_stn(image)
with tf.variable_scope('STN2'):
sampled2 = get_stn(image)
# For visualization in tensorboard
with tf.name_scope('visualization'):
padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w
transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
tf.summary.image('visualize',
tf.expand_dims(stacked, -1), max_outputs=30)
sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
logits = (LinearWrap(sampled)
.FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)
.FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)
.FullyConnected('fct', out_dim=19, nl=tf.identity)())
tf.nn.softmax(logits, name='prob')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
|
tensorflow.concat
| 9,625 |
import tensorflow as tf
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
|
tensorflow.variable_scope
| 9,626 |
import tensorflow as tf
>>> samples.dtype
dtype('float32')
"""
mu, var = self.build_prior_mean_var(test_points, num_latent, True)
jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
V = tf.random_normal(V_shape, dtype=L.dtype)
samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
return tf.transpose(samples)
|
tensorflow.transpose
| 9,627 |
import tensorflow as tf
self.end_points_D_val = self.model.discriminator(
inputs, False, True, num_classes=num_classes, batch_size=batch_size_val)
# For printing layers shape
self.training_end_points = self.end_points_D
self.training_end_points.update(self.end_points_G)
tf.summary.histogram("d", self.end_points_D['D_on_data'])
tf.summary.histogram("d_", self.end_points_D['D_on_G'])
tf.summary.image("G", G)
d_label_smooth = self.cnf['d_label_smooth'] # 0.25
self.d_loss_real = self._sigmoid_kl_with_logits(self.end_points_D['D_on_data_logits'],
1. - d_label_smooth)
class_loss_weight = 1.
self.d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.end_points_D['class_logits'], labels=tf.to_int64(targets))
self.test_loss = 1. - \
|
tensorflow.summary.image
| 9,628 |
import tensorflow as tf
obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1"))
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))
# target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func"))
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
|
tensorflow.one_hot
| 9,629 |
import tensorflow as tf
boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
|
tensorflow.reshape
| 9,630 |
import tensorflow as tf
'Directory to keep training outputs.')
tf.app.flags.DEFINE_string('eval_dir', '',
'Directory to keep eval outputs.')
tf.app.flags.DEFINE_integer('eval_batch_count', 10,
'Number of batches to eval.')
tf.app.flags.DEFINE_bool('eval_once', False,
'Whether to evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '',
'Directory to keep the checkpoints. Should be a '
'parent directory of FLAGS.train_dir/eval_dir.')
tf.app.flags.DEFINE_integer('num_gpus', 0,
'Number of gpus used for training. (0 or 1)')
tf.app.flags.DEFINE_integer('num_residual_units', 5,
'num of residual units')
tf.app.flags.DEFINE_string('Optimizer', 'mom',
'The optimizer used to train the model.')
tf.app.flags.DEFINE_bool('RCE_train', False,
'Whether to use RCE to train the model.')
tf.app.flags.DEFINE_string('attack_method', 'fgsm',
'The attacking method used')
tf.app.flags.DEFINE_float('eps', 0.01,
'The eps in attacking methods.')
tf.app.flags.DEFINE_string('save_pwd', None,
'')
epoch_jsma = 100
|
tensorflow.app.flags.DEFINE_string
| 9,631 |
import tensorflow as tf
# Project the embeddings to space dimensions for visualization
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
tf.summary.scalar("regularization_loss", model.regularization_loss)
tf.summary.scalar("stop_token_loss", model.stop_token_loss)
tf.summary.scalar("loss", model.loss)
|
tensorflow.summary.scalar
| 9,632 |
import tensorflow as tf
translation = tf.reshape([labeled_translations[i][0],
labeled_translations[i][2]], [2, 1])
pt_0 = rot @ tf.reshape([min_x, min_z], [2, 1]) + translation
pt_1 = rot @ tf.reshape([min_x, max_z], [2, 1]) + translation
pt_2 = rot @ tf.reshape([max_x, min_z], [2, 1]) + translation
pt_3 = rot @ tf.reshape([max_x, max_z], [2, 1]) + translation
for pt in [pt_0, pt_1, pt_2, pt_3]:
if pt[0] < box_limits_x[0]:
|
tensorflow.reshape
| 9,633 |
import tensorflow as tf
[tf.reduce_mean(inputs_means), tf.reduce_mean(inputs_vars)],
"image mean and average var",
first_n=1)
joint = tf.concat([inputs, G], 0)
log.info('Input size of unlabelled and generated %s' % (joint.get_shape()))
self.end_points_D = self.model.discriminator(
joint, True, None, num_classes=num_classes, batch_size=batch_size_train)
|
tensorflow.concat
| 9,634 |
import tensorflow as tf
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
|
tensorflow.gfile.Open
| 9,635 |
import tensorflow as tf
improvement = (mean_dist-mean_pred_error)/mean_dist
pairwise_improvement = tf.nn.relu(dists[1:] - pred_error)
pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype)
self.pairwise_improvement_bool = pairwise_improvement_bool
metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))
metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
metrics.append(tf.summary.scalar('training/improvement', improvement))
metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))
metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))
metrics.append(tf.summary.scalar('training/improvement_pairwise', tf.reduce_mean(pairwise_improvement_bool)))
metrics.append(tf.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool))
self.eval_summs = tf.summary.merge(metrics)
def _build_embedding_saver(self, sess):
"""To use embedding visualizer data has to be stored in variable
since we would like to visualize TEST_SET, this variable should not affect
common checkpoint of the model.
Hence, we build a separate variable with a separate saver."""
embedding_shape = [int(len(self.test_set) / FLAGS.batch_size) * FLAGS.batch_size,
self.encode.get_shape().as_list()[1]]
tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv')
self.embedding_test_ph = tf.placeholder(tf.float32, embedding_shape, name='embedding')
|
tensorflow.summary.merge
| 9,636 |
import tensorflow as tf
with tf.variable_scope('branch4_pool'):
|
tensorflow.variable_scope
| 9,637 |
from tensorflow.contrib.boosted_trees.proto import learner_pb2
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
self._export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(self._export_dir_base)
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
|
tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig
| 9,638 |
import tensorflow as tf
opt_momentum = knobs['opt_momentum'] # Momentum optimizer momentum
grad_clip_norm = knobs['grad_clip_norm'] # L2 norm to clip gradients by
# Compute learning rate, gradients
tf_trainable_vars = tf.trainable_variables()
lr = self._get_learning_rate(step, **knobs)
grads = tf.gradients(loss, tf_trainable_vars)
self._mark_for_monitoring('lr', lr)
# Clip gradients
if grad_clip_norm > 0:
grads = [tf.clip_by_norm(x, grad_clip_norm) for x in grads]
# Init optimizer
opt = tf.train.MomentumOptimizer(lr, opt_momentum, use_locking=True, use_nesterov=True)
train_op = opt.apply_gradients(zip(grads, tf_trainable_vars), global_step=step)
return train_op
def _preprocess(self, images, classes, is_train=False, **knobs):
batch_size = knobs['batch_size']
cutout_size = knobs['cutout_size']
|
tensorflow.clip_by_norm
| 9,639 |
import tensorflow as tf
def _fuse(self):
with tf.variable_scope("Context_to_Query_Attention_Layer"):
C = tf.tile(tf.expand_dims(self.c_embed_encoding, 2), [1, 1, self.max_q_len, 1])
Q = tf.tile(tf.expand_dims(self.q_embed_encoding, 1), [1, self.max_p_len, 1, 1])
S = trilinear([C, Q, C * Q], input_keep_prob=1.0 - self.dropout)
|
tensorflow.expand_dims
| 9,640 |
import tensorflow as tf
new_w = epi_len - h + 1
weights = np.zeros([epi_len, epi_len])
for i in range(new_w):
weights[i:i + h, i] = 1.0
weights_list += [weights]
weights_tensors = tf.stack([tf.convert_to_tensor(weights, dtype=tf.float32) for weights in weights_list])
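# Sample a random horizon and slice the matching summation-weight matrix down to the remaining width.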
rand_horizon = tf.random_uniform((), 0, horizon, dtype=tf.int32)
new_w = epi_len - rand_horizon
cur_weights = tf.slice(weights_tensors[tf.cast(rand_horizon, tf.int32)], [0, 0], [epi_len, new_w])
# cur_weights = tf.slice(weights_tensors, [tf.cast(rand_horizon, tf.int32), 0, 0], [1, epi_len, new_w])
horizon_pred = tf.matmul(pred, cur_weights)
horizon_tgt = tf.matmul(tgt, cur_weights)
return horizon_pred, horizon_tgt
|
tensorflow.random_uniform
| 9,641 |
import tensorflow as tf
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_enc_timesteps)]
dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)]
targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
for i in range(num_dec_timesteps)]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with tf.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
|
tensorflow.constant
| 9,642 |
import tensorflow as tf
self.lr = learning_rate
self.gamma = gamma
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Critic'):
# Input (s, a), output q
self.a = a
self.q = self._build_net(S, self.a, 'eval_net', trainable=True)
# Input (s_, a_), output q_ for q_target
self.q_ = self._build_net(S_, a_, 'target_net', trainable=False) # target_q is based on a_ from Actor's target_net
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
|
tensorflow.get_collection
| 9,643 |
import tensorflow as tf
truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
[self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),
[self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),
[self.batch_size, s_h, s_w, self.c_dim], name='d_h4')
self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
|
tensorflow.concat
| 9,644 |
import tensorflow as tf
gpu_copy_stage_ops = []
gpu_compute_stage_ops = []
gpu_grad_stage_ops = []
use_synthetic_gpu_images = (self.dataset is None)
with tf.device(self.global_step_device):
global_step = tf.contrib.framework.get_or_create_global_step()
# Build the processing and model for the worker.
with tf.device(self.cpu_device):
nclass, images_splits, labels_splits = add_image_preprocessing(
self.dataset, input_nchan, image_size, self.batch_size,
len(self.devices), input_data_type, self.resize_method,
not FLAGS.eval)
update_ops = None
staging_delta_ops = []
for device_num in range(len(self.devices)):
|
tensorflow.device
| 9,645 |
import tensorflow as tf
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
|
tensorflow.shape
| 9,646 |
import tensorflow as tf
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
# calculate output shape
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
# outputs = batch_norm_for_conv2d(outputs, is_training,
# bn_decay=bn_decay, scope='bn')
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
|
tensorflow.nn.conv2d_transpose
| 9,647 |
import tensorflow as tf
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
|
tensorflow.contrib.tpu.TPUConfig
| 9,648 |
import tensorflow as tf
tf.add_to_collection("string_collection", "hello")
tf.add_to_collection("variable_collection", v0)
|
tensorflow.add_to_collection
| 9,649 |
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
# To be defunnable, the function cannot return an Operation, so the above
# function is used for defun or eager, and this function is used in graph to be
# able to run the gradient updates.
def graph_step(dynamics, optimizer, samples):
loss, grads, samples, _ = l2hmc.loss_and_grads(
dynamics, samples, loss_fn=l2hmc.compute_loss)
train_op = optimizer.apply_gradients(zip(grads, dynamics.variables))
return train_op, loss, samples
|
tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.loss_and_grads
| 9,650 |
import tensorflow as tf
# return np.random.choice(range(probs.shape[1]), p=probs.ravel()) # sample one action from probs according to its probabilities
def value(self, ob, g, cw, hw, cm, hm):
sess = tf.get_default_session()
return sess.run(self.manager_vf,
{self.obs: [ob], self.state_in[0]: cw, self.state_in[1]: hw,
|
tensorflow.get_default_session
| 9,651 |
import tensorflow as tf
Arguments:
- *indicator*: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
- *num_samples*: int32 scalar tensor
Returns:
A boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])
return tf.equal(selected_indicator, 1)
|
tensorflow.where
| 9,652 |
import tensorflow as tf
# normalize
res = (input_ - used_mean) / tf.sqrt(used_var + epsilon)
# de-normalize
if scale:
res *= gamma
res += beta
# update variables
if train:
with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
with ops.colocate_with(mean):
new_mean = tf.assign_sub(
mean,
tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean."))
with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
with ops.colocate_with(var):
new_var = tf.assign_sub(
var,
tf.check_numerics(decay * (var - cur_var),
"NaN in moving variance."))
with tf.name_scope(name, "IncrementTime", [step]):
with ops.colocate_with(step):
new_step = tf.assign_add(step, 1.)
res += 0. * new_mean * new_var * new_step
return res
|
tensorflow.check_numerics
| 9,653 |
import tensorflow as tf
# Used for input shapes of the prediction network
if self.data_shape is None:
self.data_shape = output_shapes
# Handle for the feedable iterator
self.handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
self.handle, output_types, output_shapes)
data = iterator.get_next()
# Build the actual training and evaluation models
self._train_graph(data)
self._eval_graph(data)
self.summaries = tf.summary.merge_all()
# Prediction network with feed_dict
self.pred_in = {i: tf.placeholder(self.input_spec[i]['type'], shape=s, name=i)
for i, s in self.data_shape.items()}
self._pred_graph(self.pred_in)
# Start session
sess_config = tf.ConfigProto(device_count={'GPU': self.n_gpus})
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
# Register tf dataset handles
if self.datasets:
|
tensorflow.summary.merge_all
| 9,654 |
import tensorflow as tf
_TRAIN_FEATURE_MAP = {
movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
rconst.MASK_START_INDEX: tf.FixedLenFeature([1], dtype=tf.string),
"labels": tf.FixedLenFeature([], dtype=tf.string),
}
|
tensorflow.FixedLenFeature
| 9,655 |
import tensorflow as tf
b = tf.get_variable("b", [nf], initializer=b_init)
if rf == 1: #faster 1x1 conv
c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, shape_list(x)[:-1]+[nf])
else: #was used to train LM
|
tensorflow.reshape
| 9,656 |
import tensorflow as tf
input_files.extend(tf.gfile.Glob(input_pattern))
if FLAGS.input_dir is not None:
for filename in tf.gfile.ListDirectory(FLAGS.input_dir):
input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.input_dir, filename)))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
validation_input_files = []
if FLAGS.validation_input_file is None and FLAGS.validation_input_dir is None:
validation_input_files = input_files
else:
if FLAGS.validation_input_file is not None:
for input_pattern in FLAGS.validation_input_file.split(","):
validation_input_files.extend(tf.gfile.Glob(input_pattern))
if FLAGS.validation_input_dir is not None:
for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir):
validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename)))
tf.logging.info("*** Input Validation Files ***")
for input_file in validation_input_files:
tf.logging.info(" %s" % input_file)
config = tf.ConfigProto()
if FLAGS.xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if FLAGS.use_hvd:
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.allow_growth=True
|
tensorflow.gfile.Glob
| 9,657 |
import tensorflow as tf
tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss)
tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent)
tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution)
tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label)
tf.summary.histogram(name='Real Categorical Distribution', values=categorial_distribution)
tf.summary.image(name='Input Images', tensor=input_images, max_outputs=10)
tf.summary.image(name='Generated Images', tensor=generated_images, max_outputs=10)
summary_op = tf.summary.merge_all()
# Saving the model
saver = tf.train.Saver()
step = 0
with tf.Session() as sess:
if train_model:
tensorboard_path, saved_model_path, log_path = form_results()
sess.run(init)
writer = tf.summary.FileWriter(logdir=tensorboard_path, graph=sess.graph)
x_l, y_l = mnist.test.next_batch(n_labeled)
for i in range(n_epochs):
n_batches = int(n_labeled / batch_size)
print("------------------Epoch {}/{}------------------".format(i, n_epochs))
for b in range(1, n_batches + 1):
z_real_dist = np.random.randn(batch_size, z_dim) * 5.
real_cat_dist = np.random.randint(low=0, high=10, size=batch_size)
|
tensorflow.Session
| 9,658 |
import tensorflow as tf
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
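# Flip each pair so the target difference is non-negative, then hinge-penalize predictions whose sign-aligned difference falls short of the target difference.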
geq = tf.cast(tgt_dif > 0, tf.bool)
tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
final_loss = tf.reduce_mean(loss)
return final_loss, cstr_pct
def contra_traj_lossV9(pred, tgt, horizon=12, margin=1):
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
|
tensorflow.maximum
| 9,659 |
import tensorflow as tf
return tf.cast(tf.clip_by_value(ret, 0, 255), tf.uint8)[0]
def resize_shortest_edge(image, image_shape, size):
shape = tf.cast(image_shape, tf.float32)
w_greater = tf.greater(image_shape[0], image_shape[1])
shape = tf.cond(w_greater,
|
tensorflow.cast
| 9,660 |
import tensorflow as tf
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
|
tensorflow.get_variable
| 9,661 |
import tensorflow as tf
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
|
tensorflow.nn.seq2seq.tied_rnn_seq2seq
| 9,662 |
import tensorflow as tf
with tf.variable_scope('abs_TD'):
self.abs_td = tf.abs(self.target_q - self.q)
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
def _build_net(self, s, a, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
with tf.variable_scope('l1'):
n_l1 = 700
# combine the action and states together in this way
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('l2'):
net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
|
tensorflow.random_normal_initializer
| 9,663 |
import tensorflow as tf
f_score, f_geometry = model.model(images, is_training=True)
model_loss = model.loss(score_maps, f_score,
geo_maps, f_geometry,
training_masks)
total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# add summary
if reuse_variables is None:
tf.summary.image('input', images)
tf.summary.image('score_map', score_maps)
tf.summary.image('score_map_pred', f_score * 255)
tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
tf.summary.image('training_masks', training_masks)
tf.summary.scalar('model_loss', model_loss)
tf.summary.scalar('total_loss', total_loss)
return total_loss, model_loss
|
tensorflow.summary.image
| 9,664 |
import tensorflow as tf
# Reshape x_discrete
shape_x = common_layers.shape_list(x)
shape_discrete = shape_x[:-1]
x_discrete = tf.reshape(x_discrete, shape_discrete)
x_means = tf.reshape(x_means, shape=shape_x)
h1 = x + tf.stop_gradient(x_means - x)
h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
res = tf.layers.dense(
tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
embed_fn = partial(self.embed)
return {
"dense": res,
"discrete": x_discrete,
|
tensorflow.nn.relu
| 9,665 |
import tensorflow as tf
alpha_std = tf.exp(alpha_logstd)
# Compute epsilon from {n_samples} standard Gaussian
# epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
hyp_params = tf.get_variable('hyp_params_layer'+str(h),
shape=[2],
initializer=tf.random_normal_initializer())
l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
# Compute A_{h+1}
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
return F, KL
|
tensorflow.cos
| 9,666 |
import tensorflow as tf
out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4))
# Combine to constant channels
with tf.variable_scope('combine'):
W = self._make_var('W', (ni, block_ch * block_ch))
W = tf.gather(W, unused_indices, axis=0)
W = tf.reshape(W, (1, 1, num_out_blocks * block_ch, block_ch))
X = tf.reshape(out_blocks, (-1, w, h, num_out_blocks * block_ch))
X = tf.nn.relu(X)
X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME')
|
tensorflow.gather
| 9,667 |
import tensorflow as tf
x_means = tf.reshape(x_means, shape=shape_x)
h1 = x + tf.stop_gradient(x_means - x)
|
tensorflow.stop_gradient
| 9,668 |
import tensorflow as tf
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
|
tensorflow.logging.info
| 9,669 |
import tensorflow as tf
is_training = False
with tf.variable_scope('RGB'):
self.feature, _ = InceptionI3d(
|
tensorflow.variable_scope
| 9,670 |
import tensorflow as tf
'''
# Value FC
value = Dense(units=1, kernel_initializer=normalized_columns_initializer(1.0), bias_initializer=None, activation=None)(inputs=self.rnn_out)
# rnn_out_frozen = tf.stop_gradient(self.rnn_out)
next_loc_mean = Dense(units=2, kernel_initializer=normalized_columns_initializer(1.0), bias_initializer=None, activation=tf.math.tanh)(inputs=self.rnn_out) # was rnn_out_frozen
loc_std = Dense(units=1, kernel_initializer=normalized_columns_initializer(1.0), activation=tf.nn.softplus)(inputs = self.rnn_out)
# Policy FC
next_loc = tf.clip_by_value(next_loc_mean + tf.random_normal([1,2], 0, loc_std), -1, 1)
# next_loc = tf.stop_gradient(next_loc)
return value, next_loc_mean, loc_std, next_loc, state_out, state_in, state_init
|
tensorflow.random_normal
| 9,671 |
import tensorflow as tf
merged = tf.merge_all_summaries()
if not os.path.exists('tensorboard_logs/'):
os.makedirs('tensorboard_logs/')
my_writer = tf.train.SummaryWriter('tensorboard_logs/', sess.graph)
|
tensorflow.train.SummaryWriter
| 9,672 |
import tensorflow as tf
if train:
with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
with ops.colocate_with(mean):
new_mean = tf.assign_sub(
mean,
tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean."))
with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
with ops.colocate_with(var):
new_var = tf.assign_sub(
var,
tf.check_numerics(decay * (var - cur_var),
"NaN in moving variance."))
with tf.name_scope(name, "IncrementTime", [step]):
with ops.colocate_with(step):
new_step = tf.assign_add(step, 1.)
res += 0. * new_mean * new_var * new_step
return res
# batch normalization taking into account the volume transformation
def batch_norm_log_diff(input_,
dim,
name,
train=True,
epsilon=1e-8,
decay=.1,
axes=[0],
|
tensorflow.assign_add
| 9,673 |
import tensorflow as tf
l = batch_norm_conv(l, b_train=bn_phaze, scope='bn')
l = act_func(l)
l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1], dilation=dilation,
non_linear_fn=None, bias=use_bias)
l = tf.concat([l, layer], 3)
return l
def add_residual_layer(layer, filter_dims, act_func=tf.nn.relu, scope='residual_layer',
use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
with tf.variable_scope(scope):
l = layer
if use_bn:
l = batch_norm_conv(l, b_train=bn_phaze, scope='bn')
l = act_func(l)
l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1], dilation=dilation, non_linear_fn=act_func, bias=use_bias)
return l
|
tensorflow.variable_scope
| 9,674 |
from tensorflow.python.ops import control_flow_ops
[cell() for _ in range(num_layers)])
outputs, final_state = core_rnn.static_rnn(
multi_cell, inputs, dtype=dtypes.float32)
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
training_op = control_flow_ops.group(*gradients)
self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" %
(config_name, self._GetConfigDesc(config)))
if __name__ == "__main__":
test.main()
|
tensorflow.python.ops.control_flow_ops.group
| 9,675 |
import tensorflow as tf
def LSGAN_losses(real, fake):
d_real = tf.reduce_mean(tf.squared_difference(real, 1), name='d_real')
d_fake = tf.reduce_mean(tf.square(fake), name='d_fake')
d_loss = tf.multiply(d_real + d_fake, 0.5, name='d_loss')
g_loss = tf.reduce_mean(tf.squared_difference(fake, 1), name='g_loss')
add_moving_summary(g_loss, d_loss)
return g_loss, d_loss
with tf.name_scope('losses'):
with tf.name_scope('LossA'):
# reconstruction loss
recon_loss_A = tf.reduce_mean(tf.abs(A - ABA), name='recon_loss')
# gan loss
G_loss_A, D_loss_A = LSGAN_losses(A_dis_real, A_dis_fake)
with tf.name_scope('LossB'):
recon_loss_B = tf.reduce_mean(tf.abs(B - BAB), name='recon_loss')
G_loss_B, D_loss_B = LSGAN_losses(B_dis_real, B_dis_fake)
|
tensorflow.name_scope
| 9,676 |
import tensorflow as tf
extra_zeros = tf.zeros((batch_size, self.max_phrase_size))
vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1) # [batch_size, extended_vsize]
if self.options.add_first_word_prob_for_phrase: # add prob of the first word to each phrase
attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words, self.phrase_starts,
vocab_dist, attn_dist)
# match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize]
batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)
batch_nums = tf.expand_dims(batch_nums, axis=1) # shape (batch_size, 1)
batch_nums = tf.tile(batch_nums, [1, passage_length]) # shape (batch_size, passage_length)
step_nums = tf.range(0, limit=passage_length) # [passage_length]
step_nums = tf.expand_dims(step_nums, axis=0) # shape (1, passage_length)
step_nums = tf.tile(step_nums, [batch_size, 1]) # shape (batch_size, passage_length)
indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2) # shape (batch_size, passage_length, 3)
indices = tf.reshape(indices, [-1, 3]) #[batch_size * passage_length, 3]
indices = tf.cast(indices, tf.int64)
shape = [batch_size, passage_length, extended_vsize]
shape = tf.cast(shape, tf.int64)
attn_dist = tf.reshape(attn_dist, shape=[-1]) # [batch_size*passage_length]
one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape) # [batch_size, passage_length, extended_vsize]
if passage_mask is not None:
passage_mask = tf.expand_dims(passage_mask, axis=-1)
one_hot_spare_rep = one_hot_spare_rep * passage_mask
one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1) # [batch_size, extended_vsize]
vocab_dist = tf.add(vocab_dist, one_hot_spare_rep)
|
tensorflow.cast
| 9,677 |
import tensorflow as tf
tensors = {
'a': tf.compat.v1.placeholder(tf.float32, ()),
}
with self.assertRaises(ValueError):
schema_inference.infer_feature_schema(tensors, graph)
def test_bucketization_annotation(self):
# TODO(b/132098015): Schema annotations aren't yet supported in OSS builds.
# pylint: disable=g-import-not-at-top
try:
from tensorflow_transform import annotations_pb2
except ImportError:
return
# pylint: enable=g-import-not-at-top
with tf.compat.v1.Graph().as_default() as graph:
inputs = {
'foo': tf.convert_to_tensor([0, 1, 2, 3]),
'bar': tf.convert_to_tensor([0, 2, 0, 2]),
}
boundaries_foo = tf.expand_dims(tf.convert_to_tensor([.5, 1.5]), axis=0)
boundaries_bar = tf.expand_dims(tf.convert_to_tensor([.1, .2]), axis=0)
outputs = {}
# tft.apply_buckets will annotate the feature in the output schema to
# indicate the bucket boundaries that were applied.
outputs['Bucketized_foo'] = mappers.apply_buckets(inputs['foo'],
boundaries_foo)
outputs['Bucketized_bar'] = mappers.apply_buckets(inputs['bar'],
|
tensorflow.compat.v1.Graph
| 9,678 |
import tensorflow as tf
# Reshape x_discrete
shape_x = common_layers.shape_list(x)
shape_discrete = shape_x[:-1]
x_discrete = tf.reshape(x_discrete, shape_discrete)
x_means = tf.reshape(x_means, shape=shape_x)
h1 = x + tf.stop_gradient(x_means - x)
|
tensorflow.reshape
| 9,679 |
import tensorflow as tf
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
|
tensorflow.to_int32
| 9,680 |
import tensorflow as tf
p = self._testParams()
tp = p.train
tp.lr_schedule.boundaries = [300000, 400000, 500000]
tp.lr_schedule.values = [1.0, 0.1, 0.01, 0.001]
lrs = tp.lr_schedule.Instantiate()
steps = [299999, 300001, 399999, 400001, 499999, 500001]
fetches = [lrs.Value(_) for _ in steps]
values = sess.run(fetches)
self.assertAllClose([1.0, 0.1, 0.1, 0.01, 0.01, 0.001], values)
def testBatchSplit(self):
def Run(num_splits):
p = self._testParams()
with self.session(use_gpu=False, graph=tf.Graph()) as sess:
tf.set_random_seed(93820981)
p.is_eval = True
p.input.cur_iter_in_seed = False
p.input.bucket_batch_limit = [
b * 2 / num_splits for b in p.input.bucket_batch_limit
]
with cluster_factory.ForTestingWorker(gpus=num_splits):
mdl = p.Instantiate()
metrics = mdl.FPropDefaultTheta()[0]
tf.global_variables_initializer().run()
return sess.run(metrics['loss'])
res1, res2 = Run(1), Run(2)
self.assertAllClose(res1[0], res2[0])
self.assertAllEqual(res1[1], res2[1])
|
tensorflow.set_random_seed
| 9,681 |
import tensorflow as tf
activation=tf.nn.relu, padding="same")
x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),
|
tensorflow.layers.conv2d
| 9,682 |
import tensorflow as tf
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
|
tensorflow.reduce_sum
| 9,683 |
import tensorflow as tf
axis=1)
# IMP: This is sum, as expectation wrt f
loss_bc = -tf.reduce_mean(gain_bc)
loss_policy = loss_f + loss_bc
# Value/Q function loss, and explained variance
check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2)
explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),
tf.reshape(qret, [self.n_envs, self.n_steps]))
loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)
# Net loss
check_shape([loss_policy, loss_q, entropy], [[]] * 3)
loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy
tf.summary.scalar('entropy_loss', entropy)
tf.summary.scalar('policy_gradient_loss', loss_policy)
tf.summary.scalar('value_function_loss', loss_q)
tf.summary.scalar('loss', loss)
|
tensorflow.stop_gradient
| 9,684 |
import tensorflow as tf
def add_dense_transition_layer(layer, filter_dims, stride_dims=[1, 1], act_func=tf.nn.relu, scope='transition',
use_bn=True, bn_phaze=False, use_pool=True, use_bias=False, dilation=[1, 1, 1, 1]):
with tf.variable_scope(scope):
if use_bn:
l = batch_norm_conv(layer, b_train=bn_phaze, scope='bn')
|
tensorflow.variable_scope
| 9,685 |
import tensorflow as tf
if lengths is None:
if input_ids is not None:
lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1)
else:
lengths = tf.convert_to_tensor([slen] * bs, tf.int32)
# mask = input_ids != self.pad_index
# check inputs
# assert shape_list(lengths)[0] == bs
|
tensorflow.convert_to_tensor
| 9,686 |
from tensorflow.python.ops import variable_scope
Returns:
value_tensor: A tensor representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(
name, 'false_negatives', [predictions, labels]):
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 0))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
|
tensorflow.python.ops.variable_scope.variable_scope
| 9,687 |
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
self._assertCommonMetrics(metrics)
def _assertCommonMetrics(self, metrics):
estimator_test_utils.assert_in_range(_ITERS, _ITERS + 5, 'global_step',
metrics)
estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy', metrics)
|
tensorflow.contrib.learn.python.learn.estimators.estimator_test_utils.assert_in_range
| 9,688 |
import tensorflow as tf
with tf.Session(config=sess_config) as sess:
input_image = tf.constant(input_image, dtype=tf.float32)
output = MODEL.build_server_graph(FLAGS, input_image)
output = (output + 1.) * 127.5
output = tf.reverse(output, [-1])
output = tf.saturate_cast(output, tf.uint8)
# load pretrained model
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assign_ops = []
for var in vars_list:
vname = var.name
from_name = vname
var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name)
assign_ops.append(tf.assign(var, var_value))
|
tensorflow.get_collection
| 9,689 |
import tensorflow as tf
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
class TrainR3DetDCL(Train):
def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \
gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)
def main(self):
with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
global_step = slim.get_or_create_global_step()
lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)
tf.summary.scalar('lr', lr)
optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
r3det_dcl = build_whole_network.DetectionNetworkR3DetDCL(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
|
tensorflow.Graph
| 9,690 |
import tensorflow as tf
return {f: example[f] for f in feature_list if f in example}
def _eager_dataset_iterator(dataset):
for item in dataset:
flat = tf.nest.flatten(item)
flat = [el.numpy() for el in flat]
yield tf.nest.pack_sequence_as(item, flat)
def _train_and_eval_dataset_v1(problem_name, data_dir, train_shuffle_files,
eval_shuffle_files):
"""Return train and evaluation datasets, feature info and supervised keys."""
with tf.device('cpu:0'):
|
tensorflow.nest.pack_sequence_as
| 9,691 |
import tensorflow as tf
import tensorflow as tf
import numpy as np
import time
from tensorflow.contrib.rnn import GRUCell
from util.infolog import log
def prenet(inputs, is_training, layer_sizes, scope=None):
x = inputs
drop_rate = 0.5 if is_training else 0.0
with tf.variable_scope(scope or 'prenet'):
for i, size in enumerate(layer_sizes):
dense = tf.layers.dense(x, units=size, activation=tf.nn.relu, name='dense_%d' % (i + 1))
x = tf.layers.dropout(dense, rate=drop_rate, training=is_training, name='dropout_%d' % (i + 1))
return x
def encoder_cbhg(inputs, input_lengths, is_training, depth):
input_channels = inputs.get_shape()[2]
return cbhg(
inputs,
input_lengths,
is_training,
scope='encoder_cbhg',
K=16,
projections=[128, input_channels],
depth=depth)
|
tensorflow.layers.dropout
| 9,692 |
import tensorflow as tf
'is_training': is_training and fine_tune_batch_norm,
'decay': 0.9997,
'epsilon': 1e-5,
'scale': True,
}
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
padding='SAME',
stride=1,
reuse=reuse):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with tf.variable_scope(_DECODER_SCOPE, _DECODER_SCOPE, [features]):
feature_list = feature_extractor.networks_to_feature_maps[
model_variant][feature_extractor.DECODER_END_POINTS]
if feature_list is None:
tf.logging.info('Not found any decoder end points.')
return features
else:
decoder_features = features
for i, name in enumerate(feature_list):
decoder_features_list = [decoder_features]
feature_name = '{}/{}'.format(
feature_extractor.name_scope[model_variant], name)
decoder_features_list.append(
slim.conv2d(
# end_points["refinement_net/" + feature_name],
|
tensorflow.variable_scope
| 9,693 |
import tensorflow as tf
truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
|
tensorflow.reshape
| 9,694 |
import tensorflow as tf
wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
|
tensorflow.reduce_mean
| 9,695 |
import tensorflow as tf
def augment(features, targets):
features['image'] = _cifar_augment_image(features['image'])
return features, targets
def cast_image(features, targets):
features['image'] = tf.cast(features['image'], tf.float32) / 255.0
return features, targets
if training:
dataset = dataset.map(augment)
dataset = dataset.map(cast_image)
|
tensorflow.cast
| 9,696 |
import tensorflow as tf
is_training=True)
# data processing
inputs_list = []
for i in range(num_gpu):
img = tf.expand_dims(img_batch[i], axis=0)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img = img / tf.constant([cfgs.PIXEL_STD])
gtboxes_and_label_r = tf.py_func(backward_convert,
inp=[gtboxes_and_label_batch[i]],
Tout=tf.float32)
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
num_objects = num_objects_batch[i]
num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)
|
tensorflow.py_func
| 9,697 |
import tensorflow as tf
x_ = K.exp(x) + K.epsilon()
# Only keep the diagonal elements.
x_ *= diag_mask
# Add the original, non-diagonal elements.
x_ += x * (1. - diag_mask)
# Finally, gather everything into a lower triangular matrix.
L_ = tf.gather(x_, tril_mask)
return [L_, tf.transpose(L_)]
tmp = tf.scan(fn, L_flat, initializer=init)
if isinstance(tmp, (list, tuple)):
# TensorFlow 0.10 now returns a tuple of tensors.
L, LT = tmp
else:
|
tensorflow.transpose
| 9,698 |
import tensorflow as tf
def hgru_ops(self, i0, x, h2, layer, layer_idx):
"""hGRU body."""
var_scope = '%s_hgru_weights' % layer
# Circuit input receives recurrent output h2
c1, g1 = self.circuit_input(
h2=h2,
layer=layer,
var_scope=var_scope,
layer_idx=layer_idx)
with tf.variable_scope(
'%s/c1_bn' % var_scope,
reuse=self.scope_reuse) as scope:
c1 = tf.contrib.layers.batch_norm(
inputs=c1,
scale=True,
center=False,
fused=True,
renorm=False,
param_initializers=self.param_initializer,
updates_collections=None,
scope=scope,
reuse=self.reuse,
is_training=self.train)
|
tensorflow.contrib.layers.batch_norm
| 9,699 |