seed (string, lengths 25-2.89k) | seed_api (string, lengths 14-102) | index (int64, 0-14.8k) |
---|---|---|
import tensorflow as tf
keep_prob, is_train, wd, activation
) # [bs,bn,vec]
block_ct_res_tile = tf.tile(tf.expand_dims(block_ct_res, 2), [1, 1, bl, 1])#[bs,bn,vec]->[bs,bn,bl,vec]
with tf.variable_scope('combination'):
# input:1.rep_map[bs,bn,bl,vec]; 2.self_attn_result[bs,bn,bl,vec]; 3.rnn_res_tile[bs,bn,bl,vec]
rep_tensor_with_ct = tf.concat([rep_map, self_attn_result, block_ct_res_tile], -1) # [bs,bn,bl,3vec]
new_context_and_gate = linear(rep_tensor_with_ct, 2 * ivec, True, 0., 'linear_new_context_and_gate',
False, wd, keep_prob, is_train) # [bs,bn,bl,2vec]
new_context, gate = tf.split(new_context_and_gate, 2, 3) # bs,bn,bl,vec
if activation == "relu":
new_context_act = tf.nn.relu(new_context)
elif activation == "elu":
|
tensorflow.concat
| 9,900 |
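For reference, a minimal usage sketch of tf.concat (illustrative values, not taken from the seed above; works under TF 1.x graph mode and TF 2.x eager execution):
import tensorflow as tf
a = tf.constant([[1.0, 2.0]])      # shape [1, 2]
b = tf.constant([[3.0, 4.0]])      # shape [1, 2]
c = tf.concat([a, b], axis=-1)     # concatenate on the last axis -> shape [1, 4]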
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
|
tensorflow.examples.tutorials.mnist.input_data.read_data_sets
| 9,901 |
import tensorflow as tf
"""
# tvar = tf.trainable_variables() if train_only else tf.all_variables()
if train_only:
t_vars = tf.trainable_variables()
logging.info(" [*] printing trainable variables")
else:
|
tensorflow.trainable_variables
| 9,902 |
import tensorflow as tf
out_entry_count = np.prod(out_sizes) * max_density
bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32)
no_strides = [1, 1, 1, 1, 1]
[t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3)
s1 = tf.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)
[t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes)
s2 = tf.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
|
tensorflow.SparseTensor
| 9,903 |
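For reference, a minimal sketch of constructing a tf.SparseTensor (illustrative indices and values; tf.sparse.to_dense assumes TF >= 1.13):
import tensorflow as tf
# Two non-zero entries of a 2x3 matrix, given as (indices, values, dense_shape).
st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
dense = tf.sparse.to_dense(st)     # [[1., 0., 0.], [0., 0., 2.]]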
import tensorflow as tf
# implement one step of the RNN
def rnn_step(self, rnn_in, state):
if self.dale_ratio:
new_state = (1-self.alpha) * state \
+ self.alpha * (
tf.matmul(
tf.nn.relu(state),
tf.matmul(
tf.abs(self.W_rec) * self.rec_Connectivity,
self.Dale_rec, name="in_1"),
transpose_b=True, name="1")
+ tf.matmul(
rnn_in,
tf.abs(self.W_in) * self.input_Connectivity,
transpose_b=True, name="2")
+ self.b_rec)\
+ np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise)\
* tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
|
tensorflow.abs
| 9,904 |
import tensorflow as tf
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
def find_trainable_variables(key):
return tf.trainable_variables(key)
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
|
tensorflow.trainable_variables
| 9,905 |
import tensorflow as tf
"""Steps after which learning rate decays.""")
tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
"""Learning rate decay factor.""")
tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""")
tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""")
tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""")
tf.flags.DEFINE_float('gradient_clip', None, """Gradient clipping magnitude.
Disabled by default.""")
tf.flags.DEFINE_float('weight_decay', 0.00004,
"""Weight decay factor for training.""")
# Performance tuning flags.
tf.flags.DEFINE_boolean('winograd_nonfused', True,
"""Enable/disable using the Winograd non-fused
algorithms.""")
tf.flags.DEFINE_boolean('sync_on_finish', False,
"""Enable/disable whether the devices are synced after
|
tensorflow.flags.DEFINE_float
| 9,906 |
import tensorflow as tf
def test_instance_non_maximum_suppression_1d_scores(self):
mask0 = tf.constant([[1, 0],
[0, 1]], dtype=tf.float32)
mask1 = tf.constant([[1, 1],
[0, 1]], dtype=tf.float32)
mask2 = tf.constant([[1, 0],
[1, 1]], dtype=tf.float32)
mask3 = tf.constant([[1, 1],
[1, 1]], dtype=tf.float32)
mask4 = tf.constant([[0, 0],
[0, 0]], dtype=tf.float32)
|
tensorflow.constant
| 9,907 |
import tensorflow as tf
def _relu(name, x):
"""
Applies ReLU function.
:param name: [string] Name of the op.
:param x: [Tensor] Input to the function.
:return: [Tensor] Output of the function.
"""
return tf.nn.relu(x, name=name)
# log.warning('Not using ReLU to test performance at inference time')
# return x
def _stride_arr(n, data_format='NHWC'):
"""Makes strides array for downsampling convolution."""
if data_format == 'NHWC':
return [1, n, n, 1]
elif data_format == 'NCHW':
|
tensorflow.nn.relu
| 9,908 |
import tensorflow as tf
conv = tf.nn.conv2d(x, w, stride, padding)
else:
conv = tf.pad(x, padding, "CONSTANT")
conv = tf.nn.conv2d(conv, w, stride, padding='VALID')
if bias != -1:
|
tensorflow.nn.conv2d
| 9,909 |
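For reference, a minimal sketch of tf.nn.conv2d on an NHWC input with an HWIO filter (illustrative shapes, not from the seed):
import tensorflow as tf
x = tf.ones([1, 8, 8, 3])                                      # NHWC input
w = tf.ones([3, 3, 3, 16])                                     # HWIO filter
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')   # -> shape [1, 8, 8, 16]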
import tensorflow as tf
f_i_),
axis=1)
# IMP: This is sum, as expectation wrt f
loss_bc = -tf.reduce_mean(gain_bc)
loss_policy = loss_f + loss_bc
# Value/Q function loss, and explained variance
check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2)
explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),
tf.reshape(qret, [self.n_envs, self.n_steps]))
loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)
# Net loss
check_shape([loss_policy, loss_q, entropy], [[]] * 3)
loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy
tf.summary.scalar('entropy_loss', entropy)
|
tensorflow.reshape
| 9,910 |
import tensorflow as tf
widths, x_centers = tf.meshgrid(widths, x_centers)
heights, y_centers = tf.meshgrid(heights, y_centers)
anchor_centers = tf.stack([x_centers, y_centers], axis=2)
anchor_centers = tf.reshape(anchor_centers, [-1, 2])
anchor_sizes = tf.stack([widths, heights], axis=2)
anchor_sizes = tf.reshape(anchor_sizes, [-1, 2])
anchors = tf.concat([anchor_centers - .5 * anchor_sizes,
anchor_centers + .5 * anchor_sizes], 1)
# anchors = box_utils.convert_yxyx_to_xyxy_format(anchors)
|
tensorflow.reshape
| 9,911 |
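For reference, a minimal sketch of the tf.reshape pattern used above to flatten a grid of anchor centers (illustrative shapes):
import tensorflow as tf
centers = tf.zeros([4, 4, 2])          # e.g. a 4x4 grid of (x, y) centers
flat = tf.reshape(centers, [-1, 2])    # flatten the grid -> shape [16, 2]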
import tensorflow as tf
# clip gradients
clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple)
# compute norms in case they need to be logged
self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]
self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]
# check that gradients are finite
grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars]
variables = [tf.check_numerics(v, "grads is not finite") for (g, v) in clipped_grads_and_vars]
self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)]
# 2nd part of minimize: apply_gradient
optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)
update_ops = tf.group(*self.update_ops)
self.training_op = tf.group(update_ops, optimizer_step)
def set_check_ops(self):
self._check_ops = 1
# TODO argo2 This is not working anymore with the new session
#with self.sess.graph.as_default():
self._numerics_ops = tf.add_check_numerics_ops()
def release(self):
super().release()
self.sess.close()
tf.reset_default_graph()
|
tensorflow.group
| 9,912 |
import tensorflow as tf
# Concatenate the cell_output (= decoder state) and the context vector, and pass them through a linear layer
# This is V[s_t, h*_t] + b in the paper
with variable_scope.variable_scope("AttnOutputProjection"):
output_t = linear([cell_output] + [context_t], options.gen_hidden_size, True)
with tf.variable_scope('output_projection'):
w = tf.get_variable('w', [options.gen_hidden_size, vocab.vocab_size+1], dtype=tf.float32)
b = tf.get_variable('b', [vocab.vocab_size +1], dtype=tf.float32)
# vocab_scores is the vocabulary distribution before applying softmax.
# Each entry on the list corresponds to one decoder step
vocab_score_t = tf.nn.xw_plus_b(output_t, w, b) # apply the linear layer
|
tensorflow.variable_scope
| 9,913 |
import tensorflow as tf
assert typ == Nonlinearity.GAUSSIAN
return lambda arg: tf.exp(-arg * arg)
|
tensorflow.exp
| 9,914 |
import tensorflow as tf
)
# Decrement episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)
# Assign new observations.
with tf.control_dependencies(control_inputs=(assignment,)):
|
tensorflow.assign_sub
| 9,915 |
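For reference, a minimal TF 1.x graph-mode sketch of tf.assign_sub (illustrative variable; in TF 2.x the equivalent is the variable method episode_count.assign_sub(3)):
import tensorflow as tf
episode_count = tf.Variable(10, dtype=tf.int32)
decrement = tf.assign_sub(episode_count, 3)        # running this op leaves episode_count at 7
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(decrement))                     # 7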
import tensorflow as tf
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
_zero = tf.constant(0,dtype=_x.dtype)
# return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
return tf.maximum(_zero, _x) + _alpha * tf.minimum(_zero, _x)
def calc_auc(raw_arr):
|
tensorflow.variable_scope
| 9,916 |
from tensorflow.python.framework import ops
if metrics_collections:
ops.add_to_collections(metrics_collections, root_mean_squared_error)
|
tensorflow.python.framework.ops.add_to_collections
| 9,917 |
import tensorflow as tf
ys = tf.placeholder(tf.float32, [None, 1], name='y_input') # same as above
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu,nameScope="layerTest1")
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None,nameScope="layerTest2")
sess = tf.Session()
# The `with` block / name above is optional; you may add it or not, but the line below is required.
# It creates a `logs` directory under the current directory and saves the graph information into it.
# After this code runs, a `logs` folder will have been created.
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: # tensorflow version < 0.12
writer = tf.train.SummaryWriter('logs/', sess.graph)
else: # tensorflow version >= 0.12
|
tensorflow.Session
| 9,918 |
from tensorflow.contrib.eager.python import tfe
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
|
tensorflow.contrib.eager.python.tfe.Iterator
| 9,919 |
from tensorflow.python.framework import ops
epsilon=1e-8,
use_locking=False, name="Adamirror"):
super(OptimisticAdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
def _create_slots(self, var_list):
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
self._zeros_slot(v, "g", self._name)
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
|
tensorflow.python.framework.ops.convert_to_tensor
| 9,920 |
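For reference, a minimal sketch of ops.convert_to_tensor as used in the optimizer's _prepare above; note this is an internal module, and the public equivalent is tf.convert_to_tensor (value is illustrative):
from tensorflow.python.framework import ops
lr_t = ops.convert_to_tensor(0.001, name="learning_rate")   # wraps the Python float in a constant Tensor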
import tensorflow as tf
def _add_act_summary(self, tensor):
tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
tf.nn.zero_fraction(tensor))
def _add_score_summary(self, key, tensor):
tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
def _add_train_summary(self, var):
tf.summary.histogram('TRAIN/' + var.op.name, var)
# Custom Layers #
def _reshape_layer(self, bottom, num_dim, name):
input_shape = tf.shape(bottom)
with tf.variable_scope(name):
# change the channel to the caffe format
# 18 channels [, 18, none, none] hold the scores: the first 9 are foreground scores, the last 9 are background scores
# second call: [1, 2, none, none]
to_caffe = tf.transpose(bottom, [0, 3, 1, 2])
# then force it to have channel 2
# [1, 2, none, none]: separate the foreground and background scores of the 9 anchors
# second call: [1, 18, none, none]
reshaped = tf.reshape(to_caffe, tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]]))
# then swap the channel back
# [1, none, none, 2]; the first none should be (rows * 9)
# second call: [1, none, none, 18]
to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
|
tensorflow.shape
| 9,921 |
import tensorflow as tf
reverse_targets_encoded = tf.reverse_sequence(
targets_encoded, seq_lengths, seq_axis=0, batch_axis=1)
# Compute the reverse rnn over the targets.
reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell,
reverse_targets_encoded,
time_major=True,
dtype=tf.float32)
reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths,
seq_axis=0, batch_axis=1)
self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out,
clear_after_read=False)
def _filtering_proposal(self, rnn_out, prior, t):
"""Computes the filtering proposal distribution."""
|
tensorflow.reverse_sequence
| 9,922 |
import tensorflow as tf
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
|
tensorflow.constant_initializer
| 9,923 |
import tensorflow as tf
# ==============================================================================
"""Model helper for creating a ResNet model for the CIFAR-10 dataset."""
import tensorflow as tf
from nets.abstract_model_helper import AbstractModelHelper
from datasets.cifar10_dataset import Cifar10Dataset
from utils.external import resnet_model as ResNet
from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model')
tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\' ratio')
tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate')
tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size')
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient')
tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient')
def forward_fn(inputs, is_train, data_format):
"""Forward pass function.
Args:
* inputs: inputs to the network's forward pass
* is_train: whether to use the forward pass with training operations inserted
* data_format: data format ('channels_last' OR 'channels_first')
|
tensorflow.app.flags.DEFINE_float
| 9,924 |
import tensorflow as tf
decoded = tf.cast(tf.argmax(inputs, axis=-1), tf.int32)
# Adjust event vals according to representation
decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded)
# Set default vals
decoded = tf.where(tf.equal(decoded, 0), def_val, decoded)
return decoded, None
@tf.function
def ctc_decode(inputs, batch_size, seq_length, blank_index, def_val, shift, beam_width=10):
"""Perform ctc decoding"""
# Decode uses time major
inputs = tf.transpose(a=inputs, perm=[1, 0, 2])
seq_lengths = tf.fill([batch_size], seq_length)
# Perform beam search
indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder(
inputs=inputs, sequence_length=seq_lengths,
beam_width=beam_width, blank_index=blank_index, top_paths=1,
blank_label=0)
decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)
|
tensorflow.transpose
| 9,925 |
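For reference, a minimal sketch of tf.transpose swapping the batch and time axes, as in the time-major conversion above (illustrative shapes):
import tensorflow as tf
x = tf.reshape(tf.range(24), [2, 3, 4])    # e.g. [batch, time, features]
x_t = tf.transpose(x, perm=[1, 0, 2])      # swap batch and time -> shape [3, 2, 4]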
import tensorflow as tf
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
optimize_expr = U.minimize_and_clip(optimizer,
weighted_error,
var_list=q_func_vars,
clip_val=grad_norm_clipping)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
|
tensorflow.group
| 9,926 |
import tensorflow as tf
batch_size = tf.shape(targets)[0]
time_steps = tf.shape(targets)[1]
logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))
if rewards is not None:
crossent *= tf.stop_gradient(rewards)
log_perp = tf.reduce_sum(crossent * weights, axis=1)
|
tensorflow.stack
| 9,927 |
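For reference, a minimal sketch of tf.stack packing tensors along a new axis (illustrative values, not from the seed):
import tensorflow as tf
a = tf.constant([1, 2])
b = tf.constant([3, 4])
s = tf.stack([a, b], axis=0)    # -> [[1, 2], [3, 4]], shape [2, 2]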
import tensorflow as tf
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
|
tensorflow.matmul
| 9,928 |
import tensorflow as tf
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(label_ids, predictions)
loss = tf.metrics.mean(per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
|
tensorflow.metrics.mean
| 9,929 |
from tensorflow.python.framework import ops
else:
for i, dim in enumerate(input_shape.dims):
if i not in reduction_indices:
returned_dims.append(dim)
return [tensor_shape.TensorShape(returned_dims)]
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
"""Common shape function for segment reduction ops."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
|
tensorflow.python.framework.ops.RegisterShape
| 9,930 |
import tensorflow as tf
f_i_ = distribution_f.prob(action_)
f_polyak_i = f_polyak.prob(self.action_ph)
phi_i = strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps)
q_value = strip(train_model.value_fn, self.n_envs, self.n_steps)
q_i = q_value[:, 0]
rho_i = tf.reshape(f_i, [-1, 1]) / (self.mu_ph + eps)
rho_i_ = tf.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps)
qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, tf.pow(rho_i, 1 / self.n_act),
self.n_envs, self.n_steps, self.gamma)
else:
# strip off last step
# f is a distribution, chosen to be Gaussian distributions
# with fixed diagonal covariance and mean \phi(x)
# in the paper
distribution_f, f_polyak, q_value = \
map(lambda variables: strip(variables, self.n_envs, self.n_steps),
[train_model.policy_proba, polyak_model.policy_proba, train_model.q_value])
|
tensorflow.pow
| 9,931 |
import tensorflow as tf
@dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
outputs = [
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
]
tf.train.start_queue_runners()
|
tensorflow.constant
| 9,932 |
import tensorflow as tf
size. Also randomly apply horizontal flip.
Args:
example: An example dict containing an image and a label.
random_crop_pad: By how many pixels should the image be padded on each side
before cropping.
Returns:
An example with the same label and an augmented version of the image.
"""
image, label = example['image'], example['label']
image = tf.image.random_flip_left_right(image)
image_shape = tf.shape(image)
image = tf.pad(
image, [[random_crop_pad, random_crop_pad],
[random_crop_pad, random_crop_pad], [0, 0]],
mode='REFLECT')
image = tf.image.random_crop(image, image_shape)
return {'image': image, 'label': label}
def auto_augmentation(example,
|
tensorflow.image.random_flip_left_right
| 9,933 |
import tensorflow as tf
state_prob = tf.placeholder(tf.float32,name='state_prob')
output_prob = tf.placeholder(tf.float32,name='output_prob')
rnn_inputs = x
"""Define a single cell with variational dropout"""
def get_a_cell(state_size,input_prob,state_prob,num_input):
if cell_type == 'LSTM':
if activation == 'linear':
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif activation == 'relu':
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
else: #tanh by default
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
|
tensorflow.contrib.rnn.DropoutWrapper
| 9,934 |
import tensorflow as tf
def get_valid_batch(image,label,batch_size):
images,labels=tf.train.batch([image,label],batch_size=batch_size)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
|
tensorflow.reshape
| 9,935 |
import tensorflow as tf
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
|
tensorflow.nn.rnn_cell.LSTMCell
| 9,936 |
import tensorflow as tf
num_objects = num_objects_batch[i]
num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)
|
tensorflow.reshape
| 9,937 |
import tensorflow as tf
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
|
tensorflow.one_hot
| 9,938 |
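For reference, a minimal sketch of tf.one_hot as used for the label encoding above (illustrative labels and depth):
import tensorflow as tf
labels = tf.constant([0, 2])
one_hot_labels = tf.one_hot(labels, depth=3, dtype=tf.float32)   # [[1., 0., 0.], [0., 0., 1.]]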
import tensorflow as tf
pad_id = tf.constant(pad_id, dtype=tf.int32)
# fmt: off
padded_shapes = kwargs.get("padded_shapes", ([maxlen, ], [maxlen, ], [maxlen, ], [maxlen, ]))
padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
# fmt: on
dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs)
return dataset
def _batch_padding(self, dataset: tf.data.Dataset, pad_id=0, **kwargs) -> tf.data.Dataset:
pad_id = tf.constant(pad_id, dtype=tf.int32)
# fmt: off
padded_shapes = kwargs.get("padded_shapes", ([None, ], [None, ], [None, ], [None, ]))
padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id))
# fmt: on
dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs)
return dataset
def _bucket_padding(self, dataset: tf.data.Dataset, pad_id=0, **kwargs) -> tf.data.Dataset:
|
tensorflow.constant
| 9,939 |
from tensorflow.python.framework import ops
update_op = compute_sensitivity_at_specificity('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, sensitivity)
if updates_collections:
|
tensorflow.python.framework.ops.add_to_collections
| 9,940 |
from tensorflow.python.ops import array_ops
max_var = contrib_variables.local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
|
tensorflow.python.ops.array_ops.zeros
| 9,941 |
import tensorflow as tf
validnum: valid_num
}
with tf.Session(config=config) as sess:
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
tf.train.Saver().restore(sess,path)
#test
|
tensorflow.train.Coordinator
| 9,942 |
import tensorflow as tf
return [train]+ops
def mgpu_predict(*xs):
gpu_ops = []
xs = (tf.split(x, n_gpu, 0) for x in xs)
for i, xs in enumerate(zip(*xs)):
with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=True):
clf_logits, clf_losses, lm_losses = model(*xs, train=False, reuse=True)
|
tensorflow.split
| 9,943 |
from tensorflow.contrib.eager.python.examples.spinn import data
# 1. Create and load a fake SNLI data file and a fake GloVe embedding file.
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
train_data = data.SnliData(fake_train_file, word2index)
dev_data = data.SnliData(fake_train_file, word2index)
test_data = data.SnliData(fake_train_file, word2index)
# 2. Create a fake config.
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"))
# 3. Test training of a SPINN model.
trainer = spinn.train_or_infer_spinn(
|
tensorflow.contrib.eager.python.examples.spinn.data.SnliData
| 9,944 |
import tensorflow as tf
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
#'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all'
'model_scope', None,
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
'Whether to ignore missing variables when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', False,
'Whether we will train on cloud.')
tf.app.flags.DEFINE_boolean(
'seq_train', False,
'Whether we will train a sequence model.')
tf.app.flags.DEFINE_string(#
'model_to_train', 'blouse, dress, outwear, skirt, trousers', #'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers',
'The sub-model to train (comma-separated list).')
FLAGS = tf.app.flags.FLAGS
#--model_scope=blouse --checkpoint_path=./logs/all --data_format=channels_last --batch_size=1
|
tensorflow.app.flags.DEFINE_boolean
| 9,945 |
from tensorflow.python.ops import math_ops
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else (
array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims((), sample_ndims, name="sample_dims"),
make_dims((sample_ndims,), self.batch_ndims, name="batch_dims"),
make_dims((sample_ndims, self.batch_ndims),
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: `String`. The name to give this op.
Returns:
|
tensorflow.python.ops.math_ops.range
| 9,946 |
from tensorflow.python.ops import array_ops
# Sparse.
if isinstance(tensor, ops.SparseTensorValue):
tensor = ops.SparseTensor.from_value(tensor)
if isinstance(tensor, ops.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat(
0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],
array_ops.slice(tensor.shape, expand_dims, [-1])),
|
tensorflow.python.ops.array_ops.size
| 9,947 |
import tensorflow as tf
u = tf.string_join(z_t[i:i + self._p], '')
vz_keys, r = tf.cond(
tf.greater(vz.lookup(u), -1),
true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)),
false_fn=lambda: (
tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0), tf.constant(1, dtype=tf.int64))
)
vz.insert(u, r)
kk = tf.Variable(0, dtype=tf.int64)
for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
to_add = tf.cond(
tf.greater(vz.lookup(vx_keys[i]), -1),
true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
false_fn=lambda: tf.constant(0, dtype=tf.int64)
)
kk = tf.math.add(kk, to_add)
|
tensorflow.Variable
| 9,948 |
import tensorflow as tf
return tf.squeeze(y,axis=0)
element = [x,x_coori]
ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Analysis Transform")
def loop_hyper_encoder(y):
y = tf.expand_dims(y, 0)
z = hyper_encoder(y)
return tf.squeeze(z,axis=0)
zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
print("Hyper Encoder")
|
tensorflow.expand_dims
| 9,949 |
import tensorflow as tf
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
loss = tf.reduce_mean(loss)
return loss
def contra_step_lossV3(pred, tgt, margin=1.0):
# Step-wise contrastive loss
pred1, pred2 = tf.split(pred, 2, axis=0)
tgt1, tgt2 = tf.split(tgt, 2, axis=0)
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tgt_larg = tf.where(geq, tgt1, tgt2)
tgt_small = tf.where(geq, tgt2, tgt1)
pred_larg = tf.where(geq, pred1, pred2)
pred_small = tf.where(geq, pred2, pred1)
loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin)
loss = tf.reduce_mean(loss)
return loss
def contra_step_lossV4(pred, tgt):
# 50*50
# Step-wise contrastive loss
|
tensorflow.where
| 9,950 |
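For reference, a minimal sketch of the three-argument tf.where select used in the contrastive losses above (illustrative values):
import tensorflow as tf
cond = tf.constant([True, False, True])
x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([10.0, 20.0, 30.0])
out = tf.where(cond, x, y)      # element-wise select -> [1., 20., 3.]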
import tensorflow as tf
_, h, w, _ = input.get_shape().as_list()
slide_window = size[0] * size[1]
mask = tf.ones(shape=[1, h, w, 1])
update_mask = tf.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id,
kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
strides=stride, padding="SAME", use_bias=False, trainable=False)
mask_ratio = slide_window / (update_mask + 1e-8)
update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
mask_ratio = mask_ratio * update_mask
with tf.variable_scope('parconv'):
x = tf.layers.conv2d(input, filters=channels, name='conv' + id, kernel_size=size, kernel_initializer=init,
strides=stride, padding="SAME", use_bias=False)
x = x * mask_ratio
if use_bias:
|
tensorflow.clip_by_value
| 9,951 |
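For reference, a minimal sketch of tf.clip_by_value, which the partial-convolution mask update above relies on (illustrative values):
import tensorflow as tf
x = tf.constant([-1.0, 0.5, 2.0])
clipped = tf.clip_by_value(x, 0.0, 1.0)   # -> [0., 0.5, 1.]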
import tensorflow as tf
# logit of the start position
with tf.variable_scope("start_logits"):
start_logits = tf.layers.dense(
output,
1,
kernel_initializer=initializer)
start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0])
start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask
start_log_probs = tf.nn.log_softmax(start_logits_masked, -1)
# logit of the end position
with tf.variable_scope("end_logits"):
if is_training:
# during training, compute the end logits based on the
# ground truth of the start position
start_positions = tf.reshape(features["start_positions"], [-1])
start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1,
dtype=tf.float32)
start_features = tf.einsum("lbh,bl->bh", output, start_index)
start_features = tf.tile(start_features[None], [seq_len, 1, 1])
end_logits = tf.layers.dense(
|
tensorflow.variable_scope
| 9,952 |
import tensorflow as tf
if is_dilated:
layer = tf.nn.atrous_conv2d(img, w, rate=2, padding='SAME') + b
else:
layer = tf.nn.conv2d(img, w, strides=strides, padding='SAME') + b
return layer
|
tensorflow.nn.conv2d
| 9,953 |
import tensorflow as tf
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session() as sess:
v0_2 = tf.Variable(1000.0, name="v0")
v1_2 = tf.Variable(2000.0, name="v1")
save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
|
tensorflow.train.Saver
| 9,954 |
import tensorflow as tf
OUTPUT_GRAPH = False
ENV_NAME = 'BipedalWalker-v2'
GLOBAL_STEP = tf.Variable(0, trainable=False)
INCREASE_GS = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))
LR_A = tf.train.exponential_decay(LR_A, GLOBAL_STEP, 10000, .97, staircase=True)
LR_C = tf.train.exponential_decay(LR_C, GLOBAL_STEP, 10000, .97, staircase=True)
END_POINT = (200 - 10) * (14/30) # from game
env = gym.make(ENV_NAME)
|
tensorflow.train.exponential_decay
| 9,955 |
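For reference, a minimal TF 1.x sketch of tf.train.exponential_decay (illustrative hyperparameters; a rough TF 2.x equivalent is tf.keras.optimizers.schedules.ExponentialDecay):
import tensorflow as tf
global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.01, global_step, decay_steps=10000, decay_rate=0.97, staircase=True)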
import tensorflow as tf
value = self.data_dict[name][idx]
else:
value = initial_value
if self.trainable:
var = tf.get_variable(name = var_name, initializer=value, trainable=True)
# tf.Variable(value, name=var_name)
else:
var = tf.constant(value, dtype=tf.float32, name=var_name)
self.var_dict[(name, idx)] = var
# print var_name, var.get_shape().as_list()
assert var.get_shape() == initial_value.get_shape()
return var
|
tensorflow.constant
| 9,956 |
from tensorflow.python.framework import ops
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
|
tensorflow.python.framework.ops.RegisterShape
| 9,957 |
import tensorflow as tf
else:
encoder_outputs.append(encoder_outputs_)
encoder_states.append(encoder_state_)
new_encoder_input_length.append(encoder_input_length_)
encoder_state = tf.concat(encoder_states, 1)
return encoder_outputs, encoder_state, new_encoder_input_length
def compute_energy(hidden, state, encoder, time=None, input_length=None, prev_weights=None, **kwargs):
batch_size = tf.shape(hidden)[0]
time_steps = tf.shape(hidden)[1]
if encoder.attn_keep_prob is not None:
state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None
state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape)
hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None
hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape)
if encoder.mult_attn:
state = dense(state, encoder.attn_size, use_bias=False, name='state')
|
tensorflow.shape
| 9,958 |
from tensorflow.python.framework import constant_op
# Although the last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses shouldn't collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
|
tensorflow.python.framework.constant_op.constant
| 9,959 |
from tensorflow.python.ops import array_ops
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.square(logits - math_ops.to_float(target))
def _log_loss_with_two_classes(logits, target):
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
loss_vec = nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(target), logits=logits)
return loss_vec
def _softmax_cross_entropy_loss(logits, target):
|
tensorflow.python.ops.array_ops.expand_dims
| 9,960 |
import tensorflow as tf
"""LSTM layer."""
words, nwords = tensors
t = tf.transpose(words, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
outputs_fw, (hidden_fw, output_fw) = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
if bidirectional:
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
hidden = tf.concat([hidden_fw, hidden_bw], axis=-1)
output = tf.concat([output_fw, output_bw], axis=-1)
else:
outputs = outputs_fw
hidden = hidden_fw
output = output_fw
outputs = tf.transpose(outputs, perm=[1, 0, 2])
return (outputs, hidden, output)
|
tensorflow.concat
| 9,961 |
from tensorflow.python.ops import array_ops
result.update(_run_metrics(class_predictions, targets, class_metrics,
self.get_weight_tensor(features)))
if proba_metrics:
predictions = self.logits_to_predictions(logits, proba=True)
result.update(_run_metrics(predictions, targets, proba_metrics,
self.get_weight_tensor(features)))
return result
class _BinarySvmTargetColumn(_MultiClassTargetColumn):
"""_TargetColumn for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name):
def loss_fn(logits, target):
check_shape_op = logging_ops.Assert(
math_ops.less_equal(array_ops.rank(target), 2),
["target's shape should be either [batch_size, 1] or [batch_size]"])
with ops.control_dependencies([check_shape_op]):
target = array_ops.reshape(
target, shape=[array_ops.shape(target)[0], 1])
return losses.hinge_loss(logits, target)
super(_BinarySvmTargetColumn, self).__init__(
loss_fn=loss_fn,
n_classes=2,
label_name=label_name,
weight_column_name=weight_column_name)
def logits_to_predictions(self, logits, proba=False):
if proba:
|
tensorflow.python.ops.array_ops.rank
| 9,962 |
import tensorflow as tf
nms_classes2,
_) = isu.instance_non_maximum_suppression_1d_scores(
masks,
scores,
classes,
min_score_thresh=0.65,
min_iou_thresh=0.5,
is_class_agnostic=False)
nms_masks_expected2 = tf.stack([mask0, mask1, mask4, mask2])
nms_scores_expected2 = tf.constant([1.0, 0.9, 0.85, 0.8], dtype=tf.float32)
nms_classes_expected2 = tf.constant([1, 2, 2, 3], dtype=tf.int32)
self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())
self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())
self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())
self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())
self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())
self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())
def test_instance_non_maximum_suppression_1d_scores_empty_inputs(self):
masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
|
tensorflow.constant
| 9,963 |
from tensorflow.python.ops import variable_scope
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(scope, 'Conv', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if conv_dims is not None and conv_dims + 2 != input_rank:
raise ValueError('Convolution expects input with rank %d, got %d' % (conv_dims + 2, input_rank))
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = MyConv2D
elif input_rank == 5:
|
tensorflow.python.ops.variable_scope.variable_scope
| 9,964 |
import tensorflow as tf
return host_call_fn, [global_step_tensor] + other_tensors
def two_stream_loss(FLAGS, features, labels, mems, is_training):
"""Pretraining loss with two-stream attention Transformer-XL."""
#### Unpack input
mem_name = "mems"
mems = mems.get(mem_name, None)
inp_k = tf.transpose(features["input_k"], [1, 0])
inp_q = tf.transpose(features["input_q"], [1, 0])
seg_id = tf.transpose(features["seg_id"], [1, 0])
inp_mask = None
perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0])
if FLAGS.num_predict is not None:
# [num_predict x tgt_len x bsz]
|
tensorflow.transpose
| 9,965 |
import tensorflow as tf
gsum = tf.reduce_sum(gstack, axis=1)
phi = tf.get_variable("phi", (self.g_dim, self.k))
w = tf.matmul(gsum, phi)
w = tf.expand_dims(w, [2])
# Calculate policy and sample
logits = tf.reshape(tf.matmul(U, w), [-1, num_acts])
self.pi = tf.nn.softmax(logits)
self.log_pi = tf.nn.log_softmax(logits)
self.sample = policy_utils.categorical_sample(
tf.reshape(logits, [-1, num_acts]), num_acts)[0, :]
def build_value(self, _input):
with tf.variable_scope('VF'):
hidden = tf.layers.dense(inputs=_input,
units=self.vf_hidden_size,
activation=tf.nn.elu)
w = tf.get_variable("weights", (self.vf_hidden_size, 1))
return tf.matmul(hidden, w)
def build_loss(self):
cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
gcut = tf.stop_gradient(self.g)
|
tensorflow.variable_scope
| 9,966 |
import tensorflow as tf
# Create clones.
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device(i)
with tf.device(clone_device):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
outputs = model_fn(*args, **kwargs)
|
tensorflow.device
| 9,967 |
import tensorflow as tf
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
|
tensorflow.gfile.Exists
| 9,968 |
import tensorflow as tf
if not from_logits:
# transform back to logits
epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
output = tf.clip_by_value(output, epsilon, 1 - epsilon)
output = tf.log(output / (1 - output))
try:
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
except TypeError:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target)
def sum(x, axis=None, keepdims=False):
|
tensorflow.nn.sigmoid_cross_entropy_with_logits
| 9,969 |
import tensorflow as tf
with tf.Session() as sess:
|
tensorflow.Session
| 9,970 |
import tensorflow as tf
def create_variable_for_generator(name, batch_size):
return tf.get_variable('learnable_dlatents',
shape=(batch_size, 18, 512),
dtype='float32',
initializer=tf.initializers.random_normal())
class Generator:
|
tensorflow.initializers.random_normal
| 9,971 |
from tensorflow.python.framework import ops
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
|
tensorflow.python.framework.ops.op_scope
| 9,972 |
from tensorflow.python.framework import ops
softmax cross entropy loss.
"""
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
logits, labels, name=name)
return cost
@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
input_shape = logits_shape.with_rank(2)
batch_size = input_shape[0]
# labels_shape
op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
return [tensor_shape.vector(batch_size.value), input_shape]
|
tensorflow.python.framework.ops.RegisterShape
| 9,973 |
import tensorflow as tf
x = self.__conv2d(
name="{}2c".format(conv_name_base),
inputs=x, filter_depth=filter_depth3, kernel_size=1,
padding="same", stride=1
)
x = self.__batch_norm("{}2c".format(bn_name_base), x)
x = tf.add(x, shortcut)
return tf.nn.relu(x)
def __identity_block(self, stage, block, inputs,
filter_depths, kernel_size):
filter_depth1, filter_depth2, filter_depth3 = filter_depths
|
tensorflow.add
| 9,974 |
import tensorflow as tf
diag_indeces = [0]
for row in range(1, self.nb_actions):
diag_indeces.append(diag_indeces[-1] + (row + 1))
diag_mask = np.zeros(1 + nb_elems) # +1 for the leading zero
diag_mask[np.array(diag_indeces) + 1] = 1
diag_mask = K.variable(diag_mask)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = tf.shape(L_flat)[0]
zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old TF behavior.
L_flat = tf.concat(1, [zeros, L_flat])
except TypeError:
# New TF behavior
L_flat = tf.concat([zeros, L_flat], 1)
|
tensorflow.shape
| 9,975 |
from tensorflow.python.training import moving_averages
mean: The mean value to update with.
variance: The variance value to update with.
is_training: Boolean Tensor to indicate if we're currently in
training mode.
"""
def build_update_ops():
"""Builds the exponential moving average update ops."""
update_mean_op = moving_averages.assign_moving_average(
variable=self._moving_mean,
value=mean,
decay=self._decay_rate,
name="update_moving_mean").op
update_variance_op = moving_averages.assign_moving_average(
variable=self._moving_variance,
value=variance,
decay=self._decay_rate,
name="update_moving_variance").op
return update_mean_op, update_variance_op
def build_no_ops():
return (tf.no_op(), tf.no_op())
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
|
tensorflow.python.training.moving_averages.assign_moving_average
| 9,976 |
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
new_h = (1. - att_score) * state + att_score * c
return new_h, new_h
|
tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear
| 9,977 |
import tensorflow as tf
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
state_size = state.get_shape()[1].value
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
if pos is not None:
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
if pos is not None and encoder.attn_window_size > 0:
# `pred_edits` scenario, where we know the aligned pos
# when the windows size is non-zero, we concatenate consecutive encoder states
# and map it to the right attention vector size.
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = []
for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
|
tensorflow.minimum
| 9,978 |
import tensorflow as tf
def loss(self, logits, forward_only=None):
cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self.y, tf.float32))
mean_cost = tf.reduce_mean(cost)
y_pred = tf.argmax(logits, 1)
correct_pred = tf.equal(y_pred, tf.argmax(self.y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if forward_only:
str_summary_type = 'eval'
loss_summ = tf.summary.scalar("{0}_loss".format(str_summary_type), mean_cost)
acc_summ = tf.summary.scalar("{0}_accuracy".format(str_summary_type), accuracy)
merged = tf.summary.merge([loss_summ, acc_summ])
return mean_cost, accuracy, y_pred, merged
else:
return mean_cost, accuracy, y_pred
def training(self, cost):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
# train_op = optimizer.minimize(cost)
trainables = tf.trainable_variables()
|
tensorflow.summary.merge
| 9,979 |
import tensorflow as tf
if FLAGS.new_blur:
net = net[..., :self.batch_shape[-1]]
net = tf.nn.relu(net)
net = tf.cast(net <= 1, net.dtype) * net * 255
|
tensorflow.nn.relu
| 9,980 |
import tensorflow as tf
staging_delta_ops += gpu_grad_stage_ops
if staging_delta_ops:
enqueue_ops.append(tf.group(*(staging_delta_ops)))
|
tensorflow.group
| 9,981 |
import tensorflow as tf
# the output
state_size = lstm_cell.state_size
# the LSTMs are stateful. To support multiple batch sizes,
# we'll allocate size for states up to max_batch_size,
# then use the first batch_size entries for each batch
init_states = [
tf.Variable(
tf.zeros([self._max_batch_size, dim]),
trainable=False
)
for dim in lstm_cell.state_size
]
batch_init_states = [
state[:batch_size, :] for state in init_states
|
tensorflow.zeros
| 9,982 |
import tensorflow as tf
w = tf.get_variable("w", [nx, ny], initializer=w_init)
b = tf.get_variable("b", [ny], initializer=b_init)
return tf.matmul(x, w)+b
def model(X, M, Y, train=False, reuse=False):
with tf.variable_scope('model', reuse=reuse):
we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02))
we = dropout(we, embd_pdrop, train)
#X:[n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2,n_ctx,2]
X = tf.reshape(X, [-1, n_ctx, 2])
M = tf.reshape(M, [-1, n_ctx])
h = embed(X, we)
#h=[-1,n_ctx,emb]
for layer in range(n_layer):
h = block(h, 'h%d'%layer, train=train, scale=True)
#h=[-1,n_ctx,emb] lm_h [-1,emb]
lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
|
tensorflow.reshape
| 9,983 |
import tensorflow as tf
'execution_barrier_', [])
global_step = tf.contrib.framework.get_global_step()
with tf.device(self.global_step_device):
|
tensorflow.contrib.framework.get_global_step
| 9,984 |
from tensorflow.python.ops import math_ops
tuple.
"""
predictions, labels = tensor_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.mul(predictions, labels)
radial_diffs = math_ops.reduce_sum(radial_diffs,
reduction_indices=[dim,],
keep_dims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights,
None,
None,
name or 'mean_cosine_distance')
mean_distance = math_ops.sub(1.0, mean_distance)
update_op = math_ops.sub(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_percentage_less(values, threshold, ignore_mask=None, weights=None,
metrics_collections=None,
|
tensorflow.python.ops.math_ops.sub
| 9,985 |
import tensorflow as tf
# Under the benchmarking mode, user can specify whether nor not to use
# the forward-only option, which will only compute the loss function.
# forward-only cannot be enabled with eval at the same time.
tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')
tf.flags.DEFINE_boolean('forward_only', False, """whether use forward-only or
training for benchmarking""")
tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device')
tf.flags.DEFINE_integer('num_batches', 100,
'number of batches to run, excluding warmup')
tf.flags.DEFINE_integer('num_warmup_batches', None,
'number of batches to run before timing')
tf.flags.DEFINE_integer('autotune_threshold', None,
|
tensorflow.flags.DEFINE_integer
| 9,986 |
import tensorflow as tf
checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform,
|
tensorflow.train.Checkpoint
| 9,987 |
import tensorflow as tf
Virtual Batch Normalization
"""
def __init__(self, x, name, epsilon=1e-5):
"""
x is the reference batch
"""
assert isinstance(epsilon, float)
shape = x.get_shape().as_list()
with tf.variable_scope(name) as scope:
self.epsilon = epsilon
self.name = name
self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)
self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)
self.batch_size = int(x.get_shape()[0])
assert x is not None
assert self.mean is not None
assert self.mean_sq is not None
out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, "reference"))
|
tensorflow.variable_scope
| 9,988 |
import tensorflow as tf
'backbone', 'seresnext50',#or seresnext50 seresnet50
'The backbone network to use for feature pyramid.')
tf.app.flags.DEFINE_float(
'heatmap_sigma', 1.,
'The sigma of Gaussian which generate the target heatmap.')
tf.app.flags.DEFINE_float(
'bbox_border', 25.,
'The nearest distance of the crop border to all keypoints.')
tf.app.flags.DEFINE_integer(
'train_epochs', 50,
|
tensorflow.app.flags.DEFINE_float
| 9,989 |
import tensorflow as tf
ret = tf.image.resize_bicubic([image], shape)
return tf.cast(tf.clip_by_value(ret, 0, 255), tf.uint8)[0]
def resize_shortest_edge(image, image_shape, size):
shape = tf.cast(image_shape, tf.float32)
w_greater = tf.greater(image_shape[0], image_shape[1])
shape = tf.cond(w_greater,
lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32),
lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32))
return uint8_resize_bicubic(image, shape)
def center_crop(image, size):
image_height = tf.shape(image)[0]
|
tensorflow.cast
| 9,990 |
import tensorflow as tf
1. / model_options.output_stride)
image_feature = slim.avg_pool2d(
features, [pool_height, pool_width],
model_options.image_pooling_stride, padding='VALID')
resize_height = scale_dimension(
model_options.crop_size[0],
1. / model_options.output_stride)
resize_width = scale_dimension(
model_options.crop_size[1],
1. / model_options.output_stride)
else:
# If crop_size is None, we simply do global pooling.
pool_height = tf.shape(features)[1]
pool_width = tf.shape(features)[2]
image_feature = tf.reduce_mean(
features, axis=[1, 2], keepdims=True)
resize_height = pool_height
resize_width = pool_width
image_feature_activation_fn = tf.nn.relu
image_feature_normalizer_fn = batch_norm
if model_options.aspp_with_squeeze_and_excitation:
image_feature_activation_fn = tf.nn.sigmoid
if model_options.image_se_uses_qsigmoid:
image_feature_activation_fn = utils.q_sigmoid
image_feature_normalizer_fn = None
image_feature = slim.conv2d(
image_feature, depth, 1,
activation_fn=image_feature_activation_fn,
|
tensorflow.shape
| 9,991 |
import tensorflow as tf
if encoder.attn_keep_prob is not None:
state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None
state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape)
hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None
hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape)
if encoder.mult_attn:
state = dense(state, encoder.attn_size, use_bias=False, name='state')
hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden')
return tf.einsum('ijk,ik->ij', hidden, state)
y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a')
y = tf.expand_dims(y, axis=1)
if encoder.layer_norm:
y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state')
hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden')
|
tensorflow.einsum
| 9,992 |
import tensorflow as tf
fetches, feeds = subgraphs['default']
self.assertTrue('wav' in feeds)
for name in ['hypotheses', 'scores', 'src_frames', 'encoder_frames']:
self.assertTrue(name in fetches)
with open(
test_helper.test_src_dir_path('tools/testdata/gan_or_vae.16k.wav'),
'rb') as f:
wav = f.read()
sess.run(tf.global_variables_initializer())
fetches = sess.run(fetches, {feeds['wav']: wav})
self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),
fetches['hypotheses'].shape)
self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),
fetches['scores'].shape)
self.assertAllEqual((1, 314, p.encoder.input_shape[2], 1),
fetches['src_frames'].shape)
self.assertAllEqual((80, 1, 2 * p.encoder.lstm_cell_size),
|
tensorflow.global_variables_initializer
| 9,993 |
import tensorflow as tf
true_image_shapes = [inputs.shape[:-1].as_list()
for _ in range(inputs.shape[-1])]
return tf.image.resize_images(inputs, [28, 28]), true_image_shapes
|
tensorflow.image.resize_images
| 9,994 |
from tensorflow.python.training import saver as saver_lib
# Check that we are not running evaluation on the same checkpoint.
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
|
tensorflow.python.training.saver.latest_checkpoint
| 9,995 |
import tensorflow as tf
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
|
tensorflow.nn.dropout
| 9,996 |
import tensorflow as tf
cell = tf.contrib.rnn.BasicRNNCell(num_units=state_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
'''Prediction, loss, optimization'''
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''rnn_outputs is 3-D, so reshape it to 2-D here,
do the matrix multiplication, then reshape back to [batch_size, num_steps, num_classes]'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) +b, \
shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)
y_as_list = tf.unstack(y, num=num_steps, axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
'''Train the network'''
def train_rnn(num_epochs, num_steps, state_size=4, verbose=True):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
training_losses = []
for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
training_loss = 0
training_state = np.zeros((batch_size, state_size)) # ->(200, 4)
if verbose:
print('\nepoch', idx)
|
tensorflow.reduce_mean
| 9,997 |
import tensorflow as tf
def _evaluate_spherical_harmonics_branch(degree,
order,
theta,
phi,
sign_order,
var_type=tf.float64):
sqrt_2 = tf.constant(1.41421356237, dtype=var_type)
order_float = tf.cast(order, dtype=var_type)
tmp = sqrt_2 * _spherical_harmonics_normalization(
degree, order, var_type) * evaluate_legendre_polynomial(
degree, order, tf.cos(theta))
positive = tmp * tf.cos(order_float * phi)
negative = tmp * tf.sin(order_float * phi)
return tf.where(tf.greater(sign_order, 0), positive, negative)
|
tensorflow.cast
| 9,998 |
import tensorflow as tf
if model_io_config.fix_lm == True:
scope = model_config.scope + "_finetuning"
else:
scope = model_config.scope
with tf.variable_scope(scope, reuse=model_reuse):
(loss,
per_example_loss,
logits) = classifier.classifier(model_config,
model.get_pooled_output(),
num_labels,
|
tensorflow.variable_scope
| 9,999 |