seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, values 0 to 14.8k
import tensorflow as tf
        axis=1)
    use_target = tf.logical_and(time < time_steps - 1,
                                tf.random_uniform([]) >= feed_previous)
    predicted_symbol = tf.case([
tensorflow.random_uniform
9,300
import tensorflow as tf
opt = tf.train.AdamOptimizer(self.LR)
self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
self.sess.run(tf.global_variables_initializer())

# Tensorboard
if summary_dir is not None:
    self.writer = tf.summary.FileWriter(summary_dir)
    tf.summary.scalar('Loss/Policy', loss_pg)
    tf.summary.scalar('Loss/Value', loss_vf)
    tf.summary.scalar('Loss/Entropy', loss_entropy)
    tf.summary.scalar('Loss/Total', loss)
    tf.summary.scalar('Var/Epsilon', epsilon_decay)
    tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
    tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
    tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
    self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

# AC net
def build_anet(self, state_in, name, reuse=False, batch_size=64):
    reg = None
    with tf.variable_scope(name, reuse=reuse):
        layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
        layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
tensorflow.summary.scalar
9,301
import tensorflow as tf
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tensorflow.reduce_mean
9,302
import tensorflow as tf
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.math.interpolation import trilinear
from tensorflow_graphics.projects.points_to_3Dobjects.models import centernet_utils
from tensorflow_graphics.projects.points_to_3Dobjects.utils import tf_utils
from google3.pyglib import gfile
from google3.third_party.google_research.google_research.tf3d.object_detection.box_utils import np_box_ops


class ShapeAccuracyMetric:
    """Computes the accuracy of shape prediction."""

    def __init__(self, k=1):
        self.metric = tf.keras.metrics.SparseTopKCategoricalAccuracy(k)

    def update(self, sparse_labels, predicted_probabilities, sample_weights=None):
        self.metric.update_state(sparse_labels, predicted_probabilities, sample_weights)

    def evaluate(self):
        return self.metric.result().numpy()

    def reset(self):
        self.metric.reset_states()


def get_2d_bounding_box_iou(box1, box2):
tensorflow.keras.metrics.SparseTopKCategoricalAccuracy
9,303
import tensorflow as tf
xs = x.get_shape().as_list()
if pad == 'SAME':
    target_shape = [tf.shape(x)[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
else:
    target_shape = [tf.shape(x)[0], xs[1]*stride[0] + filter_size[0]-1,
                    xs[2]*stride[1] + filter_size[1]-1, num_filters]
with tf.variable_scope(scope):
    V = tf.get_variable("V", shape=list(filter_size) + [num_filters, int(x.get_shape()[-1])],
                        dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.05),
                        trainable=True)
    g = tf.get_variable("g", shape=[num_filters], dtype=tf.float32,
                        initializer=tf.constant_initializer(1.), trainable=True)
    b = tf.get_variable("b", shape=[num_filters], dtype=tf.float32,
                        initializer=bias_initializer, trainable=True)

    def maybe_avg(v):
        if ema is not None and not init:
tensorflow.variable_scope
9,304
import tensorflow as tf
if self.ctx2out:
    w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
    h_logits += tf.matmul(context, w_ctx2out)

if self.prev2out:
    h_logits += x
h_logits = tf.nn.tanh(h_logits)

if dropout:
    h_logits = tf.nn.dropout(h_logits, 0.5)
out_logits = tf.matmul(h_logits, w_out) + b_out
return out_logits

def _batch_norm(self, x, mode='train', name=None):
    return tf.contrib.layers.batch_norm(inputs=x,
                                        decay=0.95,
                                        center=True,
                                        scale=True,
                                        is_training=(mode=='train'),
                                        updates_collections=None,
                                        scope=(name+'batch_norm'))

def build_model(self):
    features = self.features
    captions = self.captions
    batch_size = tf.shape(features)[0]

    captions_in = captions[:, :self.T]
    captions_out = captions[:, 1:]
tensorflow.contrib.layers.batch_norm
9,305
import tensorflow as tf
with self.test_session() as sess:
    v = tf.Variable(np.int64(-1), name="v")
    save = tf.train.Saver({"v": v})
    with self.assertRaisesWithPredicateMatch(
            tf.OpError, lambda e: "uninitialized value v" in e.message):
        sess.run(v)
    # Restore the saved values in the parameter nodes.
    save.restore(sess, save_path)
    # Check that the parameter nodes have been restored.
    self.assertEqual(np.int64(15), v.eval())

def testSomeErrors(self):
    with tf.Graph().as_default():
        v0 = tf.Variable([10.0], name="v0")
        v1 = tf.Variable([20.0], name="v1")
        v2 = tf.Variable([20.0], name="v2")
        v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))

        # By default the name used for "v2" will be "v1" and raise an error.
        with self.assertRaisesRegexp(ValueError, "same name: v1"):
            tf.train.Saver([v0, v1, v2])

        # The names are different and will work.
        tf.train.Saver({"vee1": v1, "other": [v2]})

def testBasicsWithListOfVariables(self):
    save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
tensorflow.Variable
9,306
import tensorflow as tf
    >>> samples.dtype
    dtype('float32')

    """
    mu, var = self.build_prior_mean_var(test_points, num_latent, True)
    jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
    L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
    V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
    V = tf.random_normal(V_shape, dtype=L.dtype)
    samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
    return tf.transpose(samples)

@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]),
          (tf.float64, [None, None]))
def compute_posterior_mean_var(self, X, Y, test_points):
    """Computes the means and variances of the posterior(s).
tensorflow.random_normal
9,307
from tensorflow.python.ops import control_flow_ops
# Make sure update_ops are computed before total_loss.
if update_ops:
    with tf.control_dependencies(update_ops):
        barrier = tf.no_op(name='update_barrier')
        self.d_losses[-1] = control_flow_ops.with_dependencies([barrier], self.d_losses[-1])
        self.g_losses[-1] = control_flow_ops.with_dependencies([barrier], self.g_losses[-1])
        self.d_loss_real = control_flow_ops.with_dependencies([barrier], self.d_loss_real)
        self.d_loss_fake = control_flow_ops.with_dependencies([barrier], self.d_loss_fake)
tensorflow.python.ops.control_flow_ops.with_dependencies
9,308
import tensorflow as tf
def encoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train,
                 dropout=0.0, stddev=-1.0, slope=0.00, use_bias=True):
    with tf.variable_scope(name) as scope:
        if scale > 1:
            X = self.conv(name + '_downsample', X, filter, scale, scale,
                          (not norm) and use_bias, "VALID", stddev)
        else:
            X = self.conv(name + '_conf', X, filter, f_size, 1,
                          (not norm) and use_bias, "VALID", stddev)
        if norm == 'I':
            X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse)
        elif norm == 'B':
            X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)
        elif norm == 'G':
            X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
        if dropout > 0.0:
            X = tf.layers.dropout(X, dropout, training=is_train)
        if slope < 1.0:
            X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
        return X

def decoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train,
                 dropout=0.0, stddev=-1.0, slope=0.00,
tensorflow.layers.batch_normalization
9,309
import tensorflow as tf
# test_X: (400, 128)
# test_y: (400, n_class)
train_y_1 = to_categorical(train_y_1, n_class_1)
test_y_1 = to_categorical(test_y_1, n_class_1)
train_y_2 = to_categorical(train_y_2, n_class_2)
test_y_2 = to_categorical(test_y_2, n_class_2)
return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2

def apply_cross_stitch(input1, input2):
    input1_reshaped = contrib.layers.flatten(input1)
    input2_reshaped = contrib.layers.flatten(input2)
    input = tf.concat((input1_reshaped, input2_reshaped), axis=1)

    # initialize with identity matrix
    cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]),
                                   dtype=tf.float32,
                                   collections=['cross_stitches', tf.GraphKeys.GLOBAL_VARIABLES],
                                   initializer=tf.initializers.identity())
    output = tf.matmul(input, cross_stitch)

    # need to call .value to convert Dimension objects to normal value
    input1_shape = list(-1 if s.value is None else s.value for s in input1.shape)
    input2_shape = list(-1 if s.value is None else s.value for s in input2.shape)
    output1 = tf.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape)
tensorflow.concat
9,310
import tensorflow as tf
rnd_pred_cri = tf.squeeze(mlp(tf.concat([x_ph, a_ph], axis=-1),
                              list(hidden_sizes) + [1], activation, None),
                          axis=1)
rnd_pred_cri_in_ph = tf.concat([x_ph, a_ph], axis=-1)
rnd_pred_cri_in_dim = rnd_pred_cri_in_ph.shape.as_list()[1]
tensorflow.concat
9,311
import tensorflow as tf
          [candidates[:best_id - 1], _MergeOneToken(tokens, best_id - 1)],
          axis=0)

      left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft)

      def _MergeRight():
        return tf.concat(
            [_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0)

      right_candidates = tf.cond(
          tf.greater_equal(best_id, tf.size(tokens) - 1), lambda: empty, _MergeRight)

      candidates = tf.concat([left_candidates, right_candidates], axis=0)
      return tokens, candidates

    return tf.while_loop(
        _ShouldMerge,
        _MergeCandidates, (tokens, candidates),
        parallel_iterations=1,
        back_prop=False)[0]

  def Encode(self, text):
tensorflow.size
9,312
import tensorflow as tf
d = tf.data.Dataset.from_tensor_slices({
    "input_ids": tf.constant(
        all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32),
    "input_mask": tf.constant(
        all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32),
    "segment_ids": tf.constant(
tensorflow.constant
9,313
import tensorflow as tf
    return np.array([b" there"])

def read_and_return_strings(x, y):
    return x + y

with self.test_session():
    x = tf.constant([b"hello", b"hi"], tf.string)
    y, = tf.py_func(read_fixed_length_numpy_strings, [], [tf.string])
    z, = tf.py_func(read_and_return_strings, [x, y], [tf.string])
    self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])

def testLarge(self):
tensorflow.constant
9,314
import tensorflow.contrib.layers as layers
with tf.variable_scope(scope, reuse=reuse):
    out = img_in
    out = layers.flatten(out)
    # stddev = 1/n, where n = number of inputs
    gauss_initializer = initializers.xavier_initializer(uniform=False)
    with tf.variable_scope("action_value"):
        out = layers.fully_connected(
            out,
            num_outputs=num_actions,
            activation_fn=tf.nn.relu,
            biases_initializer=None,
            weights_initializer=gauss_initializer,
tensorflow.contrib.layers.fully_connected
9,315
import tensorflow as tf
output_propensity_list = []
for i in range(list_size):
    # Add position information (one-hot vector)
    click_feature = [tf.expand_dims(tf.zeros_like(self.labels[i]), -1) for _ in range(4*list_size)]
    click_feature[i] = tf.expand_dims(tf.ones_like(self.labels[i]), -1)
    # click_feature[list_size:] = [tf.expand_dims(tf.zeros_like(self.labels[i]), -1) for _ in range(3*list_size)]
    click_feature[list_size:list_size+i] = [tf.expand_dims(self.labels[k], -1) for k in range(i-1, -1, -1)]
    click_feature[2*list_size:2*list_size+i+1] = [tf.expand_dims(self.types[k], -1) for k in range(i, -1, -1)]
    click_feature[3*list_size:3*list_size+list_size-i-1] = [tf.expand_dims(self.types[k], -1) for k in range(i+1, list_size)]
    # Predict propensity with a simple network
    output_propensity_list.append(propensity_network(tf.concat(click_feature, 1), i))
    self.click_show = [click_feature[h][0] for h in range(4*list_size)]
tensorflow.expand_dims
9,316
import tensorflow as tf
v = variable_scope.get_variable("v", [options.attention_vec_size])
v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
tensorflow.expand_dims
9,317
import tensorflow as tf
        w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1,
        w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1
    ])
    return output

def _meshgrid(depth, height, width, z_near, z_far):
    with tf.variable_scope('_meshgrid'):
        x_t = tf.reshape(
            tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),
            [depth, height, width])
        y_t = tf.reshape(
            tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),
            [depth, width, height])
        y_t = tf.transpose(y_t, [0, 2, 1])
        sample_grid = tf.tile(
            tf.linspace(float(z_near), float(z_far), depth), [width * height])
        z_t = tf.reshape(sample_grid, [height, width, depth])
        z_t = tf.transpose(z_t, [2, 0, 1])

        z_t = 1 / z_t
        d_t = 1 / z_t
        x_t /= z_t
tensorflow.linspace
9,318
import tensorflow as tf
values = interpolated
inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0])
print(mtype, fig_obj_count, 0)

values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
inter = tf.transpose(tf.reduce_max(inter, axis=a))
im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())
plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])
print(mtype, fig_obj_count, 1)

if mtype == 1:
    values = sdf_values
tensorflow.reshape
9,319
import tensorflow as tf
def __call__(self, x, update=False):
    with tf.variable_scope(self.name) as scope:
        if not update:
            new_coeff = 1. / (self.batch_size + 1.)
            old_coeff = 1. - new_coeff
            new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)
            new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)
            mean = new_coeff * new_mean + old_coeff * self.mean
            mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq
            out = tf.nn.relu(self._normalize(x, mean, mean_sq, "live"))
        # Update the mean and mean_sq when passing the reference data
        else:
            self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)
            self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)
            out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, "reference"))
        return out

def _normalize(self, x, mean, mean_sq, message):
    # make sure this is called with a variable scope
    shape = x.get_shape().as_list()
    assert len(shape) == 4
    self.gamma = safe_get("gamma", [shape[-1]],
                          initializer=tf.random_normal_initializer(1., 0.02))
    gamma = tf.reshape(self.gamma, [1, 1, 1, -1])
    self.beta = safe_get("beta", [shape[-1]],
tensorflow.reduce_mean
9,320
import tensorflow as tf
(chars, sequence_length) = inputs

# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])

# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
tensorflow.transpose
9,321
import tensorflow as tf
        step_model: (DQNPolicy) Policy for evaluation
    """
    n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
    with tf.variable_scope("input", reuse=reuse):
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")

    with tf.variable_scope(scope, reuse=reuse):
        if param_noise:
            act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space,
                                                        stochastic_ph, update_eps_ph, sess,
tensorflow.placeholder
9,322
import tensorflow as tf
def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
        tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
tensorflow.logging.info
9,323
import tensorflow as tf
import tensorflow as tf
import tensorflow_probability as tfp

from normalizing_flows.flows import Transform
from . import Parameterize

def gaussianize(x, mus, log_sigmas, inverse=tf.constant(False)):
    if inverse:
        z = tf.math.exp(log_sigmas)*x + mus
        ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3])
    else:
tensorflow.constant
9,324
import tensorflow as tf
im = tf.clip_by_value(im, 0, 255)
im = tf.cast(im, tf.uint8, name='viz')
tf.summary.image(name, im, max_outputs=50)

# use the initializers from torch
with argscope([Conv2D, Deconv2D], use_bias=False,
              W_init=tf.random_normal_initializer(stddev=0.02)), \
        argscope([Conv2D, Deconv2D, InstanceNorm], data_format='NCHW'), \
        argscope(LeakyReLU, alpha=0.2):
    with tf.variable_scope('gen'):
        with tf.variable_scope('B'):
            AB = self.generator(A)
tensorflow.random_normal_initializer
9,325
import tensorflow as tf
self.sess = tf.Session(config=config)
self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
self.a_bound = (env.action_space.high - env.action_space.low) / 2
self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')

# Dataset with experience replay
self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
                                                   'rewards': self.rewards, 'advantage': self.advantage})
self.dataset = self.dataset.shuffle(buffer_size=10000)
self.dataset = self.dataset.batch(self.MINIBATCH)
tensorflow.placeholder
9,326
import tensorflow as tf
from utils.external import resnet_model as ResNet
from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model')
tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio')
tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate')
tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size')
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient')
tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient')

def forward_fn(inputs, is_train, data_format):
    """Forward pass function.

    Args:
    * inputs: inputs to the network's forward pass
tensorflow.app.flags.DEFINE_float
9,327
import tensorflow as tf
#Q_filter_1 = tf.cast(qf1 > min_q, tf.float32)
#Q_filter_2 = tf.cast(qf2 > min_q, tf.float32)

im_loss1 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph
#im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph
#actor_loss_di1 = tf.reduce_mean(im_loss1)
#actor_loss_di2 = tf.reduce_mean(im_loss2)
self.actor_loss_di = tf.reduce_mean(im_loss1)
imitation_for_priority = tf.reduce_mean(im_loss1, axis=1)

regularizerpi = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope="model/pi")
all_trainable_weights_pi = tf.trainable_variables('model/pi')
regularization_penalty_pi = tf.contrib.layers.apply_regularization(regularizerpi, all_trainable_weights_pi)
policy_loss = policy_kl_loss + regularization_penalty_pi + self.actor_loss_di

# Target for value fn regression
tensorflow.contrib.layers.l1_l2_regularizer
9,328
import tensorflow as tf
        def tpu_scaffold():
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
    elif not do_serve:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
tensorflow.train.init_from_checkpoint
9,329
import tensorflow as tf "see the trainable variables" # print("The trainable variables are:") variable_names = [v.name for v in tf.trainable_variables()] variable_shapes = [v.get_shape() for v in tf.trainable_variables()] parameter_num = 0
tensorflow.trainable_variables
9,330
import tensorflow as tf
elif activation == "elu":
    new_context_act = tf.nn.elu(new_context)
elif activation == "linear":
    new_context_act = tf.identity(new_context)
else:
    raise RuntimeError
tensorflow.identity
9,331
import tensorflow as tf
    features={
        'label': tf.FixedLenFeature([], tf.int64),
        'img_raw': tf.FixedLenFeature([], tf.string),
    })
image = tf.decode_raw(features['img_raw'], tf.uint8)
label = tf.cast(features['label'], tf.int32)
image = tf.reshape(image, [4096, 1])
return image, label
tensorflow.decode_raw
9,332
import tensorflow as tf
elif actL == 'esp' or actL == 'relu':  # r2 score
    norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
    accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax':  # accuracy score for multiclass classification
    Yp = tf.sigmoid(betan*hn)
    correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
    accuracy = tf.reduce_mean(tf.cast(correct, "float"))

# -----------------Initialize the graph and start the session-------------------------------------------------
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Run the initialization
    sess.run(init)
    jj = 0

    for epoch in range(num_iterations):
        _, epoch_cost, epoch_grad, epoch_acc_train = sess.run([min, cost, grads_var, accuracy],
                                                              feed_dict={X: X_tr, Y: Y_tr})

        # Print the cost every interval epoch (here uses the inhomogeneous interval but you can change it)
        if jj < e_len and epoch % epoch_sample[jj] == 0:
            # if epoch % 50 == 0:
            print("Epoch %i, Cost: %f, Train accuracy: %f" % (epoch, epoch_cost, epoch_acc_train))
tensorflow.Session
9,333
import tensorflow as tf
x = tf.nn.conv2d(x, tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2]),
                 [1] + list(stride) + [1], pad)
init_scale = .01
m_init, v_init = tf.nn.moments(x, [0, 1, 2])
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
with tf.control_dependencies([g.assign(g * scale_init),
                              b.assign_add(-m_init * scale_init)]):
    x = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (x - tf.reshape(m_init, [1, 1, 1, num_filters]))
else:
    V = maybe_avg(V)
    g = maybe_avg(g)
    b = maybe_avg(b)
tensorflow.reshape
9,334
from tensorflow.contrib.framework import deprecated_args
    return streaming_mean(is_correct, weights, metrics_collections,
                          updates_collections, name or 'accuracy')

@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_precision(predictions, labels, ignore_mask=None, weights=None,
                        metrics_collections=None, updates_collections=None,
                        name=None):
tensorflow.contrib.framework.deprecated_args
9,335
import tensorflow as tf
tf.logging.info("***** Running evaluation *****")
tensorflow.logging.info
9,336
import tensorflow as tf
else:
    return tf.reshape(tf.stack(values=h, axis=1), [-1])
tensorflow.stack
9,337
import tensorflow as tf
      edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
        outgoing edges.

    Return:
      A tuple of `SparseTensor` (neighbors, weights).
        neighbors: A `SparseTensor` of `int64`.
        weights: A `SparseTensor` of `float`.
        types: A `SparseTensor` of `int32`
    """
    sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
    return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
        tf.SparseTensor(*sp_returns[6:])

def get_sorted_full_neighbor(nodes, edge_types):
    """
    Args:
      nodes: A `Tensor` of `int64`.
      edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
        outgoing edges.

    Return:
tensorflow.SparseTensor
9,338
from tensorflow.python.ops import variable_scope
# accuracy is calculated only under 'ce_train', where true answer is given
if mode_gen == 'ce_train':
    accuracy = _mask_and_accuracy(vocab_scores, answer_batch, loss_weights)
    return accuracy, self._loss, sampled_words
else:
    return None, self._loss, sampled_words

def calculate_encoder_features(self, encoder_states, encoder_dim):
    options = self.options
    input_shape = tf.shape(encoder_states)
    batch_size = input_shape[0]
    passage_len = input_shape[1]

    with variable_scope.variable_scope("attention_decoder"):
        encoder_features = tf.expand_dims(encoder_states, axis=2)  # now is shape [batch_size, passage_len, 1, encoder_dim]
        W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size])
        self.W_h = W_h
        encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME")  # [batch_size, passage_len, 1, attention_vec_size]
        encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size])
    return encoder_features

def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t,
                encoder_states, encoder_features, passage_word_idx, passage_mask):
    options = self.options

    with variable_scope.variable_scope("attention_decoder"):
        v = variable_scope.get_variable("v", [options.attention_vec_size])
        v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
        w_c = None
        if options.use_coverage:
tensorflow.python.ops.variable_scope.get_variable
9,339
import tensorflow as tf
labeled_poses = (labeled_rotations_3d, labeled_translations_3d, labeled_sizes_3d)

# Predictions
predicted_classes = tf.cast(detections['detection_classes'], tf.int64)
predicted_permutation = np.argsort(predicted_classes)
predicted_classes = predicted_classes.numpy()[predicted_permutation]
tensorflow.cast
9,340
import tensorflow as tf
    value=self.episode_indices[num_episodes: self.episode_count]
)

# Decrement episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
    assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)

# Assign new observations.
with tf.control_dependencies(control_inputs=(assignment,)):
    assignments = list()
    for name in sorted(states):
        assignments.append(tf.scatter_update(
            ref=self.states_memory[name],
            indices=indices,
            updates=states[name]
        ))
tensorflow.control_dependencies
9,341
import tensorflow as tf
eval_mgpu_logits, eval_mgpu_clf_losses, eval_mgpu_lm_losses = mgpu_predict(X_train, M_train, Y_train)
eval_logits, eval_clf_losses, eval_lm_losses = model(X, M, Y, train=False, reuse=True)
eval_clf_loss = tf.reduce_mean(eval_clf_losses)
eval_mgpu_clf_loss = tf.reduce_mean(eval_mgpu_clf_losses)

n_updates = 0
tensorflow.reduce_mean
9,342
import tensorflow as tf
        keep_dims=True,
        shift=shift,
        name="batch_norm_ss")
mean, variance = tf.nn.normalize_moments(counts, shifted_sum_x, shifted_sum_x2,
                                         shift, name="normalize_moments")
second_moment = variance + tf.square(mean)
return mean, variance, second_moment

def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
        tf.identity(self._moving_second_moment),
tensorflow.square
9,343
import tensorflow as tf
init_string = ""
if var.name in initialized_variable_names:
    init_string = ", *INIT_FROM_CKPT*"
tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)
tensorflow.logging.info
9,344
import tensorflow as tf
input_props.append((tf.string, [None, None]))  # Tokens.
input_props.append((tf.float32, [None, None, self.context_embeddings.size]))  # Context embeddings.
input_props.append((tf.float32, [None, None, self.head_embeddings.size]))  # Head embeddings.
input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers]))  # LM embeddings.
input_props.append((tf.int32, [None, None, None]))  # Character indices.
input_props.append((tf.int32, [None]))  # Text lengths.
input_props.append((tf.int32, [None]))  # Speaker IDs.
input_props.append((tf.int32, []))  # Genre.
input_props.append((tf.bool, []))  # Is training.
input_props.append((tf.int32, [None]))  # Gold starts.
input_props.append((tf.int32, [None]))  # Gold ends.
input_props.append((tf.int32, [None]))  # Cluster ids.

self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()

self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                           self.config["decay_frequency"], self.config["decay_rate"],
                                           staircase=True)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
optimizers = {
    "adam": tf.train.AdamOptimizer,
    "sgd": tf.train.GradientDescentOptimizer
tensorflow.PaddingFIFOQueue
9,345
import tensorflow as tf
gn_grads_, gn_grads_true_, v_grads_, v_grads_true_ = sess.run(
    [gn_grads, gn_grads_true, v_grads, v_grads_true])
np.testing.assert_array_equal(gn_grads_, gn_grads_true_)
np.testing.assert_array_equal(v_grads_, v_grads_true_)

def test_get_train_op(self):
    """Tests get_train_op.
    """
    var = tf.Variable(0.)
    loss = tf.nn.l2_loss(var)
    train_op = opt.get_train_op(loss)
    self.assertTrue(tf.contrib.framework.is_tensor(train_op))

if __name__ == "__main__":
    tf.test.main()
tensorflow.contrib.framework.is_tensor
9,346
import tensorflow as tf """ image = tf.image.resize_with_crop_or_pad(image, 40, 40) image = tf.image.random_crop(image, [32, 32, 3]) image = tf.image.random_flip_left_right(image)
tensorflow.image.random_crop
9,347
import tensorflow as tf
config.gpu_options.allow_growth = True
sess_grow = tf.Session(config=config)

# Also, we can limit the size of GPU memory used, with the following option
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)

# How to set placements on multiple devices.
# Here, assume we have three devices CPU:0, GPU:0, and GPU:1
if tf.test.is_built_with_cuda():
    with tf.device('/cpu:0'):
        a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
        b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])

        with tf.device('/gpu:1'):
            c = tf.matmul(a, b)
            c = tf.reshape(c, [-1])

        with tf.device('/gpu:2'):
            d = tf.matmul(b, a)
            flat_d = tf.reshape(d, [-1])

        combined = tf.mul(c, flat_d)
        print(sess.run(combined))
tensorflow.constant
9,348
import tensorflow as tf
    dataset = dataset.map(map_fn, num_parallel_calls=16)
else:
    types = {movielens.USER_COLUMN: rconst.USER_DTYPE,
             movielens.ITEM_COLUMN: rconst.ITEM_DTYPE}
    shapes = {movielens.USER_COLUMN: tf.TensorShape([batch_size]),
              movielens.ITEM_COLUMN: tf.TensorShape([batch_size])}

    if self._is_training:
        types[rconst.VALID_POINT_MASK] = np.bool
        shapes[rconst.VALID_POINT_MASK] = tf.TensorShape([batch_size])

        types = (types, np.bool)
        shapes = (shapes, tf.TensorShape([batch_size]))
    else:
        types[rconst.DUPLICATE_MASK] = np.bool
        shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size])

    data_generator = functools.partial(
tensorflow.TensorShape
9,349
from tensorflow.contrib.learn.python.learn.datasets import base
train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
    archive_path = base.maybe_download(
        'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
    tfile = tarfile.open(archive_path, 'r:*')
tensorflow.contrib.learn.python.learn.datasets.base.maybe_download
9,350
import tensorflow as tf
def subpixel_conv3d(x, r, out_channels):
    x = tf.split(x, out_channels, 4)
    x = tf.concat([phase_shift_3d(v, r) for v in x], 4)
    return x

def pixel_shuffler_3d(x, r, k, out_channels, name):
    in_channels = x.get_shape().as_list()[4]
    with tf.variable_scope(name):
        u = conv3d(x, [k, k, k, in_channels, out_channels*pow(r, 3)], 'conv', bias=True, stride=1)
        h = subpixel_conv3d(u, r, out_channels)
    return h

def minibatch_discrimination(x, n_kernels, dim_per_kernel, name):
    with tf.variable_scope(name):
        batch_size, nf = x.get_shape().as_list()
        h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1')
        activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel))
        big = tf.eye(batch_size)
        big = tf.expand_dims(big, 1)
        abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3)
                                       - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
        mask = 1. - big
        masked = tf.exp(-abs_dif) * mask

        def half(tens, second):
            m, n, _ = tens.get_shape().as_list()
tensorflow.variable_scope
9,351
from tensorflow.python.framework import ops
    ps = server_lib.Server(cs, job_name="ps", start=True)
    return worker, ps

@contextlib.contextmanager
def _maybeWithDevice(self, device):
    if device is not None:
        with ops.device(device):
            yield
    else:
        yield

def _setupDense(self, is_distributed, dtype):
    with self._maybeWithDevice("/job:ps" if is_distributed else None):
tensorflow.python.framework.ops.device
9,352
import tensorflow as tf
    return xs, s

def conv_to_fc(x):
    nh = np.prod([v.value for v in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, nh])
    return x

def discount_with_dones(rewards, dones, gamma):
tensorflow.reshape
9,353
import tensorflow as tf
Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
Res.build_graph()
saver = tf.train.Saver()

# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)  # Choose dir according to rt
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
num_sample = hps.batch_size*FLAGS.eval_batch_count

# Initialize results to save
entropy_test_adv_all = np.array([])
confidence_test_adv_all = np.array([])
entropy_test_nor_all = np.array([])
confidence_test_nor_all = np.array([])
tensorflow.logging.info
9,354
import tensorflow as tf
from tensorflow.python.framework import ops

from collections import namedtuple

import logger
from tf_conv_dims import calc_padding_4d, calc_out_size_4d, calc_out_size_4d_np

log = logger.get()

sbnet_module = tf.load_op_library('../sbnet_ops/libsbnet.so')

BlockParams = namedtuple('BlockParams', ['bsize', 'bsize_out', 'boffset', 'bcount', 'bstrides'])

# Gradients registration.
@ops.RegisterGradient("SparseGather")
def _sparse_gather_grad(op, grad):
    # x is shaped like full tensor [NHWC]
tensorflow.load_op_library
9,355
import tensorflow as tf
    train_op = optimizer.apply_gradients(capped_gvs)
    return train_op

@staticmethod
def seq_length(data):
    used = tf.sign(tf.reduce_max(tf.abs(data), axis=2))
    length = tf.reduce_sum(used, axis=1)
    length = tf.cast(length, tf.int64)
    return length

@staticmethod
def last_relevant(outputs, length):
    # Borrowed from: https://gist.github.com/rockt/f4f9df5674f3da6a32786bcf9fbb6a88
    batch_size, max_length, hidden_size = tf.unstack(tf.shape(outputs))
    index = tf.range(0, batch_size) * max_length + (tf.cast(length, tf.int32) - 1)
    flat = tf.reshape(outputs, [-1, hidden_size])
    relevant = tf.gather(flat, index)
    return relevant
tensorflow.range
9,356
import tensorflow as tf "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from "
tensorflow.flags.DEFINE_string
9,357
import tensorflow as tf
adv_exs = [
    emb + _scale_l2(tf.stop_gradient(g), perturb_norm_length)
    for emb, g in zip(embedded, grads)
tensorflow.stop_gradient
9,358
import tensorflow as tf
print(sess.run(ones_similar))
print(sess.run(zero_similar))

fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
sess.run(fill_var.initializer)
print(sess.run(fill_var))
tensorflow.fill
9,359
import tensorflow as tf
def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
        tf.summary.scalar("before_loss", model.before_loss)
        tf.summary.scalar("after_loss", model.after_loss)

        if hparams.predict_linear:
            tf.summary.scalar("linear_loss", model.linear_loss)
            for i in range(hparams.tacotron_num_gpus):
                tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
                tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])

        tf.summary.scalar("regularization_loss", model.regularization_loss)
        tf.summary.scalar("stop_token_loss", model.stop_token_loss)
        tf.summary.scalar("loss", model.loss)
        tf.summary.scalar("learning_rate", model.learning_rate)  # Control learning rate decay speed
        if hparams.tacotron_teacher_forcing_mode == "scheduled":
            tf.summary.scalar("teacher_forcing_ratio", model.ratio)  # Control teacher forcing
            # ratio decay when mode = "scheduled"
        gradient_norms = [tf.norm(grad) for grad in model.gradients]
        tf.summary.histogram("gradient_norm", gradient_norms)
        tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms))  # visualize
        # gradients (in case of explosion)
        return tf.summary.merge_all()
tensorflow.summary.histogram
9,360
import tensorflow as tf
self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),
                    fetches['scores'].shape)
self.assertAllEqual((1, 314, p.encoder.input_shape[2], 1),
                    fetches['src_frames'].shape)
self.assertAllEqual((80, 1, 2 * p.encoder.lstm_cell_size),
                    fetches['encoder_frames'].shape)

if __name__ == '__main__':
    tf.test.main()
tensorflow.test.main
9,361
import tensorflow as tf
#im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph
#actor_loss_di1 = tf.reduce_mean(im_loss1)
#actor_loss_di2 = tf.reduce_mean(im_loss2)
self.actor_loss_di = tf.reduce_mean(im_loss1)
imitation_for_priority = tf.reduce_mean(im_loss1, axis=1)
regularizerpi = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope="model/pi")
all_trainable_weights_pi = tf.trainable_variables('model/pi')
regularization_penalty_pi = tf.contrib.layers.apply_regularization(regularizerpi, all_trainable_weights_pi)
policy_loss = policy_kl_loss + regularization_penalty_pi + self.actor_loss_di
tensorflow.trainable_variables
9,362
import tensorflow as tf
cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
tensorflow.nn.bidirectional_dynamic_rnn
9,363
import tensorflow as tf
expected_masks = self.get_instance_masks()
selected_instances = tf.gather(instance_labels, indices)
expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32)
self.assertAllEqual(selected_instances.numpy(),
                    expected_selected_instances.numpy())
self.assertAllClose(masks.numpy(), expected_masks.numpy())

def test_inputs_Distances_to_centers(self):
    inputs = tf.random.uniform(
        [100, 8], minval=-10, maxval=10.0, dtype=tf.float32)
    centers = tf.random.uniform(
        [5, 8], minval=-10, maxval=10.0, dtype=tf.float32)
    distances1 = isu.inputs_distances_to_centers(inputs, centers)
    num_centers = tf.shape(centers)[0]
    inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),
                              tf.stack([1, num_centers, 1]))
    distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2)
    self.assertAllClose(distances1.numpy(), distances2.numpy(), atol=0.001)

def test_pairwise_iou_matrix(self):
tensorflow.random.uniform
9,364
import tensorflow as tf
    image = tf.decode_raw(features['image_raw'], tf.float32)
else:
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.cast(image, tf.float32) * (1. / 255)
tensorflow.decode_raw
9,365
from tensorflow.python.util.deprecation import deprecated
    """
    dim = 1
    for d in variable.get_shape()[1:].as_list():
        dim *= d
    return tf.reshape(variable, shape=[-1, dim], name=name)

@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check naming.")
def clear_layers_name():
    logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.')

@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check name reusing.")
def set_name_reuse(enable=True):
    logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.')

def initialize_rnn_state(state, feed_dict=None):
    """Returns the initialized RNN state.
    The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.

    Parameters
    ----------
    state : RNN state.
tensorflow.python.util.deprecation.deprecated
9,366
import tensorflow as tf
padding = padding.upper()

if isinstance(k_size, list):
    filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims]
else:
    filter_shape = [k_size, k_size] + [in_channel, out_dims]

if w_init is None:
    w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
    b_init = tf.constant_initializer()

w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None

if use_bias:
tensorflow.contrib.layers.variance_scaling_initializer
9,367
import tensorflow as tf
def flatten_emb_by_sentence(self, emb, text_len_mask):
    num_sentences = tf.shape(emb)[0]
    max_sentence_length = tf.shape(emb)[1]

    emb_rank = len(emb.get_shape())
    if emb_rank == 2:
        flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
    elif emb_rank == 3:
        flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
    else:
        raise ValueError("Unsupported rank: {}".format(emb_rank))
    return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
tensorflow.reshape
9,368
import tensorflow as tf
indices_input = tf.concat(axis=0, values=[indices, tf.reshape(input_, [-1])])
indices_input = tf.reshape(indices_input, [2, -1])
indices_input = tf.transpose(indices_input)
res = tf.sparse_to_dense(
tensorflow.transpose
9,369
import tensorflow as tf
_phase_infer = _phase.assign(False)

# TODO: move to ops
def _rank(x):
    return len(x.get_shape())

def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask

def _global_keep_prob(keep_prob):
    keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
    keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
    return keep_prob
tensorflow.floor
9,370
import tensorflow as tf
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])

# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
tensorflow.shape
9,371
from tensorflow.python.framework import ops
        return func(x, y, name=name)

    ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper

    def r_binary_op_wrapper(y, x):
        with ops.op_scope([x, y], None, op_name) as name:
            assert isinstance(y, ops.Tensor)
            x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
            return func(x, y, name=name)

    ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper

# Conversion table for __truediv__. None entries mean no conversion required.
tensorflow.python.framework.ops.convert_to_tensor
9,372
import tensorflow as tf
    Returns:
        A tensor with the cross entropy loss.
    """
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    with tf.name_scope(name):
        num_classes = labels.get_shape()[-1].value
        labels = tf.cast(labels, logits.dtype)
        if label_smoothing > 0:
            smooth_positives = 1.0 - label_smoothing
            smooth_negatives = label_smoothing / num_classes
            labels = labels * smooth_positives + smooth_negatives
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy')
        weight = tf.convert_to_tensor(weight,
                                      dtype=logits.dtype.base_dtype,
                                      name='loss_weight')
        loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')
        return loss

def l1_l2_regularizer(var, weight_l1=1.0, weight_l2=1.0, name='l1_l2_regularizer'):
    """Define a L2Loss, useful for regularize, i.e. weight decay.

    Args:
        var: tensor to regularize.
        weight_l1: an optional weight to modulate the l1 loss.
        weight_l2: an optional weight to modulate the l2 loss.
        name: Optional scope/name for op_scope.

    Returns:
tensorflow.reduce_mean
9,373
import tensorflow as tf
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
        use_xavier: bool, whether to use xavier initializer

    Returns:
        Variable Tensor
    """
    if use_xavier:
        initializer = tf.contrib.layers.xavier_initializer()
        var = _variable_on_cpu(name, shape, initializer)
    else:
        # initializer = tf.truncated_normal_initializer(stddev=stddev)
        with tf.device('/cpu:0'):
            var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1]))
            var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32)
            var = tf.Variable(var, name='weights')
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
tensorflow.contrib.layers.xavier_initializer
9,374
import tensorflow as tf
def sample_compute(_):
    pairs = sample_func()
    loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
    pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
    p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
                lambda: tf.print('csrt acc ', [pct]),
                lambda: tf.no_op())
    with tf.control_dependencies([p]):
        return tf.reduce_mean(loss)

loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample),
                 dtype=tf.float32, parallel_iterations=32)
final_loss = tf.reduce_mean(loss)
return final_loss

def contra_traj_lossV6(pred, tgt, horizon=12):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
tensorflow.range
9,375
import tensorflow as tf
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step)

with tf.variable_scope("summary"):
    summary_loss_total = tf.summary.scalar("loss_total", loss_total)
tensorflow.variable_scope
9,376
import tensorflow as tf "targets": 3} problem = ModalityObjProblem(False, False) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["inputs"], modalities.SymbolModality) self.assertIsInstance(p_hparams.modality["targets"], modalities.SymbolModality) @tf.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsInputOnlyModality(self): class InputOnlyProblem(problem_module.Problem): def hparams(self, defaults, model_hparams): hp = defaults hp.modality = {"inputs": modalities.SymbolModality} hp.vocab_size = {"inputs": 2}
tensorflow.contrib.eager.run_test_in_graph_and_eager_modes
9,377
import tensorflow as tf
from scipy.io.wavfile import write
from tqdm import tqdm
from utils import *

# In[2]:

def prenet(inputs, num_units=None, is_training=True, scope="prenet"):
    if num_units is None:
        num_units = [embed_size, embed_size // 2]

    with tf.variable_scope(scope):
        outputs = tf.layers.dense(inputs, units=num_units[0], activation=tf.nn.relu, name="dense1")
        outputs = tf.layers.dropout(
            outputs, rate=dropout_rate, training=is_training, name="dropout1"
        )
        outputs = tf.layers.dense(outputs, units=num_units[1], activation=tf.nn.relu, name="dense2")
        outputs = tf.layers.dropout(
            outputs, rate=dropout_rate, training=is_training, name="dropout2"
        )
    return outputs
tensorflow.variable_scope
9,378
import tensorflow as tf
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"],
                               training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
tensorflow.layers.max_pooling2d
9,379
import tensorflow as tf
    emb_values.append(embedding_weight.read_value())
else:
    emb_values = tf.constant(1.0)
tensorflow.constant
9,380
import tensorflow as tf
              next_sentence_log_probs, next_sentence_labels):
    """Computes the loss and accuracy of the model."""
    masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                     [-1, masked_lm_log_probs.shape[-1]])
    masked_lm_predictions = tf.argmax(
        masked_lm_log_probs, axis=-1, output_type=tf.int32)
    masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
    masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
    masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
    masked_lm_accuracy = tf.metrics.accuracy(
        labels=masked_lm_ids,
        predictions=masked_lm_predictions,
        weights=masked_lm_weights)
    masked_lm_mean_loss = tf.metrics.mean(
        values=masked_lm_example_loss, weights=masked_lm_weights)
tensorflow.reshape
9,381
from tensorflow.python.ops import array_ops
    denominator,
    array_ops.ones_like(denominator))
tensorflow.python.ops.array_ops.ones_like
9,382
import tensorflow as tf
elif model_name in ('gcn_vae', 'linear_vae', 'deep_gcn_vae'):
    opt = OptimizerVAE(preds=model.reconstructions,
                       labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                   validate_indices=False), [-1]),
                       model=model,
                       num_nodes=num_nodes,
                       pos_weight=pos_weight,
                       norm=norm)

# Normalization and preprocessing on adjacency matrix
adj_norm = preprocess_graph(adj)
adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0]))

# Initialize TF session
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Model training
print(f"Training {model_name}...")
t = time.time()
print_every = 50
for epoch in range(FLAGS.epochs):  # Flag to compute running time for each epoch
    # Construct feed dictionary
    feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)
    feed_dict.update({placeholders['dropout']: FLAGS.dropout})
    # Weights update
    outs = sess.run([opt.opt_op, opt.cost, opt.accuracy],
tensorflow.global_variables_initializer
9,383
import tensorflow as tf
    shuffle=True)

ul_images = tf.placeholder(shape=images.shape, dtype=tf.float32)
tensorflow.placeholder
9,384
import tensorflow as tf
model = model_cls(params)

# Multi-GPU setting
sharded_losses = parallel.parallel_model(
    model.get_training_func(initializer),
    features,
    params.device_list
)
loss = tf.add_n(sharded_losses) / len(sharded_losses)

# Create global step
global_step = tf.train.get_or_create_global_step()

# Print parameters
all_weights = {v.name: v for v in tf.trainable_variables()}
total_size = 0

for v_name in sorted(list(all_weights)):
    v = all_weights[v_name]
    tf.logging.info("%s\tshape %s", v.name[:-2].ljust(80),
                    str(v.shape).ljust(20))
    v_size = np.prod(np.array(v.shape.as_list())).tolist()  # multiply all dimension sizes
tensorflow.train.get_or_create_global_step
9,385
import tensorflow as tf
    sigma = tf.clip_by_value(sigma, 0.0, 1.0)
    norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
    params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
    return norm_dist, params

def build_cnet(self, state_in, name, reuse=False):
    reg = tf.contrib.layers.l2_regularizer(1e-3)

    with tf.variable_scope(name, reuse=reuse):
        layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
        layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)

        vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return vf, params

# Update the network
def train(self, s, a, r, adv):
tensorflow.layers.dense
9,386
import tensorflow as tf
    :param n2: no. of output neurons
    :param name: name of the entire dense layer, i.e. the variable scope name.
    :return: tensor with shape [batch_size, n2]
    """
    with tf.variable_scope(name, reuse=None):
        weights = tf.get_variable("weights", shape=[n1, n2],
                                  initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
        bias = tf.get_variable("bias", shape=[n2], initializer=tf.constant_initializer(0.0))
        out = tf.add(tf.matmul(x, weights), bias, name='matmul')
        return out

# The autoencoder network
def encoder(x, reuse=False, supervised=False):
    """
    Encode part of the autoencoder.
tensorflow.matmul
9,387
import tensorflow as tf
    #[-1,head,n_ctx,emb]
    return merge_states(tf.transpose(x, [0, 2, 1, 3]))

def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02),
           b_init=tf.constant_initializer(0), pad='VALID', train=False):
    with tf.variable_scope(scope):
        #x = [-1,n_ctx,512]
        nx = shape_list(x)[-1]
        #rf = 1,nx=emb,nf=3*emb
        w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
        b = tf.get_variable("b", [nf], initializer=b_init)
        if rf == 1:  #faster 1x1 conv
            c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]),
                                     tf.reshape(w, [-1, nf]))+b,
                           shape_list(x)[:-1]+[nf])
        else:  #was used to train LM
            c = tf.nn.conv1d(x, w, stride=1, padding=pad)+b
        return c
tensorflow.get_variable
9,388
from tensorflow.python.framework import ops
def benchmarkTfRNNLSTMBlockCellTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
        num_layers = config["num_layers"]
        num_units = config["num_units"]
        batch_size = config["batch_size"]
        seq_length = config["seq_length"]

        with ops.Graph().as_default(), ops.device("/device:GPU:0"):
            inputs = seq_length * [
                array_ops.zeros([batch_size, num_units], dtypes.float32)
            ]
            cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units)  # pylint: disable=cell-var-from-loop

            multi_cell = rnn_cell.MultiRNNCell(
                [cell() for _ in range(num_layers)])
tensorflow.python.framework.ops.Graph
9,389
import tensorflow as tf
        # max pool
        pooled = tf.nn.max_pool(H,
                                ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                strides=[1, 1, 1, 1],
                                padding='VALID',
                                name="pool")
        pooled_outputs.append(pooled)

with tf.name_scope("preFc"):
    # combine all pooled outputs
    total_filters = num_filter * len(filter_list)
    # concat all the pooled weights
    H_pool = tf.concat(pooled_outputs, 3)
    # flatten it for fully connected layer
    H_pool_flat = tf.reshape(H_pool, [-1, total_filters])

with tf.name_scope("dropout"):
    H_drop = tf.nn.dropout(H_pool_flat, keep_prob=keep_prob)

# Final (unnormalized) layer
with tf.name_scope("output"):
    W = tf.get_variable("W", shape=[total_filters, nb_classes],
                        initializer=tf.contrib.layers.xavier_initializer())
    # add final layer bias
tensorflow.concat
9,390
from tensorflow.python.framework import ops
        x: `Tensor`.
        name: `String`. The name to give this op.

    Returns:
        x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
        sample_shape: `Tensor` (1D, `int32`).
    """
    with self._name_scope(name, values=[x]):
        x = ops.convert_to_tensor(x, name="x")
        sample_shape, batch_shape, event_shape = self.get_shape(x)
        event_shape = distribution_util.pick_vector(
            self._event_ndims_is_0, (1,), event_shape)
        batch_shape = distribution_util.pick_vector(
            self._batch_ndims_is_0, (1,), batch_shape)
        new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
        x = array_ops.reshape(x, shape=new_shape)
tensorflow.python.framework.ops.convert_to_tensor
9,391
import tensorflow as tf
sdf = sdf * -1.0  # inside positive, outside zero
samples_object = centernet_utils.transform_pointcloud(
    tf.reshape(samples_world, [1, 1, -1, 3]),
    tf.reshape(poses[2][i], [1, 1, 3]),
    tf.reshape(poses[0][i], [1, 1, 3, 3]),
    tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
samples = tf.squeeze(samples_object)
interpolated = trilinear.interpolate(sdf, samples)

occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol))
sdf_values += occupancy_value

intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
if intersection > prev_intersection:
    prev_intersection = intersection
    num_collisions += 1
status2 = False
if status2:
    a = 1
tensorflow.nn.relu
9,392
import tensorflow as tf
# Extract the 8 largest features, as mentioned in the paper
self.k_pooled = tf.nn.top_k(tf.transpose(self.layers[-1], [0, 2, 1]), k=8, name='k_pool', sorted=False)[0]
print("8-maxpooling:", self.k_pooled.get_shape())
self.flatten = tf.reshape(self.k_pooled, (-1, 512*8))

# fc1
with tf.variable_scope('fc1'):
    w = tf.get_variable('w', [self.flatten.get_shape()[1], 2048], initializer=he_normal,
                        regularizer=regularizer)
    b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
    out = tf.matmul(self.flatten, w) + b
    self.fc1 = tf.nn.relu(out)
tensorflow.variable_scope
9,393
import tensorflow as tf
        name: Name for this op. Defaults to 'multitask_logits'.

    Returns:
        A list of logit tensors; one for each classification task.
    """
    logits_list = []
    with tf.name_scope('multitask_logits'):
        for task_idx in range(num_tasks):
            with tf.name_scope(name,
                               ('task' + str(task_idx).zfill(len(str(num_tasks)))),
                               [features]):
                logits_list.append(
tensorflow.name_scope
9,394
import tensorflow as tf
def _mark_for_monitoring(self, name, value):
    tf.add_to_collection(TF_COLLECTION_MONITORED, tf.identity(value, name))

def _add_monitoring_of_values(self):
    monitored_values = tf.get_collection(TF_COLLECTION_MONITORED)
    monitored_values = {
        value.name.split(':')[0]: value  # Get rid of ':0' from name
        for value in monitored_values
    }

    for (name, value) in monitored_values.items():
        tf.summary.scalar(name, value)

    summary_op = tf.summary.merge_all()

    return (summary_op, monitored_values)

def _make_var(self, name, shape, dtype=None, no_reg=False, initializer=None,
              init_constant=None, trainable=True):
    if initializer is None:
        if init_constant is not None:
            initializer = tf.constant_initializer(init_constant, dtype=tf.float32)
        else:
            initializer = tf.contrib.keras.initializers.he_normal()
tensorflow.summary.scalar
9,395
from tensorflow.python.framework import ops
#
# This function gives us the ways to use
# multiple devices (executors) in TensorFlow.

import tensorflow as tf
from tensorflow.python.framework import ops

ops.reset_default_graph()

# To find out where placement occurs, set 'log_device_placement'
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
tensorflow.python.framework.ops.reset_default_graph
9,396
import tensorflow as tf
with tf.variable_scope(
        "root", initializer=tf.constant_initializer(0.5)):
    cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
    cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2, state_is_tuple=True)
    inp = tf.constant(0.5, shape=[2, 2, 2])
    enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
    attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                for e in enc_outputs])
    dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
    dec, mem = tf.nn.seq2seq.attention_decoder(
        dec_inp, enc_state,
tensorflow.nn.rnn
9,397
import tensorflow as tf
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, dtype)) * stride
box_wh = tf.exp(box_wh) * anchors

box_x1y1 = box_xy - box_wh / 2.
box_x2y2 = box_xy + box_wh / 2.
box = tf.concat([box_x1y1, box_x2y2], axis=-1)

boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))

boxes = tf.concat(boxes, axis=1)
tensorflow.concat
9,398
import tensorflow as tf """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()):
tensorflow.logging.info
9,399