Dataset columns:
  seed      string  lengths 25 to 2.89k
  seed_api  string  lengths 14 to 102
  index     int64   0 to 14.8k
import tensorflow as tf
with self.assertRaisesOpError("not in the support"):
    x = tf.placeholder_with_default(input=[2., 2., 5.], shape=[3])
    log_prob = pareto.log_prob(x)
tensorflow.placeholder_with_default
10,300
import tensorflow as tf
      float32.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'tukey_location'):
    return _tukey_parameters(x, reduce_instance_dims, output_dtype)[0]


@common.log_api_use(common.ANALYZER_COLLECTION)
tensorflow.compat.v1.name_scope
10,301
import tensorflow as tf
        full_video, time_axis=1)
    latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
    latent = tf.layers.flatten(latent)
    latent = tf.expand_dims(latent, axis=1)
    latent = tf.expand_dims(latent, axis=1)
    latent_mask = tf.layers.dense(latent, filters, name="latent_mask")
tensorflow.expand_dims
10,302
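Not part of the dataset row above; a minimal, self-contained sketch of the tf.expand_dims call that row exercises, assuming TF 2.x eager execution (the call is identical in TF 1.x). All tensor values are illustrative.
import tensorflow as tf

# A [2, 3] matrix.
t = tf.constant([[1., 2., 3.], [4., 5., 6.]])
# Insert a new axis at position 1: shape becomes [2, 1, 3].
t1 = tf.expand_dims(t, axis=1)
# Two consecutive expansions, as in the row above: [2, 1, 1, 3].
t2 = tf.expand_dims(t1, axis=1)
print(t2.shape)  # (2, 1, 1, 3)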
from tensorflow.python.framework import function

  def testGraphExtension(self):
    self._testGraphExtensionSave()
    self._testGraphExtensionRestore()

  def testStrippedOpListDef(self):
    with self.test_session():
      # Creates a graph.
      v0 = tf.Variable(0.0)
      var = tf.Variable(10.0)
      tf.add(v0, var)

      @function.Defun(x=tf.float32)
      def minus_one(x):
        return x - 1

      minus_one(tf.identity(v0))
      save = tf.train.Saver({"v0": v0})
      tf.initialize_all_variables()

      # Generates MetaGraphDef.
      meta_graph_def = save.export_meta_graph()
      ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
tensorflow.python.framework.function.Defun
10,303
import tensorflow as tf
    with tf.variable_scope('decoder_{}'.format(decoder.name)):
        initial_context, _ = look(0, initial_output, initial_input,
                                  pos=initial_pos, prev_weights=initial_weights,
                                  context=zero_context)
    initial_data = tf.concat([initial_state, initial_context,
                              tf.expand_dims(initial_pos, axis=1),
                              initial_weights], axis=1)
    context_size = initial_context.shape[1].value

    def get_logits(state, ids, time):  # for beam-search decoding
        with tf.variable_scope('decoder_{}'.format(decoder.name)):
            state, context, pos, prev_weights = tf.split(
                state, [cell_state_size, context_size, 1, -1], axis=1)
            input_ = embed(ids)
            pos = tf.squeeze(pos, axis=1)
            pos = tf.cond(tf.equal(time, 0),
                          lambda: pos,
                          lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))

            if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
tensorflow.split
10,304
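A minimal sketch of the size-based tf.split used in the decoder row above, assuming TF 2.x eager execution; the widths 2, 3, and -1 are illustrative stand-ins for the row's state, context, and remainder slices.
import tensorflow as tf

# A batch of 4 concatenated feature blocks of widths 2, 3, and 1.
packed = tf.constant([[0., 1., 2., 3., 4., 5.]] * 4)  # shape [4, 6]
# Size-based split along axis 1; a single -1 component takes the rest.
a, b, c = tf.split(packed, [2, 3, -1], axis=1)
print(a.shape, b.shape, c.shape)  # (4, 2) (4, 3) (4, 1)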
import tensorflow as tf
      result_dict['iou_mean'] = iou_mean
      result_dict['iou_min'] = iou_min
    elif isinstance(metric, CollisionMetric):
      labeled_sdfs = detections['groundtruth_sdfs']
      labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
      labeled_poses = (sample['rotations_3d'], sample['translations_3d'],
                       sample['sizes_3d'])
tensorflow.cast
10,305
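A minimal sketch of the tf.cast call in the row above, assuming TF 2.x eager execution; names and values are illustrative.
import tensorflow as tf

classes = tf.constant([0, 2, 1], dtype=tf.int32)
# Widen int32 class ids to int64, as the row above does for its label tensor.
classes64 = tf.cast(classes, tf.int64)
print(classes64.dtype)  # <dtype: 'int64'>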
import tensorflow as tf
      output = f(tf.constant([[1, 3]]), tf.constant([2]))
      tf.train.start_queue_runners()

      start = datetime.datetime.now()
      session.run(output)
      duration = datetime.datetime.now() - start

      # There should have been a timeout here because only one sample was
      # added and the minimum batch size is 2.
      self.assertLessEqual(.9, duration.total_seconds())
      self.assertGreaterEqual(1.5, duration.total_seconds())

      outputs = [
          f(tf.constant([[1, 3]]), tf.constant([2])),
          f(tf.constant([[1, 3]]), tf.constant([2]))
      ]

      start = datetime.datetime.now()
      (_, batch_size), _ = session.run(outputs)
      duration = datetime.datetime.now() - start

      # The outputs should be executed immediately because two samples are
      # added.
      self.assertGreaterEqual(.5, duration.total_seconds())
      self.assertEqual(2, batch_size)

  def test_maximum_batch_size(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
tensorflow.constant
10,306
import tensorflow as tf

    @staticmethod
    def layergn(inputdata, name, group_size=32, esp=1e-5):
        """
        :param inputdata:
        :param name:
        :param group_size:
        :param esp:
        :return:
        """
        with tf.variable_scope(name):
            inputdata = tf.transpose(inputdata, [0, 3, 1, 2])
            n, c, h, w = inputdata.get_shape().as_list()
            group_size = min(group_size, c)
            inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w])
            mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True)
            inputdata = (inputdata - mean) / tf.sqrt(var + esp)

            # Per-channel gamma and beta
            gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')
            beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')
            gamma = tf.reshape(gamma, [1, c, 1, 1])
            beta = tf.reshape(beta, [1, c, 1, 1])

            # Reshape back as in the paper, then restore NHWC layout
            output = tf.reshape(inputdata, [-1, c, h, w])
            output = output * gamma + beta
            output = tf.transpose(output, [0, 2, 3, 1])
tensorflow.reshape
10,307
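A minimal sketch of the group-normalization reshape pattern in the row above, assuming TF 2.x eager execution; the channel and group counts are illustrative.
import tensorflow as tf

# NCHW activations: batch 2, 8 channels, 4x4 spatial.
x = tf.zeros([2, 8, 4, 4])
group_size = 4
# Split channels into groups for group normalization, then restore the layout.
grouped = tf.reshape(x, [-1, group_size, 8 // group_size, 4, 4])
restored = tf.reshape(grouped, [-1, 8, 4, 4])
print(grouped.shape, restored.shape)  # (2, 4, 2, 4, 4) (2, 8, 4, 4)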
import tensorflow as tf
        if use_bias:
            bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
            x = tf.nn.bias_add(x, bias)
        return x * update_mask

    if padding == "REFLECT":
        assert size[0] % 2 == 1 and size[1] % 2 == 1, "REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. " + str(size)
        pad_x = size[0] // 2
        pad_y = size[1] // 2
        input = tf.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], "REFLECT")
        padding = "VALID"
    return tf.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride],
                            padding=padding, kernel_initializer=init, name='conv' + id,
                            use_bias=use_bias, dilation_rate=(dilation, dilation))

def z_conv(self, id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1):
    # zero mean conv
tensorflow.pad
10,308
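A minimal sketch of the reflection padding in the row above, assuming TF 2.x eager execution; the image size and pad width are illustrative.
import tensorflow as tf

# NHWC image batch: 1 image, 4x4, 1 channel.
img = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
# Reflection-pad 1 pixel on each spatial side, as the row above does
# before switching the convolution to VALID padding.
padded = tf.pad(img, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
print(padded.shape)  # (1, 6, 6, 1)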
import tensorflow as tf
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', [shape[-1]],
                               initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable('gamma', [shape[-1]],
                                initializer=tf.random_normal_initializer(1., 0.02))
        pop_mean = tf.get_variable('pop_mean', [shape[-1]],
                                   initializer=tf.constant_initializer(0.), trainable=False)
tensorflow.random_normal_initializer
10,309
from tensorflow.python.ops import gen_nn_ops
    logits: Unscaled log probabilities.
    labels: Each entry `labels[i]` must be an index in `[0, num_classes)`.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with
    the softmax cross entropy loss.
  """
  # The second output tensor contains the gradients.  We use it in
  # _CrossEntropyGrad() in nn_grad but not here.
  cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
      logits, labels, name=name)
  return cost


@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
  """Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
  logits_shape = op.inputs[0].get_shape()
  input_shape = logits_shape.with_rank(2)
tensorflow.python.ops.gen_nn_ops._sparse_softmax_cross_entropy_with_logits
10,310
import tensorflow as tf
    test_true = tf.argmax(test_labels, 1)

    valid_image_batch, valid_label_batch = get_valid_batch(valid_image, valid_label, validnum)
    valid_inf = work.valid_inference(valid_image_batch)
    valid_labels = tf.one_hot(valid_label_batch, classnum)
    #train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)
    valid_pre = tf.reshape(valid_inf, [validnum, classnum])
    valid_correct_prediction = tf.equal(tf.argmax(valid_inf, 1), tf.argmax(valid_labels, 1))
    valid_accuracy = tf.reduce_mean(tf.cast(valid_correct_prediction, tf.float32))
    valid_pre = tf.argmax(valid_pre, 1)
    valid_true = tf.argmax(valid_labels, 1)
    target_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj',
                    'class wo', 'class ym', 'class shq', 'class shj', 'class no',
                    'class yh', 'class fb']

    init = tf.initialize_all_variables()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #init=tf.initialize_all_variables()
tensorflow.argmax
10,311
import tensorflow as tf
import numpy as np
import random
import TensorflowUtils as utils
import read_MITSceneParsingDataParis as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange

FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "50", "batch size for training")
tf.flags.DEFINE_string("logs_dir", "/scratch1/ram095/nips20/logs_mnist128/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "/scratch1/ram095/nips20/paris_street", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")

MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'

MAX_ITERATION = int(1e5 + 1)
tensorflow.flags.DEFINE_string
10,312
import tensorflow as tf
                              dtype=tf.float32):
  """Returns a input_receiver_fn for raw images during serving."""

  def _preprocess_image(encoded_image):
    """Preprocess a single raw image."""
    image = tf.image.decode_image(encoded_image, channels=shape[-1])
    image.set_shape(shape)
    return tf.cast(image, dtype)

  def serving_input_receiver_fn():
tensorflow.image.decode_image
10,313
import tensorflow as tf
        self.load_size = 64
        self.fine_size = 64
        self.checkpoint_dir = 'checkpoint'
        self.sample_dir = 'sample'
        self.print_freq = 5
        self.save_freq = 10
        self.pool = ImagePool()
        return None

    def build_generator(self, image, reuse=False, name='generator'):
        with tf.variable_scope(name):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            else:
                assert tf.get_variable_scope().reuse is False
            """U-Net Generator"""

            def lrelu(x, alpha, name='lrelu'):
                with tf.variable_scope(name):
                    return tf.nn.relu(x) - alpha * tf.nn.relu(-x)

            def instance_norm(x, name='instance_norm'):
                with tf.variable_scope(name):
                    if reuse:
                        tf.get_variable_scope().reuse_variables()
tensorflow.get_variable_scope
10,314
import tensorflow as tf
            prev_x = layer_out

        # output layers
        layer_name = 'layer_last'
        with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
            weights = tf.get_variable('weights', [prev_node, output_node],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
tensorflow.variable_scope
10,315
import tensorflow as tf
        if FLAGS.save_path:
            print("Saving model to %s." % FLAGS.save_path)
            sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)


if __name__ == "__main__":
    tf.app.run()
tensorflow.app.run
10,316
import tensorflow as tf
  Returns:
    a Tensor of timing signals [batch, seq_len, channels]
  """
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  return signal


def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up words embeddings for id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
tensorflow.mod
10,317
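A minimal sketch of the remainder computation that the timing-signal row above uses to pad an odd channel count; tf.mod is the TF 1.x spelling, and the same op is tf.math.mod in TF 2.x, which this sketch assumes.
import tensorflow as tf

channels = 7
# Remainder used to zero-pad an odd channel count up to an even one.
pad_amount = tf.math.mod(channels, 2)
print(int(pad_amount))  # 1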
import tensorflow as tf
    # Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1).
    t_g = tf.matmul(theta, grid)
    z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1])
    y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1])
    x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1])
    z_s_flat = tf.reshape(z_s, [-1])
    y_s_flat = tf.reshape(y_s, [-1])
    x_s_flat = tf.reshape(x_s, [-1])

    input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, z_s_flat, out_size)
tensorflow.reshape
10,318
import tensorflow as tf
    Optimization, use Gradient Descent as default
    '''
    with tf.name_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)
tensorflow.train.GradientDescentOptimizer
10,319
from tensorflow.python.framework import ops
from tensorflow_metadata.proto.v0 import schema_pb2


def _get_tensor_value(tensor_or_eager_tensor: tf.Tensor) -> Any:
  if ops.executing_eagerly_outside_functions():
    return np.asarray(tensor_or_eager_tensor)
  else:
    with tf.compat.v1.Session():
tensorflow.python.framework.ops.executing_eagerly_outside_functions
10,320
import tensorflow as tf with tf.variable_scope("Context_to_Query_Attention_Layer"): # C = tf.tile(tf.expand_dims(c,2),[1,1,self.q_maxlen,1]) # Q = tf.tile(tf.expand_dims(q,1),[1,self.c_maxlen,1,1]) # S = trilinear([C, Q, C*Q], input_keep_prob = 1.0 - self.dropout) S = optimized_trilinear_for_attention([c, q], self.c_maxlen, self.q_maxlen, input_keep_prob = 1.0 - self.dropout) mask_q = tf.expand_dims(self.q_mask, 1) S_ = tf.nn.softmax(mask_logits(S, mask = mask_q)) mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask = mask_c), dim = 1),(0,2,1)) self.c2q = tf.matmul(S_, q) self.q2c = tf.matmul(tf.matmul(S_, S_T), c) attention_outputs = [c, self.c2q, c * self.c2q, c * self.q2c] with tf.variable_scope("Model_Encoder_Layer"): inputs = tf.concat(attention_outputs, axis = -1) self.enc = [conv(inputs, d, name = "input_projection")] for i in range(3): if i % 2 == 0: # dropout every 2 blocks self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout)
tensorflow.matmul
10,321
import tensorflow as tf
    # create localization and classification losses
    losses = ssd.loss(labels, params)
    tf.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss'])
    tf.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss'])
tensorflow.losses.add_loss
10,322
import tensorflow as tf
    truncated_words = words[:max_seq_len]
    tokens = table.lookup(truncated_words) + 1
    tokens = tf.cond(
        tf.less(tf.size(tokens), max_seq_len),
        lambda: tf.concat([tokens, [eos]], 0),
        lambda: tokens)
    return tf.concat([[bos], tokens], 0)
tensorflow.concat
10,323
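A minimal sketch of the BOS/EOS concatenation in the row above, assuming TF 2.x eager execution; the token ids are illustrative.
import tensorflow as tf

bos, eos = 1, 2
tokens = tf.constant([7, 8, 9], dtype=tf.int32)
# Append an EOS id and prepend a BOS id along axis 0, as in the row above.
with_eos = tf.concat([tokens, [eos]], 0)
sequence = tf.concat([[bos], with_eos], 0)
print(sequence.numpy())  # [1 7 8 9 2]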
import tensorflow as tf
  Return:
    A tuple of list: (nodes, adjcents)
      nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of hops.
        Specify node set of each hop, including the root.
      adjcents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent
        matrix between hops.
  """
  nodes = tf.reshape(nodes, [-1])
  nodes_list = [nodes]
  adj_list = []
  for hop_edge_types in edge_types:
    neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
    next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
    next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
tensorflow.reshape
10,324
import tensorflow as tf
        update_mu = mu.assign_sub(update * (mu - batch_mean))
        update_sigma = sigma.assign_sub(update * (sigma - batch_var))
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)
tensorflow.add_to_collection
10,325
import tensorflow as tf
        assert (len(shape) > 1)
        nh = h[0].get_shape()[-1].value
        return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
    else:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])


def lstm(xs, s, scope, nh, init_scale=1.0):
tensorflow.stack
10,326
import tensorflow as tf
        with tf.control_dependencies([p]):
            return tf.reduce_mean(loss)

    loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample),
                     dtype=tf.float32, parallel_iterations=32)
    final_loss = tf.reduce_mean(loss)
    return final_loss
tensorflow.reduce_mean
10,327
import tensorflow as tf
        np.ndarray.astype(  # TODO(b/64987151): # pytype: disable=attribute-error
            expected_cross_terms - np.outer(expected_terms, expected_terms),
            self._numpy_dtype)
    ]

  def output_tensor_infos(self):
    return [
        analyzer_nodes.TensorInfo(
            tf.as_dtype(self._numpy_dtype), self._output_shape, None)
    ]


@common.log_api_use(common.ANALYZER_COLLECTION)
def covariance(x: tf.Tensor,
               dtype: tf.DType,
               name: Optional[str] = None) -> tf.Tensor:
tensorflow.as_dtype
10,328
import tensorflow as tf
        # The CNN architecture = conv/pool layers + flatten layer + connected layers
        with tf.name_scope("cnn"):
            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = tf.layers.conv2d(self.X,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same',
                                          activation=tf.nn.relu)
            self.drop1 = tf.layers.dropout(self.conv1,
                                           self.config.cifar10_cnn["keep_prob"],
                                           training=self.train)
            self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
            self.conv2 = tf.layers.conv2d(self.pool1,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same',
                                          activation=tf.nn.relu)
            self.drop2 = tf.layers.dropout(self.conv2,
                                           self.config.cifar10_cnn["keep_prob"],
                                           training=self.train)
tensorflow.layers.dropout
10,329
import tensorflow as tf
        facts = tf.expand_dims(facts, 1)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
    # Mask
    # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1])   # [B, T]
    key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
    if not forCnn:
        scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

    # Scale
    # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.layers.dense
10,330
import tensorflow as tf
    if self.preserve_sparsity and not tf.reduce_any(weights == 0):
      self.preserve_sparsity = False
      logging.warning(
          'Input layer does not contain zero weights, so apply CQAT instead.')
    centroids_mask = None
    centroids, lookup = get_unique(weights)
    num_centroids = tf.size(centroids)

    if self.preserve_sparsity:
      sparsity_mask = tf.math.divide_no_nan(weights, weights)
      zero_idx = tf.argmin(tf.abs(centroids), axis=-1)
      centroids_mask = 1.0 - tf.one_hot(zero_idx, num_centroids)
      result = {SPARSITY_MASK: sparsity_mask}

    # Prepare clustering variables for the Keras graph when clusters
    # exist, assuming we do not use number_of_clusters larger than 1024
    if num_centroids > 1024:
      return result
    else:
      clst_centroids_tf = layer.add_weight(
          CLUSTER_CENTROIDS,
          shape=centroids.shape,
tensorflow.one_hot
10,331
import tensorflow as tf
    return self_attention


def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
                                               ATTENTION_SIZE, mask, softmax_stag=1,
                                               stag=stag, mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
tensorflow.shape
10,332
import tensorflow as tf
    im_flat = tf.reshape(im, tf.stack([-1, channels]))
    im_flat = tf.to_float(im_flat)
    i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0)
    i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1)
tensorflow.gather
10,333
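A minimal sketch of the per-corner tf.gather lookup in the interpolation row above, assuming TF 2.x eager execution; shapes and indices are illustrative.
import tensorflow as tf

# Flattened image: 6 pixels, 3 channels.
im_flat = tf.reshape(tf.range(18, dtype=tf.float32), [6, 3])
idx = tf.constant([0, 5, 2])
# Row lookup by index, as the interpolation code above does per corner.
corners = tf.gather(im_flat, idx)
print(corners.shape)  # (3, 3)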
from tensorflow.python.training import server_lib
    # distributed jobs, such as "/job:ps" which are not present.
    config._cluster_spec = server_lib.ClusterSpec({})
tensorflow.python.training.server_lib.ClusterSpec
10,334
import tensorflow as tf
        # Local logits
        (predict_ADV, logits_part_adv) = sess.run(
            [predict_adv, tsne_logit_adv], feed_dict={adv_image: adv_img})
        # Local entropy and confidence for nor_img
        (entropy_test_nor_help, labels_nor_help, confidence_test_nor_help) = sess.run(
            [entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)],
            feed_dict={predict: predict_NOR})
        # Local entropy and confidence for adv_img
        (entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run(
            [entropy, tf.argmax(predict, axis=1), tf.reduce_max(predict, axis=1)],
            feed_dict={predict: predict_ADV})

        if FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden':
            print('Log-density-ratio in attacking function of nor/adv is %f' % np.sum(log_density_ratio))
            m_tsne_logits_adv = (copy.copy(logits_part_adv)).reshape((1, 64))
            m_tsne_logits_adv = np.repeat(m_tsne_logits_adv, 100, axis=0)
            kernel_train = (copy.copy(e_kernel_train[:, :, np.argmax(target_lab)])).reshape((100, 64))
            log_density_ratio2 = -np.log(1e-30 + np.mean(
                np.exp(-np.sum(np.square(m_tsne_logits_adv - kernel_train), axis=1) / sigma2),
                axis=0)) + np.log(e_median[np.argmax(target_lab)])
            # m_tsne_logits_adv = (copy.copy(logits_part_adv-e_mean[np.argmax(target_lab)])).reshape((64,1))
tensorflow.reduce_max
10,335
import tensorflow as tf
    sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
    y = dist._set_sample_static_shape(x, sample_shape)
    self.assertAllEqual([6, 7, None, 3, 5], y.shape.as_list())

    x = tf.placeholder_with_default(
        input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
    dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
    sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
    y = dist._set_sample_static_shape(x, sample_shape)
    self.assertAllEqual([6, 7, None, 3, None], y.shape.as_list())

    x = tf.placeholder_with_default(
        input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
    dist = fake_distribution(batch_shape=None, event_shape=None)
tensorflow.convert_to_tensor
10,336
import tensorflow as tf
  # Maybe create label_priors.
  label_priors = maybe_create_label_priors(label_priors, labels, weights,
                                           variables_collections)

  # Calculate weighted loss and other outputs. The log(2.0) term corrects for
  # logloss not being an upper bound on the indicator function.
  weighted_loss = weights * losses_utils.weighted_surrogate_loss(
      labels,
      logits,
      surrogate_type=surrogate_type,
      positive_weights=1.0 + lambdas * (1.0 - target_precision),
      negative_weights=lambdas * target_precision)
  maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
  maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
  lambda_term = lambdas * (1.0 - target_precision) * label_priors * maybe_log2
  loss = tf.reshape(weighted_loss - lambda_term, original_shape)
  other_outputs = {
      'lambdas': lambdas_variable,
      'label_priors': label_priors,
      'true_positives_lower_bound':
          true_positives_lower_bound(labels, logits, weights, surrogate_type),
      'false_positives_upper_bound':
          false_positives_upper_bound(labels, logits, weights, surrogate_type)
  }

  return loss, other_outputs
tensorflow.reshape
10,337
import tensorflow as tf
    # create variables
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        var = variable_on_cpu(
            "var", [dim], tf.constant_initializer(1.), trainable=False)
        mean = variable_on_cpu(
            "mean", [dim], tf.constant_initializer(0.), trainable=False)
        step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)

    # choose the appropriate moments
tensorflow.constant_initializer
10,338
import tensorflow as tf
    w_greater = tf.greater(image_shape[0], image_shape[1])
    shape = tf.cond(w_greater,
                    lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32),
                    lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32))

    return uint8_resize_bicubic(image, shape)


def center_crop(image, size):
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]

    offset_height = (image_height - size) // 2
    offset_width = (image_width - size) // 2
    image = tf.slice(image, [offset_height, offset_width, 0], [size, size, -1])
    return image


def lighting(image, std, eigval, eigvec):
    v = tf.random_normal(shape=[3], stddev=std) * eigval
    inc = tf.matmul(eigvec, tf.reshape(v, [3, 1]))
    image = tf.cast(tf.cast(image, tf.float32) + tf.reshape(inc, [3]), image.dtype)
    return image


def validation_mapper(byte):
    image = tf.image.decode_jpeg(
        tf.reshape(byte, shape=[]), 3, **JPEG_OPT)
    image = resize_shortest_edge(image, tf.shape(image), 256)
tensorflow.slice
10,339
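A minimal sketch of the center crop via tf.slice in the row above, assuming TF 2.x eager execution; the image dimensions are illustrative.
import tensorflow as tf

image = tf.zeros([8, 10, 3])
size = 6
offset_h = (8 - size) // 2
offset_w = (10 - size) // 2
# Center crop via tf.slice, as in the row above; -1 keeps all channels.
crop = tf.slice(image, [offset_h, offset_w, 0], [size, size, -1])
print(crop.shape)  # (6, 6, 3)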
import tensorflow as tf
            self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
            self.is_demo_ph = tf.placeholder(tf.float32, shape=(None, 1), name='is_demonstrations')
            self.weight_ph = tf.placeholder(tf.float32, shape=(None, 1), name='importance_weight')
            self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
                                             name='actions')
            self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")

            if self.n_step:
                self.next_observations_ph_n = self.target_policy.obs_ph
                self.processed_next_obs_ph_n = self.target_policy.processed_obs
                self.rewards_ph_n = tf.placeholder(tf.float32, shape=(None, 1),
                                                   name='n_step_rewards')
                self.terminals_ph_n = tf.placeholder(tf.float32, shape=(None, 1),
                                                     name='n_step_terminals')

            with tf.variable_scope("model", reuse=False):
                # Create the policy
                # first return value corresponds to deterministic actions
                # policy_out corresponds to stochastic actions, used for training
tensorflow.placeholder
10,340
import tensorflow as tf tf.summary.image("input_image", image, max_outputs=2) tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2) tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2) # loss0 = tf.reduce_mean(tf.abs(z)) loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)),[1,2,3]))) # loss2 = tf.reduce_mean(tf.square((image - logits)*mask2)) # loss = loss1 + loss2 + loss0 # loss = tf.reduce_mean(tf.squared_difference(logits ,annotation )) loss_summary = tf.summary.scalar("entropy", loss)
tensorflow.square
10,341
import tensorflow as tf
    # Text-Vocab Embedding
    x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
    x_col_sums = tf.reduce_sum(x_embed, 0)

    # Declare model operations
    x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
    model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)

    # Declare loss function (Cross Entropy loss)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
tensorflow.expand_dims
10,342
from tensorflow.contrib import learn

# Setup vocabulary processor
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
tensorflow.contrib.learn.preprocessing.VocabularyProcessor
10,343
import tensorflow as tf
            if ent_coef_loss is not None:
                tf.summary.scalar('ent_coef_loss', ent_coef_loss)
                tf.summary.scalar('ent_coef', self.ent_coef)

            tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))

        # Retrieve parameters that must be saved
        self.params = tf_util.get_trainable_vars("model")
        self.target_params = tf_util.get_trainable_vars("target/values_fn/vf")

        # Initialize Variables and target network
        with self.sess.as_default():
            self.sess.run(tf.global_variables_initializer())
            self.sess.run(target_init_op)

        self.summary = tf.summary.merge_all()

    def pretrain_sac(self, pretrain_steps):
        print("=====SAC Pretraining=====")
        for step in range(pretrain_steps):
            # Compute current learning_rate
            frac = 1.0 - step / pretrain_steps
            current_lr = self.learning_rate(frac)

            # Update policy and critics (q functions)
tensorflow.global_variables_initializer
10,344
import tensorflow as tf
                                     0))
    groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
    shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:],
                                   tf.shape(self._unmatched_cls_target))
    with tf.control_dependencies([shape_assert]):
      match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
                                                           anchors)
      match = self._matcher.match(match_quality_matrix, **params)
      reg_targets = self._create_regression_targets(anchors,
tensorflow.control_dependencies
10,345
import tensorflow as tf
        product1 = tf.matmul(inverted_rgb2lms, self.color_matrix)
        product2 = tf.matmul(product1, self.rgb2lms)
        original_image_shape = image.shape
        simulated_image = tf.transpose(
            tf.matmul(product2,
                      tf.reshape(tf.transpose(image, perm=[2, 0, 1]),
                                 (image.shape[2], image.shape[0] * image.shape[1]))),
            perm=[1, 0])
        return tf.reshape(simulated_image, original_image_shape)
tensorflow.reshape
10,346
import tensorflow as tf
    for l in range(1, L):
        parameters['w' + str(l)] = tf.get_variable('w' + str(l), [layers[l], layers[l-1]],
                                                   dtype='float64',
                                                   initializer=tf.contrib.layers.xavier_initializer(seed=1))
        parameters['b' + str(l)] = tf.get_variable('b' + str(l), [layers[l], 1],
                                                   dtype='float64',
                                                   initializer=tf.zeros_initializer())
        parameters['beta' + str(l)] = tf.get_variable('beta' + str(l), [layers[l], 1],
                                                      dtype='float64',
                                                      initializer=init, trainable=train)
tensorflow.contrib.layers.xavier_initializer
10,347
import tensorflow as tf
    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(tf.summary.histogram(var.op.name, var))

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Group all updates into a single train op.
    train_op = tf.group(apply_gradient_op, variables_averages_op)

    # Create a saver.
    saver = tf.train.Saver(tf.global_variables())

    # Build the summary operation from the last tower summaries.
    summary_op = tf.summary.merge(summaries)
tensorflow.trainable_variables
10,348
from tensorflow.python.ops import math_ops
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  weights = _mask_weights(ignore_mask, weights)
  true_positives, true_positives_update_op = _streaming_true_positives(
      predictions, labels, weights, metrics_collections=None,
      updates_collections=None, name=None)
  false_negatives, false_negatives_update_op = _streaming_false_negatives(
      predictions, labels, weights, metrics_collections=None,
      updates_collections=None, name=None)

  def compute_recall(true_positives, false_negatives, name):
    return math_ops.select(
        math_ops.greater(true_positives + false_negatives, 0),
        math_ops.div(true_positives, true_positives + false_negatives),
        0,
        name)

  recall = compute_recall(true_positives, false_negatives, 'value')
  with ops.control_dependencies([true_positives_update_op,
                                 false_negatives_update_op]):
    update_op = compute_recall(true_positives, false_negatives, 'update_op')

  if metrics_collections:
    ops.add_to_collections(metrics_collections, recall)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
tensorflow.python.ops.math_ops.div
10,349
import tensorflow as tf
                     next_sentence_log_probs, next_sentence_labels):
      """Computes the loss and accuracy of the model."""
      masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                       [-1, masked_lm_log_probs.shape[-1]])
      masked_lm_predictions = tf.argmax(
          masked_lm_log_probs, axis=-1, output_type=tf.int32)
      masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
      masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
      masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
      masked_lm_accuracy = tf.metrics.accuracy(
          labels=masked_lm_ids,
          predictions=masked_lm_predictions,
          weights=masked_lm_weights)
      masked_lm_mean_loss = tf.metrics.mean(
tensorflow.reshape
10,350
import tensorflow as tf
  with tf.name_scope(name):
    degree_l = tf.convert_to_tensor(value=degree_l)
    order_m = tf.convert_to_tensor(value=order_m)
    theta = tf.convert_to_tensor(value=theta)
    phi = tf.convert_to_tensor(value=phi)

    var_type = theta.dtype
    sign_m = tf.math.sign(order_m)
    order_m = tf.abs(order_m)
    zeros = tf.zeros_like(order_m)
    result_m_zero = _spherical_harmonics_normalization(
        degree_l, zeros, var_type) * evaluate_legendre_polynomial(
            degree_l, zeros, tf.cos(theta))
    result_branch = _evaluate_spherical_harmonics_branch(
        degree_l, order_m, theta, phi, sign_m, var_type)
tensorflow.math.sign
10,351
import tensorflow as tf
            pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation)
        with tf.variable_scope('q1'):
            q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        with tf.variable_scope('q2'):
            q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        with tf.variable_scope('q1', reuse=True):
            q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
    elif nn_type == 'mlp_dropout':
        with tf.variable_scope('pi'):
            pi = act_limit * mlp_dropout(x, list(hidden_sizes) + [act_dim], activation, output_activation)
tensorflow.variable_scope
10,352
import tensorflow as tf
        average_across_timesteps=False,
        average_across_batch=True)
    # Update the cost
    self._cost = tf.reduce_sum(loss)
    self._final_state = state

    if not is_training:
      return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                      config.max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(self._lr)
    self._train_op = optimizer.apply_gradients(
        zip(grads, tvars),
        global_step=tf.contrib.framework.get_or_create_global_step())

    self._new_lr = tf.placeholder(
        tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)

  def _build_rnn_graph(self, inputs, config, is_training):
tensorflow.gradients
10,353
import tensorflow as tf print("Entropy Decoder") def loop_synthesis(element): y = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) x_coori= tf.cast(x_coori,tf.float32) x = synthesis_transform(x_coori,y) return tf.squeeze(x, [0]) element=[ys,x_coori] xs = tf.map_fn(loop_synthesis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Synthesis Transform") return xs
tensorflow.squeeze
10,354
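A minimal sketch of the tf.squeeze call in the row above, assuming TF 2.x eager execution; the shapes are illustrative.
import tensorflow as tf

x = tf.zeros([3, 4])
batched = tf.expand_dims(x, 0)        # [1, 3, 4]
# Drop the singleton batch axis again; passing [0] squeezes only axis 0,
# mirroring the expand-then-squeeze pattern in the row above.
unbatched = tf.squeeze(batched, [0])  # [3, 4]
print(unbatched.shape)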
import tensorflow as tf
            prev_layer,
            keep=0.5,
            n_units=100,
            act=tf.identity,
            W_init=tf.truncated_normal_initializer(stddev=0.1),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='dropconnect_layer',
    ):
tensorflow.constant_initializer
10,355
import tensorflow as tf
          enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
          feed_previous=feed_previous)

    def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
      cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
      return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
          enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
          feed_previous=feed_previous)

    def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
      cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
      return tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols,
          embedding_size=2, feed_previous=feed_previous)

    def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
      cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
      return tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols,
          embedding_size=2, feed_previous=feed_previous)

    for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
tensorflow.nn.seq2seq.embedding_attention_seq2seq
10,356
import tensorflow as tf
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
tensorflow.contrib.tpu.TPUEstimatorSpec
10,357
import tensorflow as tf
    parameters = initialize_parameters(network, activation, stdbeta)
    betan = tf.identity(parameters['beta' + str(L)], name="betan")  # add the output noise to the graph for later retrieval
    an, hn, _, _ = FW_prop(X, parameters, activation)  # post and pre-activation output of the last layer
    an = tf.identity(an, name="an")  # add the output post-activation value to the graph for later retrieval
    hn = tf.identity(hn, name='hn')  # add the output pre-activation value to the graph for later retrieval

    # Create a saver for the Model
    if save_model == True:
        saver = tf.train.Saver()

    #-----------------Initialize the cost and gradients---------------------------------------------------------
tensorflow.identity
10,358
import tensorflow as tf
    nms_masks_expected2 = tf.stack([mask0, mask1, mask4, mask2])
    nms_scores_expected2 = tf.constant([1.0, 0.9, 0.85, 0.8], dtype=tf.float32)
    nms_classes_expected2 = tf.constant([1, 2, 2, 3], dtype=tf.int32)
    self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())
    self.assertAllClose(nms_scores1.numpy(), nms_scores_expected1.numpy())
    self.assertAllEqual(nms_classes1.numpy(), nms_classes_expected1.numpy())
    self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy())
    self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy())
    self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy())

  def test_instance_non_maximum_suppression_1d_scores_empty_inputs(self):
    masks = tf.constant(1.0, shape=[0, 2, 2], dtype=tf.float32)
    scores = tf.constant([], dtype=tf.float32)
    classes = tf.constant([], dtype=tf.int32)
    (nms_masks1, nms_scores1, nms_classes1,
     _) = isu.instance_non_maximum_suppression_1d_scores(
         masks,
         scores,
         classes,
         min_score_thresh=0.65,
         min_iou_thresh=0.5,
         is_class_agnostic=True)
tensorflow.constant
10,359
import tensorflow as tf
            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = tf.layers.conv2d(self.X,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same',
                                          activation=tf.nn.relu)
            self.drop1 = tf.layers.dropout(self.conv1,
                                           self.config.cifar10_cnn["keep_prob"],
                                           training=self.train)
            self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
            self.conv2 = tf.layers.conv2d(self.pool1,
                                          self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same',
                                          activation=tf.nn.relu)
tensorflow.layers.max_pooling2d
10,360
import tensorflow as tf
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
                                               ATTENTION_SIZE, mask, softmax_stag=1,
                                               stag=stag, mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32, size=0,
tensorflow.reduce_sum
10,361
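A minimal sketch of the axis-1 tf.reduce_sum used in the attention row above, assuming TF 2.x eager execution; the weights are illustrative.
import tensorflow as tf

# Attention weights for a batch of 2 over 4 positions.
att = tf.constant([[0.1, 0.2, 0.3, 0.4], [0.25, 0.25, 0.25, 0.25]])
# Summing over axis 1 collapses the position dimension, as in the row above.
totals = tf.reduce_sum(att, 1)
print(totals.numpy())  # [1. 1.]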
import tensorflow as tf
        if use_coverage and coverage is not None:
            coverage_features = tf.expand_dims(coverage, axis=-1) * w_c  # [batch_size, passage_len, attention_vec_size]
            all_features += coverage_features
        e = tf.reduce_sum(v * tf.tanh(all_features), axis=-1)  # [batch_size, passage_len]
        attn_dist = nn_ops.softmax(e)  # [batch_size, passage_len]
        attn_dist *= passage_mask
tensorflow.tanh
10,362
import tensorflow as tf
            trisk = self.train_data['atrisk'][t]
            d = len(tfail)
            dr = len(trisk)

            logL += -cumsum_y_pred[tfail[-1]] + (0 if tfail[0] == 0 else cumsum_y_pred[tfail[0]-1])

            if self.train_data['ties'] == 'breslow':
                s = cumsum_hazard_ratio[trisk[-1]]
                logL += tf.log(s) * d
            elif self.train_data['ties'] == 'efron':
                s = cumsum_hazard_ratio[trisk[-1]]
                r = cumsum_hazard_ratio[tfail[-1]] - (0 if tfail[0] == 0 else cumsum_hazard_ratio[tfail[0]-1])
                for j in range(d):
                    logL += tf.log(s - j * r / d)
            else:
                raise NotImplementedError('tie breaking method not recognized')
tensorflow.log
10,363
import tensorflow as tf
      if self.task_index == 0 and FLAGS.summary_verbosity > 0:
        tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.scalar('total_loss', total_loss)
        for grad, var in avg_grads:
          if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)
        for var in tf.trainable_variables():
          tf.summary.histogram(var.op.name, var)

    fetches = [train_op, total_loss] + enqueue_ops
    return (enqueue_ops, fetches)

  def add_forward_pass_and_gradients(
tensorflow.trainable_variables
10,364
import tensorflow as tf
      self.assertEqual(save_path, val)

    # Start a second session.  In that session the parameter nodes
    # have not been initialized either.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1})

      with self.assertRaisesWithPredicateMatch(
tensorflow.Variable
10,365
from tensorflow.contrib.distributions.python.ops import distribution_util
    return math_ops.igammac(math_ops.floor(x + 1), self.rate)

  def _log_normalization(self):
    return self.rate

  def _log_unnormalized_prob(self, x):
    x = self._assert_valid_sample(x, check_integer=True)
    return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)

  def _mean(self):
    return array_ops.identity(self.rate)

  def _variance(self):
    return array_ops.identity(self.rate)

  @distribution_util.AppendDocstring(
      """Note: when `rate` is an integer, there are actually two modes: `rate`
      and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
  def _mode(self):
    return math_ops.floor(self.rate)

  def _assert_valid_sample(self, x, check_integer=True):
    if not self.validate_args:
      return x
    dependencies = [check_ops.assert_non_negative(x)]
    if check_integer:
      dependencies += [distribution_util.assert_integer_form(
          x, message="x has non-integer components.")]
    return control_flow_ops.with_dependencies(dependencies, x)
tensorflow.contrib.distributions.python.ops.distribution_util.AppendDocstring
10,366
import tensorflow as tf
                                tf.expand_dims(candidate_starts, 0),
                                tf.expand_dims(candidate_ends, 0),
                                tf.expand_dims(k, 0),
                                util.shape(context_outputs, 0),
                                True)  # [1, k]
    top_span_indices.set_shape([1, None])
    top_span_indices = tf.squeeze(top_span_indices, 0)  # [k]

    top_span_starts = tf.gather(candidate_starts, top_span_indices)  # [k]
    top_span_ends = tf.gather(candidate_ends, top_span_indices)  # [k]
    top_span_emb = tf.gather(candidate_span_emb, top_span_indices)  # [k, emb]
    top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices)  # [k]
    top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices)  # [k]
    top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices)  # [k]
    top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts)  # [k]

    c = tf.minimum(self.config["max_top_antecedents"], k)

    if self.config["coarse_to_fine"]:
      top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
    else:
      top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(top_span_emb, top_span_mention_scores, c)
tensorflow.gather
10,367
import tensorflow as tf
            h = x_shape[1] + pad_h0 + pad_h1
            w = x_shape[2] + pad_w0 + pad_w1
            pad_h1 += tf.mod(-h + bsize[1], bstrides[1])
            pad_w1 += tf.mod(-w + bsize[2], bstrides[2])
        return tf.pad(x, [[0, 0], [pad_h0, pad_h1], [pad_w0, pad_w1], [0, 0]])
    else:
        if bstrides is not None:
            assert bsize is not None, 'Must pass in bsize and bstrides together.'
            h = x_shape[1]
            w = x_shape[2]
            pad_h1 = tf.mod(-h + bsize[1], bstrides[1])
            pad_w1 = tf.mod(-w + bsize[2], bstrides[2])
            return tf.cond(
                tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)),
                lambda: tf.pad(x, [[0, 0], [0, pad_h1], [0, pad_w1], [0, 0]]),
                lambda: x)
        else:
            return x


def _get_offset_array_tf(shape):
    """
    Computes the offset array used to upsample indices with TensorFlow.

    :param shape: [list] Window shape.
    """
    center = [(ss - 1) // 2 for ss in shape]
tensorflow.greater
10,368
import tensorflow as tf
      foo = base_model.StatsCounter('foo')
      val = foo.Value()
      params = base_layer.BaseLayer.Params()
      inc = foo.IncBy(params, 100)

      tf.global_variables_initializer().run()
      self.assertAllEqual(0, val.eval())
      self.assertAllEqual(100, sess.run(inc))
      self.assertAllEqual(100, val.eval())
      self.assertAllEqual([100, 200], sess.run([val, inc]))
tensorflow.global_variables_initializer
10,369
import tensorflow as tf logger.debug("base conditional") # compute kernel stuff num_func = tf.shape(f)[1] # R Lm = tf.cholesky(Kmm) # Compute the projection matrix A A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)
tensorflow.matmul
10,370
import tensorflow as tf

class Conv3d(object):
    def __init__(self, name, input_dim, output_dim, k_t=2, k_h=4, k_w=4, d_t=1, d_h=1, d_w=1,
                 stddev=0.02, data_format='NDHWC'):
        with tf.variable_scope(name):
            assert (data_format == 'NDHWC')
            self.w = tf.get_variable('w',
                                     [k_t, k_h, k_w, input_dim, output_dim],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
tensorflow.variable_scope
10,371
import tensorflow as tf
    parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
    parser.add_argument('--n_transfer', type=int, default=12)
    parser.add_argument('--lm_coef', type=float, default=0.5)
    parser.add_argument('--b1', type=float, default=0.9)
    parser.add_argument('--b2', type=float, default=0.999)
    parser.add_argument('--e', type=float, default=1e-8)

    args = parser.parse_args()
    print(args)
    globals().update(args.__dict__)
    random.seed(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)

    logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)
    text_encoder = TextEncoder(encoder_path, bpe_path)
    encoder = text_encoder.encoder
    n_vocab = len(text_encoder.encoder)

    (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3) = encode_dataset(
        rocstories(data_dir), encoder=text_encoder)
    n_y = 2
    encoder['_start_'] = len(encoder)
    encoder['_delimiter_'] = len(encoder)
    encoder['_classify_'] = len(encoder)
tensorflow.set_random_seed
10,372
import tensorflow as tf
    # Wittich design
    def VNET_16L(self, I, is_train, reuse_unet=False, reuse_ada=False, adaption_net=False):
        def encoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train,
                         dropout=0.0, stddev=-1.0, slope=0.00, use_bias=True):
            with tf.variable_scope(name) as scope:
                if scale > 1:
                    X = self.conv(name + '_downsample', X, filter, scale, scale,
                                  (not norm) and use_bias, "VALID", stddev)
                else:
                    X = self.conv(name + '_conf', X, filter, f_size, 1,
                                  (not norm) and use_bias, "VALID", stddev)
                if norm == 'I':
tensorflow.variable_scope
10,373
import tensorflow as tf
        count=key_counts,
        mean=key_means,
        variance=key_variances,
        weight=tf.zeros_like(key_means, tf.float32))
    combiner = WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype,
tensorflow.zeros_like
10,374
import tensorflow as tf

def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
tensorflow.matmul
10,375
import tensorflow as tf
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
tensorflow.logging.info
10,376
import tensorflow as tf
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu,
      do_serve=FLAGS.do_serve)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)
tensorflow.contrib.tpu.TPUEstimator
10,377
import tensorflow as tf
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)

        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)

  def testAttentionDecoder2(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.nn.rnn_cell.GRUCell(2)
        inp = [tf.constant(0.5, shape=[2, 2])] * 2
        enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
        attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
                                    for e in enc_outputs])
        dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
        dec, mem = tf.nn.seq2seq.attention_decoder(
            dec_inp, enc_state,
            attn_states, cell, output_size=4, num_heads=2)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)

        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
tensorflow.reshape
10,378
import tensorflow as tf

def main(unused_argv):
  tf.compat.v1.enable_v2_behavior()  # The trainer only runs with V2 enabled.
tensorflow.compat.v1.enable_v2_behavior
10,379
import tensorflow as tf
        if self.activation is not None:
            output = self.activation(output)

        return output

nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
ColWiseMultLayer = lambda name: Lambda(
    lambda l: l[0] * (tf.matmul(tf.reshape(l[1], (-1, 1)),
                                tf.ones((1, l[0].get_shape()[1]), dtype=l[1].dtype))),
    name=name)
tensorflow.zeros_like
10,380
import tensorflow as tf
        'a',
        py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
    var_a = task.theta.a
    # Make a NaN gradient.
    var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))
    has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)

    with self.session():
      tf.global_variables_initializer().run()
      self.assertTrue(has_nan_or_inf.eval())
      self.assertEqual(0., grad_scale.eval())
      # The final gradient must be finite.
      self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())
      self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())

  def testScaleGradientsCheckNumerics(self):
    """ScaleGradients when enable_check_numerics=True."""
    FLAGS.enable_check_numerics = True
    p = self.TestParams()
    p.input = base_input_generator.BaseSequenceInputGenerator.Params()
    task = p.cls(p)
    task.CreateVariable(
        'a',
        py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))
    var_a = task.theta.a
    # Make a NaN gradient.
tensorflow.is_finite
10,381
import tensorflow as tf
    n_row, n_col, n_channel = x.shape
    n_patch = n_row * n_col // (patch_size ** 2)
    patches = tf.image.extract_patches(tf.expand_dims(x, 0), sizes=window, strides=window,
                                       rates=[1, 1, 1, 1], padding='VALID')
    patches = tf.reshape(patches, [n_patch, patch_size, patch_size, n_channel])
    patches = tf.random.shuffle(patches)
    rows = tf.split(patches, n_col // patch_size, axis=0)
    rows = [tf.concat(tf.unstack(x), axis=1) for x in rows]
    x_aug = tf.concat(rows, axis=0)
    x_aug = tf.convert_to_tensor(x_aug)
tensorflow.split
10,382
import tensorflow as tf
        policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]
        policy['batch_fun'] = lambda x: load_batch_flat(x, feat_specs['type'],
                                                        feat_specs['augment'],
                                                        feat_specs['qbnorm'])

    test_data = tf.data.Dataset.from_tensor_slices(test_files)
    test_data = test_data.batch(test_batch_size)
    test_data = test_data.map(lambda x: tf.py_func(
        policy['batch_fun'], [x], policy['batch_datatypes']))
tensorflow.data.Dataset.from_tensor_slices
10,383
import tensorflow as tf
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tensorflow.one_hot
10,384
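A minimal sketch of the tf.one_hot call in the row above, assuming TF 2.x eager execution; the label values are illustrative.
import tensorflow as tf

labels = tf.constant([0, 2, 1])
num_labels = 3
# Dense one-hot targets of shape [3, 3], matching the row above.
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
print(one_hot_labels.numpy())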
import tensorflow as tf
    entropy_bottleneck = EntropyBottleneck()
    conditional_entropy_model = SymmetricConditional()

    checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform,
                                     hyper_encoder=hyper_encoder,
                                     hyper_decoder=hyper_decoder,
tensorflow.train.Checkpoint
10,385
import tensorflow as tf
    if ex_index < 5:
      tf.logging.info("*** Example ***")
      tf.logging.info("guid: %s" % (example.guid))
      tf.logging.info("tokens: %s" % " ".join(
tensorflow.logging.info
10,386
import tensorflow as tf
      return tf.where(
          tf.less(
              tf.random_uniform(common_layers.shape_list(sampled_targets)),
              hparams.scheduled_sampling_gold_mixin_prob), gold_targets,
          sampled_targets)

    def sampled_results():
      """Generate scheduled sampling results."""
      sampled_targets = dp(sample, sharded_logits)
      new_targets = dp(mix_gold_sampled, sharded_features["targets"],
                       sampled_targets)
      new_features = transformed_features
      with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        with tf.variable_scope(target_modality.name):
          new_features["targets"] = target_modality.targets_bottom_sharded(
              new_targets, dp)
        with tf.variable_scope("body"):
          body_outputs, losses = model.model_fn_sharded(new_features)
          if not isinstance(losses, dict):  # If it's a single extra loss.
            losses = {"extra": losses}
        with tf.variable_scope(target_modality.name):
          new_sharded_logits = target_modality.top_sharded(
              body_outputs, sharded_features["targets"], dp)
          if "training" not in losses:
            training_loss = target_modality.loss_sharded(
                sharded_logits, sharded_features["targets"], dp)
tensorflow.variable_scope
10,387
import tensorflow as tf
    if not is_training:
      return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                      config.max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(self._lr)
    self._train_op = optimizer.apply_gradients(
        zip(grads, tvars),
        global_step=tf.train.get_or_create_global_step())

    self._new_lr = tf.placeholder(
        tf.float32, shape=[], name="new_learning_rate")
tensorflow.train.GradientDescentOptimizer
10,388
import tensorflow as tf

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
                                             'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
                          'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tensorflow.app.flags.DEFINE_float
10,389
import tensorflow as tf

def get_test_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
tensorflow.train.batch
10,390
import tensorflow as tf

def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
                         label_ids):
  """Get loss and log probs for the masked LM."""
  input_tensor = gather_indexes(input_tensor, positions)

  with tf.variable_scope("cls/predictions"):
    # We apply one more non-linear transformation before the output layer.
    # This matrix is not used after pre-training.
    with tf.variable_scope("transform"):
      input_tensor = tf.layers.dense(
          input_tensor,
          units=bert_config.hidden_size,
          activation=modeling.get_activation(bert_config.hidden_act),
tensorflow.variable_scope
10,391
import tensorflow as tf
            cv2.imwrite('generated_image.jpg', G[0, :, :, :] * 50 + 128)

        if self.is_summary:
            train_writer.close()
            validation_writer.close()

    def _feature_matching_loss(self, real_data_features, fake_data_features):
        real_data_mean = tf.reduce_mean(real_data_features, axis=0)
        fake_data_mean = tf.reduce_mean(fake_data_features, axis=0)
        feature_loss = tf.reduce_mean(tf.abs(tf.subtract(real_data_mean, fake_data_mean)))
        return feature_loss

    def _tower_loss_semi_supervised(self, inputs, targets, gpu_idx=0, num_classes=11,
tensorflow.reduce_mean
10,392
import tensorflow as tf
    with tf.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc:
        logits = tf.convert_to_tensor(cls_preds)
        onehot_labels = tf.convert_to_tensor(onehot_labels)
        precise_logits = tf.cast(logits, tf.float32) if (
            logits.dtype == tf.float16) else logits
        onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
        predictions = tf.nn.sigmoid(logits)
        predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1. - predictions)
        # add small value to avoid 0
        epsilon = 1e-8
        alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
        alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1 - alpha_t)
        losses = tf.reduce_sum(
            -alpha_t * tf.pow(1. - predictions_pt, gamma) * tf.log(predictions_pt + epsilon),
            name=name, axis=1)
        return losses
tensorflow.equal
10,393
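A minimal sketch of the tf.equal / tf.where selection used in the focal-loss row above, assuming TF 2.x eager execution; the probabilities are illustrative.
import tensorflow as tf

onehot = tf.constant([[1., 0.], [0., 1.]])
predictions = tf.constant([[0.9, 0.3], [0.2, 0.8]])
# p_t: probability assigned to the true class, selected elementwise with
# tf.equal, as in the focal-loss row above.
predictions_pt = tf.where(tf.equal(onehot, 1), predictions, 1. - predictions)
print(predictions_pt.numpy())  # [[0.9 0.7] [0.8 0.8]]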
import tensorflow as tf
                           mask_loss_t,
                           learning_rate_t])
    else:
      train_op = None
      scaffold_fn = None

    return tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        host_call=host_call,
tensorflow.contrib.tpu.TPUEstimatorSpec
10,394
import tensorflow as tf
        self.loss_d_sum = tf.summary.scalar("d_loss", self.discriminator_loss)
        self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.D_B_loss_real)
        self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake)
        self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.D_A_loss_real)
        self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.D_A_loss_fake)
        self.d_sum = tf.summary.merge(
            [self.loss_da_sum, self.da_loss_real_sum, self.da_loss_fake_sum,
             self.loss_db_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
             self.loss_d_sum]
tensorflow.summary.scalar
10,395
import tensorflow as tf
    x = flat_observations
    for size in self.hparams.policy_layers:
      x = tf.layers.dense(x, size, activation=tf.nn.relu)
    logits = tf.layers.dense(x, self.hparams.problem.num_actions)
tensorflow.layers.dense
10,396
import tensorflow as tf
        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                           maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1,
                                         dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
tensorflow.stack
10,397
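A minimal sketch of the tf.stack shape-packing trick in the epsilon-greedy row above, assuming TF 2.x eager execution (the row itself uses the TF 1.x spelling tf.random_uniform); values are illustrative.
import tensorflow as tf

batch_size = tf.constant(5)
# tf.stack([batch_size]) packs a scalar into a rank-1 shape tensor, which is
# what the row above feeds to the random-uniform op as a dynamic shape.
shape = tf.stack([batch_size])
sample = tf.random.uniform(shape, minval=0, maxval=1, dtype=tf.float32)
print(sample.shape)  # (5,)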
import tensorflow as tf
    # Create initial candidate list.
    candidates = tf.map_fn(
        self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype)

    def _ShouldMerge(unused_tokens, candidates):
      """Merge until not possible, or we abort early according to merge_prob."""
      return tf.logical_and(
          tf.reduce_any(tf.not_equal(candidates, NO_TOKEN)),
          tf.random.uniform([]) < self._merge_prob)

    def _MergeOneToken(tokens, i):
      return tf.expand_dims(
          self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1)
tensorflow.not_equal
10,398
import tensorflow as tf
  def testTracker(self):
    with tf.device(self._test_device):
      batch_size = 2
tensorflow.device
10,399