Columns:
  seed      string   lengths 25 to 2.89k characters
  seed_api  string   lengths 14 to 102 characters
  index     int64    values 0 to 14.8k
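Each row below pairs a raw TensorFlow snippet (seed) with the fully qualified API it exercises (seed_api) and a running row index. As a minimal sketch of how a table with this schema could be consumed — assuming the rows are published through the Hugging Face `datasets` library, and with the dataset identifier below purely a hypothetical placeholder — iteration might look like this:

# Minimal sketch: the dataset name is a placeholder, not the real identifier.
from datasets import load_dataset

ds = load_dataset("your-org/tf-api-seeds", split="train")  # hypothetical name
for row in ds.select(range(3)):
    # Each record carries the snippet text, the API it exercises, and its index.
    print(row["index"], row["seed_api"])
    print(row["seed"][:80])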
import tensorflow as tf

logits, loss = model.mtf_model_fn(features, mesh)
if use_tpu and logits is not None:
    logits = mtf.anonymize(logits)

# TRAIN mode
if mode == tf.estimator.ModeKeys.TRAIN:
    var_grads = mtf.gradients(
        [loss], [v.outputs[0] for v in graph.trainable_variables])
    lr = learning_rate.learning_rate_schedule(hparams)
    tf.summary.scalar("learning_rate", lr)
    mtf_lr = mtf.import_tf_tensor(
        mesh, tf.convert_to_tensor(lr, dtype=tf.float32), mtf.Shape([]))
    optimizer = mtf.optimize.make_optimizer(hparams, mtf_lr)
    update_ops = []
    for grad, var in zip(var_grads, graph.trainable_variables):
        update_ops.extend(optimizer.apply_grad(grad, var))

lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tensorflow.summary.scalar
9,500
import tensorflow as tf

    boundaries_reduce_to_length_fn=lambda x: max(tf.nest.flatten(x)),
    num_replicas_in_sync=num_replicas_in_sync)
if isinstance(bucket_batch_sizes, list):
    bucket_batch_sizes = [
        int(maximum_lower_multiple(x // num_replicas_in_sync, 8) * num_replicas_in_sync)
        for x in bucket_batch_sizes]
else:
    bucket_batch_sizes = int(maximum_lower_multiple(
        bucket_batch_sizes // num_replicas_in_sync, 8) * num_replicas_in_sync)
return dataset_utils.batch_examples_by_token(
    dataset,
    bucket_boundaries=bucket_boundaries,
    bucket_batch_sizes=bucket_batch_sizes,
    padding_values=padding_values,
    example_length_func=lambda x: {"feature": tf.size(x["feature"]),
                                   "label": tf.size(x["label"])},
    extra_padded_shapes={"src_lang": [], "trg_lang": []}
)

def build_metric_layer(self):
    return [SequenceTokenMetricLayer("src"), SequenceTokenMetricLayer("trg"),
            BatchCountMetricLayer("src")]

def get_eval_metric(self, args, name="metric", ds=None):
    """ Returns a neurst.metrics.metric.Metric object for evaluation."""
    if ds is None or not hasattr(ds, "trg_lang") or ds.trg_lang is None:
        logging.info("WARNING: The dataset must have `trg_lang` property, "
                     "otherwise no metric will be created.")
tensorflow.size
9,501
import tensorflow as tf

https://github.com/tensorflow/models/blob/master/object_detection/core/box_list_ops.py
"""

@under_name_scope()
def area(boxes):
    """
    Args:
        boxes: nx4 floatbox

    Returns:
        n
    """
    x_min, y_min, x_max, y_max = tf.split(boxes, 4, axis=1)
    return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])

@under_name_scope()
def pairwise_intersection(boxlist1, boxlist2):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxlist1: Nx4 floatbox
        boxlist2: Mx4

    Returns:
        a tensor with shape [N, M] representing pairwise intersections
    """
tensorflow.squeeze
9,502
import tensorflow as tf

# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
    'network_1/ppo_head_0/policy')

# Save the model as a servable model.
tf.saved_model.simple_save(session=sess,
                           export_dir='model',
                           inputs={"observation": input_nodes},
                           outputs={"policy": output_nodes.outputs[0]})
tensorflow.saved_model.simple_save
9,503
import tensorflow as tf

with tf.variable_scope(name):
    moving_mean = get_variable("mean", shape=[channels], dtype=tf.float32,
                               initializer=tf.constant_initializer(0.0), trainable=False)
    moving_variance = get_variable("var", shape=[channels], dtype=tf.float32,
                                   initializer=tf.constant_initializer(1.0), trainable=False)

    offset = get_variable("offset", shape=[channels], dtype=tf.float32,
                          initializer=tf.constant_initializer(0.0))
    scale = get_variable("scale", shape=[channels], dtype=tf.float32,
                         initializer=tf.constant_initializer(1.0),
                         regularizer=tf.nn.l2_loss)

    mean, variance = tf.nn.moments(inp, axes=[0, 1, 2], shift=moving_mean)
tensorflow.constant_initializer
9,504
import tensorflow as tf

d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
    enc_inp, dec_inp, cell, num_encoder_symbols=2,
    num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
    enc_inp, dec_inp2, cell, num_encoder_symbols=2,
    num_decoder_symbols=5, embedding_size=2, feed_previous=True)
tensorflow.nn.seq2seq.embedding_rnn_seq2seq
9,505
import tensorflow as tf

input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1],
                                      name='input_training_masks')

global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0),
                              trainable=False)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step,
                                           decay_steps=10000, decay_rate=0.94,
                                           staircase=True)
tensorflow.constant_initializer
9,506
import tensorflow as tf

def update(self, state, target, sess=None):
    sess = sess or tf.get_default_session()
tensorflow.get_default_session
9,507
import tensorflow as tf

  You almost certainly don't want dropout on there -- it's like randomly
  setting the (unscaled) probability of a target class to 0.5.

  Args:
    features: A 2D tensor with dimensions batch_size x num_features.
    num_classes: Number of classes for each task.
    weight_init: Weight initializer.
    bias_init: Bias initializer.
    dropout_prob: Float giving dropout probability for weights (NOT keep
      probability).
    name: Name for this op.

  Returns:
    A logits tensor with shape batch_size x num_classes.
  """
  with tf.name_scope(name, 'logits', [features]) as name:
    return dropout(
        fully_connected_layer(
            features, num_classes, weight_init=weight_init,
            bias_init=bias_init, name=name), dropout_prob)


def softmax_N(tensor, name=None):
  """Apply softmax across last dimension of a tensor.

  Args:
    tensor: Input tensor.
tensorflow.name_scope
9,508
import tensorflow as tf

predict_input_fn = file_based_input_fn_builder(
    input_file=predict_file,
    seq_length=FLAGS.max_seq_length,
    is_training=False,
    drop_remainder=predict_drop_remainder)

result = estimator.predict(input_fn=predict_input_fn)

output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
    tf.logging.info("***** Predict results *****")
    for prediction in result:
        output_line = "\t".join(
            str(class_probability) for class_probability in prediction) + "\n"
        writer.write(output_line)

if FLAGS.do_export:
    estimator._export_to_tpu = False
    estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn)
tensorflow.logging.info
9,509
import tensorflow as tf

def batch_norm(x, b_train, scope, reuse=False):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)
tensorflow.constant
9,510
import tensorflow as tf

    step=global_step,
)
# What are the average Q values of the relabelled tasks?
indices = tf.transpose(
    tf.stack([orig_indices, tf.squeeze(relabel_indices)], axis=0))
relabel_q_vals = tf.gather_nd(logits_vec, indices)
tf.compat.v2.summary.scalar(
    name="relabel_q_vals",
    data=tf.reduce_mean(relabel_q_vals),
    step=global_step,
)

max_q = tf.reduce_max(logits_vec, axis=1)
tf.compat.v2.summary.scalar(
    name="max_q", data=tf.reduce_mean(max_q), step=global_step)
### End metrics

# For both state-centric and goal-centric relabelling, the implementation of
# mixing is the same: we randomly replace some of the indices with the
# diagonal.
relabelled_tasks = tf.gather(candidate_tasks, tf.squeeze(relabel_indices))

if self._relabel_prob == 0:
    relabelled_tasks = orig_tasks
elif 0 < self._relabel_prob < 1:
    logits = tf.log([1.0 - self._relabel_prob, self._relabel_prob])
    mask = tf.squeeze(
        tf.random.categorical(
tensorflow.reduce_mean
9,511
import tensorflow as tf

    # When timeout is disabled and minimum/maximum batch size are equal, the
    # shape is statically known.
    self.assertEqual(2, a.shape[0].value)
    assertions_triggered[0] += 1
    return a

f0(tf.constant([1]))
f1(tf.constant([1]))
f2(tf.constant([1]))
self.assertEqual(3, assertions_triggered[0])

def test_out_of_order_execution1(self):
tensorflow.constant
9,512
import tensorflow as tf

    str(chi2),
    "tfp.distributions.Chi2("
    "\"silly/\", "  # What a silly name that is!
    "batch_shape=(2,), "
    "event_shape=(), "
    "dtype=float32)")

# There's no notion of partially known shapes in eager mode, so exit
# early.
if tf.executing_eagerly():
    return

exp = tfd.Exponential(rate=tf.placeholder_with_default(
    input=1., shape=None))
self.assertEqual(
    str(exp),
    "tfp.distributions.Exponential(\"Exponential/\", "
    # No batch shape.
    "event_shape=(), "
    "dtype=float32)")

def testStrWorksCorrectlyMultivariate(self):
    mvn_static = tfd.MultivariateNormalDiag(
        loc=np.zeros([2, 2]),
        name="MVN")
tensorflow.placeholder_with_default
9,513
import tensorflow as tf

for ch in chars:
    output, state = cell(ch, state)
    outputs.append(output)

# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
    chars = tf.nn.dropout(chars, self.keep_prob)

# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)


def loss(labels, predictions):
    """Computes mean squared loss."""
    return tf.reduce_mean(tf.squared_difference(predictions, labels))


def test(model, eval_data):
    """Computes the average loss on eval_data, which should be a Dataset."""
    avg_loss = tfe.metrics.Mean("loss")
    for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
        predictions = model((chars, sequence_length), training=False)
        avg_loss(loss(labels, predictions))
tensorflow.gather_nd
9,514
import tensorflow as tf

    q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders()
    q1, q1_reg = mlp_variational(q1_in_ph, q1_dropout_mask_phs, list(hidden_sizes) + [1],
                                 activation, None, dropout_rate)
    q1 = tf.squeeze(q1, axis=2)

with tf.variable_scope('q1', reuse=True):
    q1_pi, q1_pi_reg = mlp_variational(tf.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs,
                                       list(hidden_sizes) + [1], activation, None, dropout_rate)
    q1_pi = tf.squeeze(q1_pi, axis=2)

with tf.variable_scope('q2'):
    q2_in_ph = tf.concat([x, a], axis=-1)
    q2_in_dim = q2_in_ph.shape.as_list()[1]
tensorflow.concat
9,515
import tensorflow as tf

        self.n_itr = n_itr
        self.start_itr = start_itr
        self.num_inner_grad_steps = num_inner_grad_steps

        if sess is None:
            sess = tf.Session()
        self.sess = sess

    def train(self):
tensorflow.Session
9,516
import tensorflow as tf

    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_test_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
tensorflow.reshape
9,517
import tensorflow as tf

outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
                  tf.expand_dims(tf.nn.softmax(logits2), axis=1))
outer = tf.matrix_band_part(outer, 0, config.ans_limit)
tensorflow.nn.softmax
9,518
import tensorflow as tf

querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
tensorflow.layers.dense
9,519
import tensorflow as tf

output += token_type_embeddings

if use_position_embeddings:
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
        full_position_embeddings = tf.get_variable(
            name=position_embedding_name,
            shape=[max_position_embeddings, width],
tensorflow.control_dependencies
9,520
import tensorflow as tf

height = imshape[0]
x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
bboxes = tf.concat([x1, y1, x2, y2], axis=1)
tensorflow.minimum
9,521
import tensorflow as tf

# Exports the graph as binary format.
tf.train.export_meta_graph(filename, as_text=False)
tensorflow.train.export_meta_graph
9,522
import tensorflow as tf

im = tf.concat([a, b, c], axis=3)
im = tf.transpose(im, [0, 2, 3, 1])
im = (im + 1.0) * 128
im = tf.clip_by_value(im, 0, 255)
im = tf.cast(im, tf.uint8, name='viz')
tf.summary.image(name, im, max_outputs=50)

# use the initializers from torch
with argscope([Conv2D, Deconv2D], use_bias=False,
              W_init=tf.random_normal_initializer(stddev=0.02)), \
tensorflow.summary.image
9,523
import tensorflow as tf

                            'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('save-model', 1000,
                            'Number of steps between model saves (default: %(default)d)')

# Optimisation hyperparameters
tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
tf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),
                           'Directory where to write event logs and checkpoint. (default: %(default)s)')

run_log_dir = os.path.join(FLAGS.log_dir,
tensorflow.app.flags.DEFINE_integer
9,524
import tensorflow as tf

with tf.name_scope('pooling_for_un_head'):
    undep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1])  # [bs, sluh, sld]
    unhead_idxs = tf.tile(tf.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep])  # [bs, sluh, sld]
    if direction is None:
        direct_mask_un = tf.not_equal(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
    else:
        if direction == 'forward':
            direct_mask_un = tf.greater(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
        else:
            direct_mask_un = tf.less(unhead_idxs, undep_idxs)  # [bs, sluh, sld]
    # [bs, sluh, sld]
    rep_mask_tile_un = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_unhead_mask, 2))
    pooling_mask = tf.logical_and(direct_mask_un, rep_mask_tile_un)  # [bs, sluh, sld]

    # data for pooling
    pooling_data = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1])  # bs,sluh,sld,hn

    # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn]
    pooling_data = mask_for_high_rank(pooling_data, pooling_mask)  # [bs,sluh,sld,hn]
    pooling_data_sum = tf.reduce_sum(pooling_data, -2)  # [bs,sluh,hn]
    pooling_den = tf.reduce_sum(tf.cast(pooling_mask, tf.int32), -1, keep_dims=True)  # [bs,sluh]
    pooling_den = tf.where(tf.equal(pooling_den, 0), tf.ones_like(pooling_den), pooling_den)

    pooling_result = pooling_data_sum / tf.cast(pooling_den, tf.float32)
tensorflow.expand_dims
9,525
import tensorflow as tf

with slim.arg_scope(
    [slim.conv2d],
    weights_regularizer=slim.l2_regularizer(weight_decay),
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
    reuse=reuse):
    with tf.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]):
        branch_logits = []
        for i, rate in enumerate(atrous_rates):
            scope = scope_suffix
            if i:
tensorflow.variable_scope
9,526
from tensorflow.python.ops import variable_scope

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'true_positives', [predictions, labels]):

    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),
                                            math_ops.equal(predictions, 1))
tensorflow.python.ops.variable_scope.variable_scope
9,527
import tensorflow as tf

with tf.name_scope("UtterancesEncoder"):
    conv3 = encoder_embedding
    # conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
    conv3 = conv2d_bn(
        input=conv3,
        filter=[1, 3, conv3.size, conv3.size * conv_mul],
        phase_train=self.phase_train,
        name='conv_utt_size_3_layer_1'
    )

    encoded_utterances = reduce_max(conv3, [2], keep_dims=True)

with tf.name_scope("HistoryEncoder"):
    conv3 = encoded_utterances
    conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
    conv3 = conv2d_bn(
        input=conv3,
        filter=[3, 1, conv3.size, conv3.size * conv_mul],
        phase_train=self.phase_train,
        name='conv_hist_size_3_layer_1'
    )
    conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
    conv3 = conv2d_bn(
        input=conv3,
tensorflow.name_scope
9,528
import tensorflow as tf

self.end_points_G = self.model.generator([batch_size_train, 100], True, None, batch_size_val)

if gpu_idx == 0:
    G_means = tf.reduce_mean(self.end_points_G['softmax'], 0, keep_dims=True)
    G_vars = tf.reduce_mean(tf.square(self.end_points_G['softmax'] - G_means), 0, keep_dims=True)
    G = tf.Print(
tensorflow.reduce_mean
9,529
from tensorflow.python.ops import variable_scope

    encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size])
    return encoder_features

def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t,
                encoder_states, encoder_features, passage_word_idx, passage_mask):
    options = self.options

    with variable_scope.variable_scope("attention_decoder"):
        v = variable_scope.get_variable("v", [options.attention_vec_size])
        v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0)
        w_c = None
        if options.use_coverage:
            with variable_scope.variable_scope("coverage"):
                w_c = variable_scope.get_variable("w_c", [options.attention_vec_size])
                w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0)

        word_t_representation = self.embedding_lookup(word_t)

        (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder(
            state_t_1, context_t_1, coverage_t_1, word_t_representation, encoder_states,
            encoder_features, passage_word_idx, passage_mask, v, w_c, word_vocab)
        vocab_scores = tf.log(output_t)
        greedy_prediction = tf.reshape(tf.argmax(output_t, 1), [-1])  # calcualte greedy
        multinomial_prediction = tf.reshape(tf.multinomial(vocab_scores, 1), [-1])  # calculate multinomial
tensorflow.python.ops.variable_scope.variable_scope
9,530
import tensorflow as tf

if ex_index < 5:
    tf.logging.info("*** Example ***")
tensorflow.logging.info
9,531
import tensorflow as tf

:return: (TensorFlow Tensor) the updated scale expression
"""
with tf.control_dependencies([perturb_for_adaption]):
    update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
                                lambda: param_noise_scale.assign(param_noise_scale * 1.01),
tensorflow.control_dependencies
9,532
import tensorflow as tf

    direct_mask = tf.greater(head_idxs, dep_idxs)  # [bs, slh, sld]
else:
    direct_mask = tf.less(head_idxs, dep_idxs)  # [bs, slh, sld]
# [bs, slh, slh]
rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
attn_mask = tf.logical_and(direct_mask, rep_mask_tile)  # [bs, slh, sld]

# tensor tile
rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1])  # bs,slh,sld,vec
with tf.variable_scope('attention'):  # bs,sl,sl,vec
tensorflow.logical_and
9,533
import tensorflow as tf

# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
tensorflow.add_n
9,534
import tensorflow as tf

tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
                          'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')

tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')

tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ')
tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set')
tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info')
tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps')
tensorflow.app.flags.DEFINE_integer
9,535
import tensorflow as tf

    new_meta_graph_def = new_saver.export_meta_graph()
    # It should be the same as the original.
    self.assertProtoEquals(meta_graph_def, new_meta_graph_def)

def testAddCollectionDefFails(self):
    with self.test_session():
        # Creates a graph.
        v0 = tf.Variable(10.0, name="v0")
        # Creates a saver.
        save = tf.train.Saver({"v0": v0})
        # Generates MetaGraphDef.
        meta_graph_def = meta_graph_pb2.MetaGraphDef()

        # Verifies that collection with unsupported key will not be added.
        tf.add_to_collection(save, 3)
        save._add_collection_def(meta_graph_def, save)
        self.assertEqual(len(meta_graph_def.collection_def), 0)

        # Verifies that collection where item type does not match expected
        # type will not be added.
        tf.add_to_collection("int_collection", 3)
        tf.add_to_collection("int_collection", 3.5)
        save._add_collection_def(meta_graph_def, "int_collection")
        self.assertEqual(len(meta_graph_def.collection_def), 0)

def _testMultiSaverCollectionSave(self):
    test_dir = self._TestDir("saver_collection")
    filename = os.path.join(test_dir, "metafile")
tensorflow.add_to_collection
9,536
import tensorflow as tf

def res_block_3_layers(self, bottom, channel_list, name, change_dimension = False):
    if (change_dimension):
        block_conv_input = self.conv_layer(bottom = bottom, kernal_size = 1,
                                           in_channels = bottom.get_shape().as_list()[-1],
                                           out_channels = channel_list[2], stride = 1,
                                           name = name + "_branch1")
    else:
        block_conv_input = bottom

    input_filter = bottom.get_shape().as_list()[-1]
    block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a")
    block_norm_1 = tf.layers.batch_normalization(inputs=block_conv_1, axis = 3,
                                                 momentum=configs['_BATCH_NORM_DECAY'],
                                                 epsilon=configs['_BATCH_NORM_EPSILON'],
                                                 center=True, scale=True,
                                                 training=self.is_training, fused=True)
    block_relu_1 = tf.nn.relu(block_norm_1)

    block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b")
    block_norm_2 = tf.layers.batch_normalization(inputs=block_conv_2, axis = 3,
                                                 momentum=configs['_BATCH_NORM_DECAY'],
                                                 epsilon=configs['_BATCH_NORM_EPSILON'],
                                                 center=True, scale=True,
                                                 training=self.is_training, fused=True)
    block_relu_2 = tf.nn.relu(block_norm_2)

    block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c")
    block_res = tf.add(block_conv_input, block_conv_3)
    relu = tf.nn.relu(block_res)

    return relu

def avg_pool(self, bottom, kernal_size = 2, stride = 2, name = "avg"):
    return tf.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1],
                          strides=[1, stride, stride, 1], padding='VALID', name=name)
tensorflow.layers.batch_normalization
9,537
from tensorflow.python.estimator.canned import head as head_lib

def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    # Use core head
    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)

    model = estimator.GradientBoostedDecisionTreeEstimator(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss
9,538
from tensorflow.contrib.learn.python.learn.estimators import run_config

    classifier.fit(input_fn=_train_input_fn, steps=15)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)

def testThatLeafIndexIsInPredictions(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")],
        output_leaf_index=True)
tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig
9,539
import tensorflow as tf

tf.scalar_summary(loss.op.name, loss)

# Creates the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(0.01)
tensorflow.train.GradientDescentOptimizer
9,540
import tensorflow as tf

    minimize(loss, global_step=global_step)
else:
    raise NotImplementedError('activation not recognized')

# init op
init_op = tf.global_variables_initializer()

# Save into class members
self.X = X
tensorflow.global_variables_initializer
9,541
import tensorflow as tf

                                [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),
                                [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))
truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),
                                [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))
truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),
                          [self.batch_size, s_h, s_w, self.c_dim], name='d_h4')

self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
mean, var = tf.nn.moments(tgtimg_z, axes=[0])
print(var.get_shape())
# self.simloss /= tf.reduce_mean(var)
print(tgtimg_z.get_shape())
self.out = output_h4  # + contextimg#tf.nn.tanh(h4)
self.out2 = truthoutput_h4
self.recon1 = tf.nn.l2_loss(tgtimg - self.out)
self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)
# self.loss = self.recon1 + self.recon2 + self.simloss
if ablation_type == "None":
    self.loss = self.recon1 + self.recon2 + self.simloss
elif ablation_type == "L2":
    self.loss = self.recon1 + self.recon2
elif ablation_type == "L2L3":
    self.loss = self.recon1
elif ablation_type == "L1":
    self.loss = self.recon2 + self.simloss


class ContextAESweep:
    def __init__(self, gf_dim=64, df_dim=64, gfc_dim=1024, dfc_dim=1024,
tensorflow.nn.l2_loss
9,542
import tensorflow as tf

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)

output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
    train_op = optimization.create_optimizer(
        total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

    output_spec = tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
    def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                  masked_lm_weights, next_sentence_example_loss,
                  next_sentence_log_probs, next_sentence_labels):
        """Computes the loss and accuracy of the model."""
        masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
tensorflow.contrib.tpu.TPUEstimatorSpec
9,543
import tensorflow as tf

mask4 = tf.constant([[0, 0], [0, 0]], dtype=tf.float32)
mask5 = tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])
classes = tf.constant([1, 2, 3, 1, 2, 3], dtype=tf.int32)
scores = tf.constant([1.0, 0.9, 0.8, 0.95, 0.85, 0.6], dtype=tf.float32)
(nms_masks1,
tensorflow.stack
9,544
import tensorflow as tf

def __init__(self, model_dir):
    super(RestoreMovingAverageHook, self).__init__()
    self.model_dir = model_dir

def begin(self):
    ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY)
    variables_to_restore = ema.variables_to_restore()
    self.load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
        tf.train.latest_checkpoint(self.model_dir),
        variables_to_restore
    )
tensorflow.train.ExponentialMovingAverage
9,545
from tensorflow.python.ops import math_ops

  def _sample_n(self, n, seed=None):
    """See the documentation for tf.random_gamma for more details."""
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) -
            self.beta / x)

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
tensorflow.python.ops.math_ops.lgamma
9,546
import tensorflow as tf

def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
    ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
    if "partial_targets" in features:
        pt = features["partial_targets"]
        pt_length = common_layers.shape_list(pt)[1]
        pt = tf.tile(pt, [1, beam_size])
        pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
        ids = tf.concat([pt, ids], axis=1)

    features["targets"] = ids
    self._coverage = None
    logits, _ = self(features)  # pylint: disable=not-callable
    # now self._coverage is a coverage tensor for the first datashard.
tensorflow.tile
9,547
import tensorflow as tf

mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)
start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)

mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],
                     [cutout_size + start_x[0], im_width - start_x[0]]])
mask = mask[cutout_size: cutout_size + im_height,
            cutout_size: cutout_size + im_width]
mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))
image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))

return image

def _add_drop_path(self, X, keep_prob):
    with tf.variable_scope('drop_path'):
        batch_size = tf.shape(X)[0]
        noise_shape = (batch_size, 1, 1, 1)
        random_tensor = keep_prob + tf.random_uniform(noise_shape, dtype=tf.float32)
tensorflow.equal
9,548
from tensorflow.python.ops import array_ops

logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
tensorflow.python.ops.array_ops.zeros_like
9,549
import tensorflow as tf

# get the representation of START
start_p = tf.nn.softmax(start_logits_masked, axis=-1, name="softmax_start")
start_feature = tf.einsum("lbh,bl->bh", output, start_p)

# note(zhiliny): no dependency on end_feature so that we can obtain
# one single `cls_logits` for each sample
ans_feature = tf.concat([start_feature, cls_feature], -1)
ans_feature = tf.layers.dense(
    ans_feature,
    xlnet_config.d_model,
    activation=tf.tanh,
    kernel_initializer=initializer, name="dense_0")
ans_feature = tf.layers.dropout(ans_feature, FLAGS.dropout,
                                training=is_training)
cls_logits = tf.layers.dense(
    ans_feature,
    1,
    kernel_initializer=initializer,
    name="dense_1",
    use_bias=False)
cls_logits = tf.squeeze(cls_logits, -1)

return_dict["cls_logits"] = cls_logits

return return_dict
tensorflow.layers.dropout
9,550
import tensorflow as tf

tf.logging.info("***** Running prediction*****")
tensorflow.logging.info
9,551
from tensorflow.python.framework import ops

  """Shape function for the AssignAdd and AssignSub dense update ops."""
  return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]


@ops.RegisterShape("CountUpTo")
def _CountUpToShape(op):
  """Shape function for the CountUpTo op."""
  return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())]
tensorflow.python.framework.ops.RegisterShape
9,552
import tensorflow as tf

# Dividing the rois by h and w gives the position of the rois on the feature map
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
# Won't be backpropagated to rois anyway, but to save time
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
# 'roi_pooling_size', 7
pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
# Crop the regions of the feature map that correspond to the rois, then resize them to the 14*14 size
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
                                 [pre_pool_size, pre_pool_size], name="crops")
tensorflow.concat
9,553
import tensorflow as tf

    labels=masked_lm_ids,
    predictions=masked_lm_predictions,
    weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
    values=masked_lm_example_loss, weights=masked_lm_weights)
tensorflow.metrics.mean
9,554
import tensorflow as tf

z_s_flat = tf.reshape(z_s, [-1])
y_s_flat = tf.reshape(y_s, [-1])
x_s_flat = tf.reshape(x_s, [-1])
tensorflow.reshape
9,555
import tensorflow as tf

    num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
dataset = dataset.map(
    lambda x: (
        tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32),
        tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32),
        tf.cast(tf.sparse.to_dense(x[d.attention_mask]), tf.int32),
        tf.cast(tf.sparse.to_dense(x[d.labels]), tf.int32),
    ),
    num_parallel_calls=utils.AUTOTUNE,
).prefetch(utils.AUTOTUNE)
# do transformation
return d(dataset, **kwargs)
tensorflow.sparse.to_dense
9,556
import tensorflow as tf

assert size[0] % 2 == 1 and size[1] % 2 == 1, "REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. " + str(size)
pad_x = size[0] // 2
pad_y = size[1] // 2
input = tf.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], "REFLECT")
padding = "VALID"
return tf.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride],
                        padding=padding, kernel_initializer=init, name='conv' + id,
                        use_bias=use_bias, dilation_rate=(dilation, dilation))

def z_conv(self, id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1):
    # zero mean conv
tensorflow.layers.conv2d
9,557
import tensorflow as tf

log_probs = tf.nn.log_softmax(logits, axis=-1)

label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])

one_hot_labels = tf.one_hot(
    label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
tensorflow.one_hot
9,558
import tensorflow as tf

    model_outputs['class_targets'], model_outputs['box_targets'], params)

# Only training has the mask loss. Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/model_builder.py  # pylint: disable=line-too-long
if mode == tf.estimator.ModeKeys.TRAIN and params['include_mask']:
    mask_loss = losses.mask_rcnn_loss(
        model_outputs['mask_outputs'], model_outputs['mask_targets'],
        model_outputs['select_class_targets'], params)
else:
    mask_loss = 0.
if variable_filter_fn:
    var_list = variable_filter_fn(tf.trainable_variables(), params['resnet_depth'])
else:
    var_list = None
l2_regularization_loss = _WEIGHT_DECAY * tf.add_n(
    [tf.nn.l2_loss(v) for v in var_list
     if 'batch_normalization' not in v.name and 'bias' not in v.name])
total_loss = (total_rpn_loss + total_fast_rcnn_loss + mask_loss +
              l2_regularization_loss)
tensorflow.trainable_variables
9,559
import tensorflow as tf

"""
with tf.variable_scope(scope, reuse=reuse):
    observations_ph = make_obs_ph("observation")
    stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
    update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")

    eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))

    q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
    deterministic_actions = tf.argmax(q_values, axis=1)

    batch_size = tf.shape(observations_ph.get())[0]
tensorflow.constant_initializer
9,560
import tensorflow as tf

name_to_features = {
    "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
    "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
    "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
    "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
    "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
    "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
    "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
}

# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
    d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
    d = d.repeat()
    d = d.shuffle(buffer_size=len(input_files))

    # `cycle_length` is the number of parallel files that get read.
    cycle_length = min(num_cpu_threads, len(input_files))

    # `sloppy` mode means that the interleaving is not exact. This adds
    # even more randomness to the training pipeline.
tensorflow.FixedLenFeature
9,561
import tensorflow as tf

        bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)

# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
    "output_bias",
    shape=[bert_config.vocab_size],
    initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if clip:
    log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))
else:
    log_probs = tf.nn.log_softmax(logits, axis=-1)
tensorflow.zeros_initializer
9,562
import tensorflow as tf

word_emb = lm_embeddings["word_emb"]  # [num_sentences, max_sentence_length, 512]
lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1),
                   lm_embeddings["lstm_outputs1"],
                   lm_embeddings["lstm_outputs2"]], -1)  # [num_sentences, max_sentence_length, 1024, 3]
lm_emb_size = util.shape(lm_emb, 2)
lm_num_layers = util.shape(lm_emb, 3)
with tf.variable_scope("lm_aggregation"):
    self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0)))
    self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0))
flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers])
flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1))  # [num_sentences * max_sentence_length * emb, 1]
aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size])
aggregated_lm_emb *= self.lm_scaling
context_emb_list.append(aggregated_lm_emb)
tensorflow.constant_initializer
9,563
import tensorflow as tf

    boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
    objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
    classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))

boxes = tf.concat(boxes, axis=1)
objects = tf.concat(objects, axis=1)
classes = tf.concat(classes, axis=1)

scores = objects * classes

boxes, scores, classes, valid = tf.image.combined_non_max_suppression(
    boxes=boxes,
    scores=scores,
    max_output_size_per_class=max_outputs,
tensorflow.concat
9,564
import tensorflow as tf

with tf.variable_scope("slow_antecedent_scores"):
    slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout)  # [k, c, 1]
slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2)  # [k, c]
return slow_antecedent_scores  # [k, c]
tensorflow.squeeze
9,565
import tensorflow as tf

# cur_weights = tf.slice(weights_tensors, [tf.cast(rand_horizon, tf.int32), 0, 0], [1, epi_len, new_w])
horizon_pred = tf.matmul(pred, cur_weights)
horizon_tgt = tf.matmul(tgt, cur_weights)
return horizon_pred, horizon_tgt


def contra_traj_lossV2(pred, tgt, horizon=9):
    # Step-wise contrastive loss
    horizon_pred = horizon_sumV1(pred, horizon)
    horizon_tgt = horizon_sumV1(tgt, horizon)
    pred1, pred2 = tf.split(horizon_pred, 2, axis=0)
    tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)

    loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small)))
    loss = tf.reduce_mean(loss)
    return loss


# randrom horizon
def contra_traj_lossV3(pred, tgt, horizon=12):
    # Step-wise contrastive loss
    horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
tensorflow.where
9,566
import tensorflow as tf

embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

# coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)

evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder,
         phase_train_placeholder, batch_size_placeholder, control_placeholder,
tensorflow.train.start_queue_runners
9,567
import tensorflow as tf

    FLAGS.max_seq_length, tokenizer,
    predict_file)

tf.logging.info("***** Running prediction*****")
tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                len(predict_examples), num_actual_predict_examples,
                len(predict_examples) - num_actual_predict_examples)
tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
    input_file=predict_file,
    seq_length=FLAGS.max_seq_length,
    is_training=False,
tensorflow.logging.info
9,568
import tensorflow as tf

    # Old TF behavior.
    L_flat = tf.concat(1, [zeros, L_flat])
except TypeError:
    # New TF behavior
    L_flat = tf.concat([zeros, L_flat], 1)

# Create mask that can be used to gather elements from L_flat and put them
# into a lower triangular matrix.
tensorflow.concat
9,569
import tensorflow as tf

g = maybe_avg(g)
b = maybe_avg(b)
W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 3])

# calculate convolutional layer output
x = tf.nn.conv2d_transpose(x, W, target_shape, [1] + list(stride) + [1], padding=pad)
tensorflow.nn.l2_normalize
9,570
import tensorflow as tf

        variable_summaries(bias)
        conv = tf.nn.bias_add(conv, bias)

    tf.add_to_collection('debug_layers', conv)

    return conv

@staticmethod
def _relu(name, x):
    with tf.variable_scope(name):
        return tf.nn.relu(x)

@staticmethod
def _fc(name, x, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
    with tf.variable_scope(name):
        n_in = x.get_shape()[-1].value

        w = variable_with_weight_decay([n_in, output_dim], initializer, l2_strength)
        variable_summaries(w)

        if isinstance(bias, float):
            bias = tf.get_variable("biases", [output_dim], tf.float32, tf.constant_initializer(bias))

        variable_summaries(bias)
tensorflow.contrib.layers.xavier_initializer
9,571
import tensorflow as tf

    return x

@staticmethod
def _conv(name, x, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
          initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, dilation=1.0, bias=-1):
    with tf.variable_scope(name):
        stride = [1, stride[0], stride[1], 1]
tensorflow.contrib.layers.xavier_initializer
9,572
import tensorflow as tf

'''
G(x) = 1 / (sigma*(2*pi)^(1/2)) * e^{-(1/2)*[(x-u)/sigma]^2}
'''
def gaussian_pdf(mean, loc_std, sample):
    Z = 1.0 / (loc_std * tf.sqrt(2.0 * np.pi))
    a = - tf.square(sample - mean) / (2.0 * tf.square(loc_std))
    return Z * tf.exp(a)

class ACNet:
    def __init__(self, scope, GRID_SIZE, a_size, trainer, TRAINING, GLOBAL_NET_SCOPE):
        with tf.variable_scope(str(scope)+'/qvalues'):
tensorflow.sqrt
9,573
import tensorflow as tf

    name='pool1'
)
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
tensorflow.layers.conv2d
9,574
import tensorflow as tf

self._initial_state = cell.zero_state(config.batch_size, tf.float32)
state = self._initial_state
outputs = []
with tf.variable_scope('RNN'):
    for time_step in range(self.num_steps):
        if time_step > 0:
            tf.get_variable_scope().reuse_variables()
        (cell_output, state) = cell(inputs[:, time_step, :], state)
        outputs.append(cell_output)
output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])
return output, state

def assign_lr(self, session, lr_value):
    session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

def with_prefix(self, prefix, name):
    return '/'.join((prefix, name))
tensorflow.concat
9,575
import tensorflow as tf

#### Quantity to monitor
monitor_dict = {}

if FLAGS.use_bfloat16:
    tgt_mask = tf.cast(tgt_mask, tf.float32)
    lm_loss = tf.cast(lm_loss, tf.float32)

total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask)
monitor_dict["total_loss"] = total_loss
tensorflow.cast
9,576
from tensorflow.python.ops import math_ops

if proba:
    return nn.softmax(logits)
else:
    return math_ops.argmax(logits, 1)

def _default_eval_metrics(self):
tensorflow.python.ops.math_ops.argmax
9,577
from tensorflow.python.ops import math_ops

is_false_negative = math_ops.to_float(
    math_ops.logical_and(label_is_pos, pred_is_neg))
is_false_positive = math_ops.to_float(
    math_ops.logical_and(label_is_neg, pred_is_pos))
is_true_negative = math_ops.to_float(
    math_ops.logical_and(label_is_neg, pred_is_neg))

if weights is not None:
    weights = math_ops.to_float(weights)
    weights_tiled = array_ops.tile(array_ops.reshape(
tensorflow.python.ops.math_ops.logical_and
9,578
import tensorflow as tf

def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"):
    with tf.variable_scope(scope):
        d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
        s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0"))
        s = dense(s0, 1, use_bias=False, scope="s")
        s1 = softmax_mask(tf.squeeze(s, [2]), mask)
        a = tf.expand_dims(tf.nn.softmax(s1), axis=2)
        res = tf.reduce_sum(a * memory, axis=1)
        return res


def dot_attention(inputs, memory, mask, hidden, keep_prob=1.0, is_train=None, scope="dot_attention"):
tensorflow.nn.softmax
9,579
import tensorflow as tf

      reg_weights: a float32 tensor with shape [num_anchors] representing regression weights
    """
    reg_weights = tf.cast(match.matched_column_indicator(), tf.float32)
    matched_gt_indices = match.matched_row_indices()
    matched_label = tf.gather(groundtruth_labels, matched_gt_indices)
    matched_is_foreground = tf.cast(matched_label[:,0] <= 0, tf.float32)
    matched_anchor_indices = match.matched_column_indices()
    unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices()
    unmatched_ignored_reg_weights = tf.gather(reg_weights, unmatched_ignored_anchor_indices)
    reg_weights = tf.dynamic_stitch(
        [matched_anchor_indices, unmatched_ignored_anchor_indices],
tensorflow.gather
9,580
import tensorflow as tf

#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()

hidden_size = output_layer.shape[-1].value

output_weights = tf.get_variable(
    "output_weights", [num_labels, hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))

output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
tensorflow.truncated_normal_initializer
9,581
import tensorflow as tf

with self.test_session(graph=tf.Graph()) as sess:
tensorflow.Graph
9,582
import tensorflow as tf

    conv = tf.nn.atrous_conv2d(x, w, dilation, padding)
else:
    if type(padding) == type(''):
        conv = tf.nn.conv2d(x, w, stride, padding)
    else:
        conv = tf.pad(x, padding, "CONSTANT")
        conv = tf.nn.conv2d(conv, w, stride, padding='VALID')

if bias != -1:
    bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))

    variable_summaries(bias)
    conv = tf.nn.bias_add(conv, bias)

tf.add_to_collection('debug_layers', conv)

return conv
tensorflow.constant_initializer
9,583
import tensorflow as tf

from data import DataHandler
from models import ACRegNet
import tensorflow as tf
from utils import get_random_batch, read_config_file, create_dir


RUN_IN_GPU = False


def train_acregnet_model(config):
    tf.reset_default_graph()
    tf_config = tf.ConfigProto()

    if RUN_IN_GPU:
        tf_config.gpu_options.allow_growth = True

    sess = tf.Session(config=tf_config)

    train_ims, _ = DataHandler.load_images(config['train_ims_file'])
    train_lbs, _ = DataHandler.load_labels(config['train_lbs_file'])
tensorflow.reset_default_graph
9,584
from tensorflow.python.ops import math_ops

def logits_to_predictions(self, logits, proba=False):
    if proba:
        raise ValueError(
            "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
    # To prevent broadcasting inside "-".
    if len(target.get_shape()) == 1:
tensorflow.python.ops.math_ops.argmax
9,585
from tensorflow.python.platform import tf_logging as logging

# Check that we are not running evaluation on the same checkpoint.
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
    logging.debug("Skipping evaluation since model has not been saved yet "
                  "at step %d.", step)
    return False
tensorflow.python.platform.tf_logging.debug
9,586
import tensorflow as tf

class StackedBilstmCrfModel(Model):
    @classmethod
    def default_params(cls):
        default_params = {
            'stacked_layers': 2
        }
        return default_params

    def bilstm_layer(self, embeddings, nwords):
        t = tf.transpose(embeddings, perm=[1, 0, 2])
        lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
        lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
        lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
        output_fw, _ = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
        output_bw, _ = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
        output = tf.concat([output_fw, output_bw], axis=-1)
        # transpose it back
        output = tf.transpose(output, perm=[1, 0, 2])
tensorflow.contrib.rnn.LSTMBlockFusedCell
9,587
import tensorflow as tf

    name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size], name='example_weights')
tensorflow.placeholder
9,588
import tensorflow as tf

log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes)
loss = tf.reduce_mean(log_probs)
self._mark_for_monitoring('loss', loss)

# Add regularization loss
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_loss = reg_decay * tf.add_n(reg_losses)
self._mark_for_monitoring('reg_loss', reg_loss)

# Add loss from auxiliary logits
tensorflow.get_collection
9,589
from tensorflow.python.training import training_ops

  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.

    Args:
      learning_rate: A Tensor or a floating point value. The learning
        rate to use.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate

  def _apply_dense(self, grad, var):
    return training_ops.apply_gradient_descent(
        var,
        self._learning_rate_tensor,
        grad,
        use_locking=self._use_locking).op

  def _apply_sparse(self, grad, var):
    delta = ops.IndexedSlices(grad.values * self._learning_rate_tensor,
                              grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)

  def _prepare(self):
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
tensorflow.python.training.training_ops.apply_gradient_descent
9,590
import tensorflow as tf

padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])

img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1)  # b x 2h x w
transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30)
tensorflow.concat
9,591
import tensorflow as tf

logit_b = tf.get_variable('b', shape=[1], initializer=tf.constant_initializer(0.0), dtype=dtype)
logits = tf.squeeze(tf.nn.bias_add(tf.matmul(dnn_output, logit_w), logit_b), squeeze_dims=[1])
prediction = tf.nn.sigmoid(logits)
prediction_inspect = tf.reshape(prediction, [batch_size, rnn_nunroll])
prediction_final = tf.squeeze(tf.slice(prediction_inspect, [0, rnn_nunroll - 1], [-1, 1]), squeeze_dims=[1])
print('logit: {}'.format(logits.get_shape()))

# Compute loss
tensorflow.slice
9,592
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(self.inputs)
conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1a)
conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1b)
pool1 = MaxPool2D(pool_size=[2, 2])(conv1c)
# Block 2
conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool1)
conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2a)
conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2b)
pool2 = MaxPool2D(pool_size=[2, 2])(conv2c)
# Block 3
conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool2)
conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3a)
conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3b)
pool3 = MaxPool2D(pool_size=[2, 2])(conv3c)

# final convolutional layer
# removed GOAL_SIZE
conv4 = Conv2D(padding="valid", filters=RNN_SIZE-loc_layer_size, kernel_size=[2, 2], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=None)(pool3)

# FC layers
flat1a = Flatten(data_format='channels_last')(conv4)
# removed GOAL_SIZE
flat1b = Dense(units=RNN_SIZE-loc_layer_size)(flat1a)
tensorflow.keras.layers.Conv2D
9,593
import tensorflow as tf

"""
# Reshape to use within a convolutional neural net. Last dimension is for
# 'features' - it would be 1 one for a grayscale image, 3 for an RGB image,
# 4 for RGBA, etc.

x_image = tf.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])

x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image)
x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image)

img_summary = tf.summary.image('Input_images', x_image)

# First convolutional layer - maps one image to 32 feature maps.
with tf.variable_scope('Conv_1'):
tensorflow.map_fn
9,594
import tensorflow as tf

marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1])  # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1])  # [k]
tensorflow.reduce_logsumexp
9,595
import tensorflow as tf

# tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                             log_device_placement=True))

self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))

self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))
self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))

self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients
self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q))  # dummy variable for fwd_gradients

self.U0_pred = self.net_U0(self.x0_tf)  # N0 x q
self.U1_pred = self.net_U1(self.x1_tf)  # N1 x q

self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \
            tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred))

self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                        method = 'L-BFGS-B',
                                                        options = {'maxiter': 50000,
                                                                   'maxfun': 50000,
                                                                   'maxcor': 50,
                                                                   'maxls': 50,
                                                                   'ftol' : 1.0 * np.finfo(float).eps})

self.optimizer_Adam = tf.train.AdamOptimizer()
self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)

init = tf.global_variables_initializer()
self.sess.run(init)
tensorflow.square
9,596
import tensorflow as tf

    self._testSampleTaskHelper(p)

def testSampleTask(self):
    """Expected distribution: 'a': 0.8 , 'b': 0.2."""
    p = self._setUpTestSampleTask()
    p.task_schedule = task_scheduler.ConstantScheduler.Params()
    p.task_schedule.task_probs = [('a', 0.8), ('b', 0.2)]
    self._testSampleTaskHelper(p)


if __name__ == '__main__':
    tf.test.main()
tensorflow.test.main
9,597
import tensorflow as tf

# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    config=run_config)

if FLAGS.do_train:
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    train_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=True,
        batch_size=FLAGS.train_batch_size,
        use_hvd=FLAGS.use_hvd)
tensorflow.logging.info
9,598
import tensorflow as tf

# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
tensorflow.reduce_sum
9,599