Columns: seed (string, lengths 25-2.89k) | seed_api (string, lengths 14-102) | index (int64, 0-14.8k)
Each record below lists a seed code snippet, then its seed_api, then its index.
import tensorflow as tf

e_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/eval_hyper')
t_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')
tensorflow.get_collection
9,000
import tensorflow as tf

        self.num_bilstm = num_bilstm
        self.lstm_size = lstm_size
        self.bilstm_dropout_rate = bilstm_dropout_rate

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `Config` from a Python dictionary of parameters."""
        config = Config(vocab_size=None)
        for (key, value) in six.iteritems(json_object):
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `Config` from a json file of parameters."""
        with tf.gfile.GFile(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"


def create_model(config, is_training, input_ids, input_mask, segment_ids,
tensorflow.gfile.GFile
9,001
import tensorflow as tf

param_eta = tf.placeholder(dtype=tf.float32, shape=[], name="param_eta")
param_omega = tf.placeholder(dtype=tf.float32, shape=[], name="param_omega")
old_entropy = tf.placeholder(dtype=tf.float32, shape=[], name="old_entropy")

varphis = tf.placeholder(dtype=tf.float32, shape=[None, None], name="varphis")
Kt = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Kt")
prec = tf.placeholder(dtype=tf.float32, shape=[None, None], name="prec")
Waa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Waa")
Wsa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="Wsa")
wa = tf.placeholder(dtype=tf.float32, shape=[None, None], name="wa")
tensorflow.placeholder
9,002
import tensorflow as tf

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
tensorflow.train.batch
9,003
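The seed above builds a tf.train.batch op but never runs it. A minimal runnable sketch, assuming a TF 1.x runtime and made-up constant inputs (not from the original snippet); the batch op only yields values once queue runners are started:

import tensorflow as tf

# Illustrative inputs with fully known shapes.
image = tf.constant([1.0, 2.0, 3.0])
label = tf.constant(0)
images, labels = tf.train.batch([image, label], batch_size=4)  # shapes (4, 3) and (4,)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # feed the batching queue
    print(sess.run(labels))  # -> [0 0 0 0]
    coord.request_stop()
    coord.join(threads)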
import tensorflow as tf

if self.inputs.get_shape().ndims != 2:
    raise Exception("The input dimension must be rank 2, please reshape or flatten it")

n_in = int(self.inputs.get_shape()[-1])
with tf.variable_scope(name):
    W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
    if b_init is not None:
        try:
            b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
tensorflow.variable_scope
9,004
import tensorflow as tf """ metric_names = list(monitor_dict.keys()) def host_call_fn(global_step, *args): """actual host call function.""" step = global_step[0] with tf.contrib.summary.create_file_writer( logdir=model_dir, filename_suffix=".host_call").as_default(): with tf.contrib.summary.always_record_summaries(): for i, name in enumerate(metric_names): if reduce_fn is None: scalar = args[i][0] else:
tensorflow.contrib.summary.create_file_writer
9,005
from tensorflow.python.ops import math_ops

# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
    array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
    thresh_tiled)
pred_is_neg = math_ops.logical_not(pred_is_pos)

# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
label_is_neg = math_ops.logical_not(label_is_pos)
tensorflow.python.ops.math_ops.logical_not
9,006
import tensorflow as tf

def _create_dummy_vars():
    """Dummy vars for restore to work when not using TPU codepath."""
    var_names = set([v.name for v in tf.global_variables()])
    if "losses_avg/problem_0/total_loss:0" in var_names:
        return
    with tf.variable_scope("losses_avg"):
        with tf.variable_scope("problem_0"):
            for var_name in ["total", "extra", "training"]:
                tf.get_variable(
                    "%s_loss" % var_name, initializer=100.0, trainable=False)
    with tf.variable_scope("train_stats"):
tensorflow.variable_scope
9,007
import tensorflow as tf with tf.variable_scope("temp_conv") as scope: filter_shape = [3, embedding_size, 4, 64] W = tf.get_variable(name='W_1', shape=filter_shape, initializer=he_normal, regularizer=regularizer) paddings = [[0, 0], [1, 1], [0, 0], [0, 0]] cnn_inputs = tf.pad(cnn_inputs, paddings, "CONSTANT") #print("cnn_inputs shape:", cnn_inputs.shape) inputs = tf.nn.conv2d(cnn_inputs, W, strides=[1, 1, 1, 1], padding="VALID", name="first_conv") inputs = tf.layers.batch_normalization(inputs, axis=-1, training=self.is_training) inputs = tf.nn.relu(inputs, name="first_relu") #print("temp cnn output shape:", inputs.shape) inputs = tf.squeeze(inputs, axis=2) #print("squeeze shape", inputs.shape) #inputs = tf.nn.relu(inputs) print("Temp Conv", inputs.get_shape()) self.layers.append(inputs) # Conv Block 64 for i in range(num_layers[0]):
tensorflow.nn.relu
9,008
import tensorflow as tf

# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)

def update_scale():
    with tf.control_dependencies([perturb_for_adaption]):
        update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
tensorflow.nn.softmax
9,009
import tensorflow as tf

with tf.control_dependencies([tf.assert_less_equal(num_instances, self.capacity)]):
    indices = tf.range(self.memory_index, self.memory_index + num_instances) % self.capacity
tensorflow.range
9,010
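A minimal sketch of the ring-buffer indexing pattern above, assuming TF 1.x and made-up values (capacity 5, write pointer 3, 4 new instances):

import tensorflow as tf

capacity = tf.constant(5)
memory_index = tf.constant(3)
num_instances = tf.constant(4)
# tf.range builds [3, 4, 5, 6]; the modulo wraps the indices around the buffer.
indices = tf.range(memory_index, memory_index + num_instances) % capacity

with tf.Session() as sess:
    print(sess.run(indices))  # -> [3 4 0 1]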
from tensorflow.contrib.rnn.python.ops import core_rnn

    ]
    initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
    cell = rnn_cell.LSTMCell(
        num_units=num_units, initializer=initializer, state_is_tuple=True)
    multi_cell = rnn_cell.MultiRNNCell(
        [cell() for _ in range(num_layers)])
    outputs, final_state = core_rnn.static_rnn(
        multi_cell, inputs, dtype=dtypes.float32)
    trainable_variables = ops.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
    gradients = gradients_impl.gradients([outputs, final_state],
                                         trainable_variables)
    training_op = control_flow_ops.group(*gradients)
tensorflow.contrib.rnn.python.ops.core_rnn.static_rnn
9,011
from tensorflow.python.ops import array_ops

      features: features dict.

    Returns:
      Loss tensor.
    """
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)

    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name="loss")
    else:
      loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,))
      loss_weighted = math_ops.mul(
          loss_unweighted, array_ops.reshape(weight_tensor, shape=(-1,)))
      return math_ops.div(
          math_ops.reduce_sum(loss_weighted),
          math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
          name="loss")


class _RegressionTargetColumn(_TargetColumn):
  """_TargetColumn for regression."""

  def __init__(self, loss_fn, label_name, weight_column_name, target_dimension):
    super(_RegressionTargetColumn, self).__init__(
        loss_fn=loss_fn,
        num_label_columns=target_dimension,
        label_name=label_name,
tensorflow.python.ops.array_ops.reshape
9,012
import tensorflow as tf

        # Assuming we have one shard for logits.
        logits = tf.concat([recent_logits, logits[:, -1:]], 1)
        loss = sum([l for l in losses.values() if l is not None])
        return samples, logits, loss

    # Create an initial output tensor. This will be passed
    # to the infer_step, which adds one timestep at every iteration.
    if "partial_targets" in features:
        initial_output = tf.to_int64(features["partial_targets"])
        while len(initial_output.get_shape().as_list()) < 4:
            initial_output = tf.expand_dims(initial_output, 2)
        batch_size = common_layers.shape_list(initial_output)[0]
    else:
        batch_size = common_layers.shape_list(features["inputs"])[0]
        initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)

    # Hack: foldl complains when the output shape is less specified than the
    # input shape, so we confuse it about the input shape.
tensorflow.to_int64
9,013
import tensorflow as tf

#
# You need to modify the below cost function and optimizer so as to
# implement your own pre-train method.
#
# =====================================================================
lambda_l2_w = 0.004
learning_rate = 0.0001
logging.info("     lambda_l2_w: %f" % lambda_l2_w)
logging.info("     learning_rate: %f" % learning_rate)

# Mean-square-error i.e. quadratic-cost
mse = tf.reduce_sum(tf.squared_difference(y, x_recon), 1)
mse = tf.reduce_mean(mse)  # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean()
# mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), 1))
# mse = tf.reduce_mean(tf.squared_difference(y, x_recon))  # <haodong>: Error
# mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon)))  # <haodong>: Error

# Cross-entropy
# ce = cost.cross_entropy(y, x_recon)  # <haodong>: list , list , Error (only be used for softmax output)
# ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, x_recon))  # <haodong>: list , list , Error (only be used for softmax output)
# ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon))  # <haodong>: list , index , Error (only be used for softmax output)

L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \
    + tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2])  # faster than the code below
# L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2]))
tensorflow.reduce_mean
9,014
import tensorflow as tf

                    biases_regularizer=biases_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):
tensorflow.constant_initializer
9,015
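The fragment above only shows the biases_initializer argument. A minimal sketch of tf.constant_initializer on its own, assuming TF 1.x (the variable name is illustrative):

import tensorflow as tf

# Fills the variable with the given constant on initialization.
b = tf.get_variable('biases', shape=[4], initializer=tf.constant_initializer(0.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # -> [0. 0. 0. 0.]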
import tensorflow as tf

    References:
        Kolda, Tamara G., and Brett W. Bader. "Tensor decompositions and applications."
        SIAM review 51.3 (2009): 455-500.
    '''

    def _create_model(self, train_triples):
        # Count unique items to determine embedding matrix sizes
        head_cnt = len(set(train_triples[:,0]))
        rel_cnt = len(set(train_triples[:,1]))
        tail_cnt = len(set(train_triples[:,2]))
        init_sd = 1.0 / np.sqrt(self.embedding_size)

        # Embedding matrices for entities and relationship types
        head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd)
        rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)
        tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd)

        if self.maxnorm is not None:
            # Ensure maxnorm constraints are initially satisfied
            head_init = dense_maxnorm(head_init, self.maxnorm)
            rel_init = dense_maxnorm(rel_init, self.maxnorm)
            tail_init = dense_maxnorm(tail_init, self.maxnorm)

        self.head_embedding_vars = tf.Variable(head_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        self.tail_embedding_vars = tf.Variable(tail_init)

        # Embedding layer for each (head, rel, tail) triple being fed in as input
        head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)

        # Model output
tensorflow.truncated_normal
9,016
import tensorflow as tf

# Initialize hidden layer activities
if self.hidden_init == 'identity':
    l1_h2 = tf.identity(x)
    l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
    l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
elif self.hidden_init == 'random':
    l1_h2 = tf.random_normal(x_shape, dtype=self.dtype)
    l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype)
    l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype)
elif self.hidden_init == 'zeros':
    l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
    l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
tensorflow.random_normal
9,017
import tensorflow as tf

predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
                                        FLAGS.max_seq_length, tokenizer,
                                        predict_file)

tf.logging.info("***** Running prediction*****")
tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                len(predict_examples), num_actual_predict_examples,
                len(predict_examples) - num_actual_predict_examples)
tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
tensorflow.logging.info
9,018
import tensorflow as tf

      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None)
      mdl.BProp()
      self.assertEqual(decoder_theta, mdl.theta.decoder)

  def testFProp(self):
    with self.session(use_gpu=False):
      tf.set_random_seed(93820985)
      p = self._testParams()
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      tf.global_variables_initializer().run()
      test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())
      actual_var_names = [_.name for _ in tf.all_variables()]
tensorflow.set_random_seed
9,019
import tensorflow as tf

  def testCollectVarHistogram(self):
    with self.session(use_gpu=False, graph=tf.Graph()):
      p = self._testParams()
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)
      summary_utils.CollectVarHistogram(var_grads)

  def testGradientMult(self):
    with self.session(use_gpu=False, graph=tf.Graph()):
      p = self._testParams()
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)
      py_utils.ApplyGradMultiplier(var_grads, -1.1)

  def testLRDecay(self):
    with self.session(use_gpu=False, graph=tf.Graph()) as sess:
      p = self._testParams()
tensorflow.Graph
9,020
import tensorflow as tf

        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s

def _ln(x, g, b, e=1e-5, axes=[1]):
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x-u)/tf.sqrt(s+e)
    x = x*g+b
    return x

def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
tensorflow.nn.moments
9,021
import tensorflow as tf

    else:
        direct_mask = tf.less(head_idxs, dep_idxs)  # [bs, slh, sld]
    # [bs, slh, slh]
    rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
    attn_mask = tf.logical_and(direct_mask, rep_mask_tile)  # [bs, slh, sld]

    # tensor tile
    rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1])  # bs,slh,sld,vec
    with tf.variable_scope('attention'):  # bs,sl,sl,vec
        f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
        dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent')  # bs,sld,vec
        dependent_etd = tf.expand_dims(dependent, 1)  # bs,1,sld,vec
        head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head')  # bs,slh,vec
        head_etd = tf.expand_dims(head, 2)  # bs,slh,1,vec
tensorflow.expand_dims
9,022
import tensorflow as tf

    config=run_config,
    params={
        "batch_size": FLAGS.train_batch_size if FLAGS.do_train else FLAGS.eval_batch_size,
    },
)

if FLAGS.do_train and FLAGS.do_eval:
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Training batch size = %d", FLAGS.train_batch_size)
    train_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=True,
    )
    eval_input_fn = input_fn_builder(
        input_files=input_files,
tensorflow.logging.info
9,023
import tensorflow as tf

# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
tensorflow.train.start_queue_runners
9,024
import tensorflow as tf

output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
    train_op, update_learning_rate = optimization.create_optimizer(
        total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, adjust_lr,
        use_hvd, use_compression, use_fp16, clip, cos_decay, use_lamb,
        previous_train_steps, post_train_steps)

    logging_hook = tf.train.LoggingTensorHook(
        {"loss": total_loss, "learning_rate": update_learning_rate},
        every_n_iter=FLAGS.hooking_frequence)
    output_spec = tf.estimator.EstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
tensorflow.train.LoggingTensorHook
9,025
import tensorflow as tf

def _unique_chars(filename):
    """Returns the used alphabet as an array of strings."""
    counts = collections.Counter()
    with tf.gfile.Open(filename) as file_:
        for line in file_:
            counts.update(_split_string(line))
    alphabet = [k for (k, _) in counts.most_common(max_size)]
    alphabet.sort()
    return np.asarray(alphabet, dtype=np.object)

chars, = tf.py_func(_unique_chars, [filename], [tf.string])
char_to_id = tf.contrib.lookup.index_table_from_tensor(
    chars, num_oov_buckets=num_oov_buckets)
id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, " ")
return char_to_id, id_to_char

def characters(filename, batch_size, sequence_size):
    """Returns a dataset of characters from the given file."""

    def _to_chars(line):
        """string scalar -> Dataset of characters (string scalars)."""
        chars, = tf.py_func(_split_string, [line + "\n"], [tf.string])
        chars.set_shape([None])
        return tf.data.Dataset.from_tensor_slices(chars)
tensorflow.contrib.lookup.index_to_string_table_from_tensor
9,026
import tensorflow as tf

        generated_adversarial_task.append(task.goal_velocity)
        logger.info('Tasks dump!')
    assert (task_generator == 'fixed')
    test_summary['task'].append(task.goal_velocity)
    if FLAGS.task.reset_policy:
        # NOTE: reset policy and valuefunc
        logger.info("Resetting Policy")
        pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
        tf.get_default_session().run(tf.variables_initializer(policy.parameters()))
        pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])
        print ("pol_params:", np.linalg.norm(pol_params), "pol_params_after_reset:", np.linalg.norm(pol_params_after))
        logger.info("Resetting Valuefunc")
        tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))

    tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))
    tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))

    for p in warmup_policy.parameters(): p.invalidate()
    for p in warmup_vfn.parameters(): p.invalidate()
    for p in policy.parameters(): p.invalidate()
tensorflow.get_default_session
9,027
import tensorflow as tf

    if layout != 'NCHW':
        need_transpose = False

in_shapes = []
for layer in keras_model._input_layers:
    if tf.executing_eagerly():
        in_shapes.append(tuple(dim if dim is not None else 1 for dim in layer.input.shape))
    else:
        in_shapes.append(tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape))
tensorflow.executing_eagerly
9,028
import tensorflow as tf

  Returns:
    A Tensor of size 1, denoting the mean error between batches of quaternions.
  """
  with tf.name_scope(name):
    logcost = log_quaternion_loss_batch(predictions, labels)
    logcost = tf.reduce_sum(logcost, [0])
tensorflow.name_scope
9,029
import tensorflow as tf

if encoder.position_bias and input_length is not None and time is not None:
    src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1])
    trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps])
    src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps])  # - 1
    pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2))
    pos_feats = tf.log(1 + pos_feats)

    y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a')

if encoder.attn_filters:
    filter_shape = [encoder.attn_filter_length * 2 + 1, 1, 1, encoder.attn_filters]
tensorflow.log
9,030
import tensorflow as tf

extra_zeros = tf.zeros((batch_size, self.max_phrase_size))
vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1)  # [batch_size, extended_vsize]
if self.options.add_first_word_prob_for_phrase:  # add prob of the first word to each phrase
    attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words,
                                                   self.phrase_starts, vocab_dist, attn_dist)

# match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize]
batch_nums = tf.range(0, limit=batch_size)  # shape (batch_size)
batch_nums = tf.expand_dims(batch_nums, axis=1)  # shape (batch_size, 1)
batch_nums = tf.tile(batch_nums, [1, passage_length])  # shape (batch_size, passage_length)
step_nums = tf.range(0, limit=passage_length)  # [passage_length]
step_nums = tf.expand_dims(step_nums, axis=0)  # shape (1, passage_length)
step_nums = tf.tile(step_nums, [batch_size, 1])  # shape (batch_size, passage_length)
indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2)  # shape (batch_size, passage_length, 3)
indices = tf.reshape(indices, [-1, 3])  # [batch_size * passage_length, 3]
indices = tf.cast(indices, tf.int64)
shape = [batch_size, passage_length, extended_vsize]
tensorflow.tile
9,031
import tensorflow as tf

        Args:
            variables: List of variables.
            deltas: List of deltas of same length.

        Returns:
            The step-applied operation. A tf.group of tf.assign_add ops.
        """
        if len(variables) != len(deltas):
            raise TensorforceError("Invalid variables and deltas lists.")

        assignments = list()
        for variable, delta in zip(variables, deltas):
            assignments.append(tf.assign_add(ref=variable, value=delta))

        with tf.control_dependencies(control_inputs=assignments):
            return util.no_operation()

    def tf_minimize(self, variables, **kwargs):
        """
        Performs an optimization step.

        Args:
            variables: List of variables to optimize.
            **kwargs: Additional optimizer-specific arguments. The following arguments are
                used by some optimizers:
                - arguments: Dict of arguments for callables, like fn_loss.
                - fn_loss: A callable returning the loss of the current model.
                - fn_reference: A callable returning the reference values, in case of a comparative
tensorflow.control_dependencies
9,032
import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")
z = tf.placeholder(tf.float32, shape=[None, 4, 4, 128], name="z")

# pred_annotation, logits = inference(image, keep_probability,z)
# tf.summary.image("input_image", image, max_outputs=2)
# tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
# tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
# loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
#                                                                       labels=tf.squeeze(annotation, squeeze_dims=[3]),
#                                                                       name="entropy")))

mask_ = tf.ones([FLAGS.batch_size, 64, 64, 3])
mask = tf.pad(mask_, [[0, 0], [32, 32], [32, 32], [0, 0]])

mask2__ = tf.ones([FLAGS.batch_size, 78, 78, 3])
mask2_ = tf.pad(mask2__, [[0, 0], [25, 25], [25, 25], [0, 0]])
mask2 = mask2_ - mask

pred_annotation, logits = inference((1 - mask) * image + mask * 255, keep_probability, z)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)

# loss0 = tf.reduce_mean(tf.abs(z))
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square((image - logits)), [1, 2, 3])))
tensorflow.pad
9,033
import tensorflow as tf

dummy_scores = tf.zeros([k, 1])  # [k, 1]
for i in range(self.config["coref_depth"]):
    with tf.variable_scope("coref_layer", reuse=(i > 0)):
        top_antecedent_emb = tf.gather(top_span_emb, top_antecedents)  # [k, c, emb]
        top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(
            top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets,
            top_span_speaker_ids, genre_emb)  # [k, c]
        top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1))  # [k, c + 1]
        top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1)  # [k, c + 1, emb]
        attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1)  # [k, emb]
        with tf.variable_scope("f"):
            f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1)))  # [k, emb]
            top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb  # [k, emb]
tensorflow.concat
9,034
import tensorflow as tf

rot = tf.reshape(tf.gather(labeled_rotations[i], [0, 2, 6, 8]), [2, 2])
min_x = tf.cast(0.0 - labeled_sizes[i][0] / 2.0, dtype=tf.float32)
max_x = tf.cast(0.0 + labeled_sizes[i][0] / 2.0, dtype=tf.float32)
tensorflow.cast
9,035
import tensorflow as tf

train_op = tf.group(apply_gradient_op, variables_averages_op)

# Create a saver.
saver = tf.train.Saver(tf.global_variables())

# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)

# Build an initialization operation to run below.
init = tf.global_variables_initializer()

# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
    allow_soft_placement=True,
    log_device_placement=FLAGS.log_device_placement))
sess.run(init)

# Start the queue runners.
tf.train.start_queue_runners(sess=sess)

summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

for step in xrange(FLAGS.max_steps):
    start_time = time.time()
    _, loss_value = sess.run([train_op, loss])
    duration = time.time() - start_time
tensorflow.ConfigProto
9,036
import tensorflow as tf

            loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
            return {
                "eval_accuracy": accuracy,
                "eval_loss": loss,
            }

        eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example])
        output_spec = tf.estimator.EstimatorSpec(
            mode=mode,
            loss=total_loss,
            eval_metric_ops=eval_metrics,
            scaffold=scaffold_fn)
    else:
        output_spec = tf.estimator.EstimatorSpec(
            mode=mode,
            predictions={"probabilities": probabilities},
            scaffold=scaffold_fn)
    return output_spec

return model_fn

# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
tensorflow.estimator.EstimatorSpec
9,037
import tensorflow as tf

    parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1],
                      other_inputs=other_inputs, training=training)

    attention_states, encoder_state, encoder_input_length[:1] = multi_encoder(
        encoder_input_length=encoder_input_length[:1], **parameters)

    if chaining_stop_gradient:
        attns = tf.stop_gradient(attns)
        states = tf.stop_gradient(states)
        decoder_outputs = tf.stop_gradient(decoder_outputs)

    if chaining_strategy == 'concat_attns':
        attention_states[0] = tf.concat([attention_states[0], attns], axis=2)
    elif chaining_strategy == 'concat_states':
        attention_states[0] = tf.concat([attention_states[0], states], axis=2)
    elif chaining_strategy == 'sum_attns':
        attention_states[0] += attns
tensorflow.stop_gradient
9,038
import tensorflow as tf

add = tf.add(in0, in1, "ADD")
sub = tf.subtract(in0, in1, "SUB")

# Cast or convert result to the output dtype.
if tf_output0_dtype == tf.string:
    cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
    cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
    cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
tensorflow.dtypes.as_string
9,039
import tensorflow as tf """Build the inference graph using CUDNN cell.""" inputs = tf.transpose(inputs, [1, 0, 2])
tensorflow.transpose
9,040
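A minimal sketch of what the [1, 0, 2] permutation above does, with made-up shapes: it converts batch-major [batch, time, features] input to the time-major layout that CuDNN-style cells expect.

import tensorflow as tf

batch_major = tf.zeros([32, 10, 8])                # [batch, time, features]
time_major = tf.transpose(batch_major, [1, 0, 2])  # [time, batch, features]
print(time_major.shape)  # -> (10, 32, 8)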
import tensorflow as tf

shapes = {movielens.USER_COLUMN: tf.TensorShape([batch_size]),
          movielens.ITEM_COLUMN: tf.TensorShape([batch_size])}
tensorflow.TensorShape
9,041
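A minimal sketch of tf.TensorShape itself, with made-up values; it describes static shapes such as the output_shapes dict above:

import tensorflow as tf

shape = tf.TensorShape([16])
print(shape.as_list())                                   # -> [16]
print(shape.is_compatible_with(tf.TensorShape([None])))  # -> True (None matches any size)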
import tensorflow as tf

    x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")

policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))

return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))

def clip_logits(logits, config):
tensorflow.exp
9,042
import tensorflow as tf

    have_data = True
    load_data_during_test()
    logger.info(f'Load the task{snapshot} saver!')
    saver.load_state_dict(np.load(f'./{setting}/{taskname}.task{snapshot}.saver.npy', allow_pickle=True)[()])
    logger.info('Update all copies! (lazymodel, normalizers_copy)')
    tf.get_default_session().run(sync_model_to_lazymodel)
    tf.get_default_session().run(copy_normalizers)
    logger.info('Loaded normalizers:')
    load_norm = tf.get_default_session().run(normalizers_parameters)
    logger.info(load_norm)
    TASK_NUM = train_tasknum
    TEST_TASK_NUM = 0

########################## debug #########################
tensorflow.get_default_session
9,043
import tensorflow as tf

        output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
            the ranking score of the corresponding example.
        labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
            relevant example.
        propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing
            the weight of each element.
        name: A string used as the name for this variable scope.

    Returns:
        (tf.Tensor) A single value tensor containing the loss.
    """
    loss = None
    with tf.name_scope(name, "click_softmax_cross_entropy", [output]):
        label_dis = labels*propensity_weights / tf.reduce_sum(labels*propensity_weights, 1, keep_dims=True)
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels*propensity_weights, 1)
    return tf.reduce_sum(loss) / tf.reduce_sum(labels*propensity_weights)

def click_loglikelihood(self, labels, propensity, train_output, name=None):
    """Computes listwise softmax loss with propensity weighting.

    Args:
        output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
            the ranking score of the corresponding example.
        labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
            relevant example.
        propensity_weights: (tf.Tensor) A tensor of the same shape as `output` containing
            the weight of each element.
        name: A string used as the name for this variable scope.
tensorflow.nn.softmax_cross_entropy_with_logits
9,044
import tensorflow as tf

self.MINIBATCH = 64
self.EPOCHS = 8
self.EPSILON = 0.2
self.EPS_LEN = 100000

# GPU setup
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                        device_count={'GPU': gpu})
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.5

# Placeholders
self.sess = tf.Session(config=config)
self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
self.a_bound = (env.action_space.high - env.action_space.low) / 2
self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')

# Dataset with experience replay
self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
                                                   'rewards': self.rewards, 'advantage': self.advantage})
self.dataset = self.dataset.shuffle(buffer_size=10000)
self.dataset = self.dataset.batch(self.MINIBATCH)
self.dataset = self.dataset.cache()
self.dataset = self.dataset.repeat(self.EPOCHS)
self.data_iter = self.dataset.make_initializable_iterator()
batch = self.data_iter.get_next()

# Call ppo net
pi_old, pi_old_params = self.build_anet(batch['state'], 'oldpi')
pi, pi_params = self.build_anet(batch['state'], 'pi')
pi_eval, _ = self.build_anet(self.state, 'pi', reuse=True)
tensorflow.placeholder
9,045
import tensorflow as tf

tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                len(predict_examples), num_actual_predict_examples,
                len(predict_examples) - num_actual_predict_examples)
tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
tensorflow.logging.info
9,046
from tensorflow.python.framework import ops

      in which case a different quantized type may be used.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAdd") as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)

ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)
tensorflow.python.framework.ops.op_scope
9,047
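ops.op_scope is an early internal API; later TF 1.x code expresses the same pattern with tf.name_scope(name, default_name, values). A hedged sketch of that pattern (names illustrative, not the library's actual BiasAdd implementation):

import tensorflow as tf

value = tf.constant([1.0, 2.0])
bias = tf.constant([0.5, 0.5])
# Rough equivalent of ops.op_scope([value, bias], None, "BiasAdd").
with tf.name_scope(None, "BiasAdd", [value, bias]) as scope:
    out = tf.add(value, bias, name=scope)
print(out.name)  # e.g. "BiasAdd:0"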
import tensorflow as tf

def main(job_dir, **args):
    ## Setting up the path for saving logs
    logs_dir = job_dir + 'logs/'
    data_dir = "gs://deeplearningteam11/data"

    print("Current Directory: " + os.path.dirname(__file__))
    print("Lets copy the data to: " + os.path.dirname(__file__))
    os.system("gsutil -m cp -r " + data_dir + "  " + os.path.dirname(__file__) + " > /dev/null 2>&1 ")
    #exit(0)

    with tf.device('/device:GPU:0'):
        # 1: Build the Keras model.
        K.clear_session()  # Clear previous models from memory.
        model = ssd_300(image_size=(img_height, img_width, img_channels),
                        n_classes=n_classes,
                        mode='training',
                        l2_regularization=0.0005,
                        scales=scales,
                        aspect_ratios_per_layer=aspect_ratios,
                        two_boxes_for_ar1=two_boxes_for_ar1,
tensorflow.device
9,048
import tensorflow as tf

querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
tensorflow.shape
9,049
import tensorflow as tf
tensorflow.reduce_sum
9,050
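This seed stops at the import, so it never demonstrates its API. A minimal sketch of tensorflow.reduce_sum, assuming a TF 1.x runtime:

import tensorflow as tf

x = tf.constant([[1, 1, 1],
                 [1, 1, 1]])
total = tf.reduce_sum(x)           # 6: sum over all elements
per_column = tf.reduce_sum(x, 0)   # [2 2 2]: collapse the rows
per_row = tf.reduce_sum(x, 1)      # [3 3]: collapse the columns

with tf.Session() as sess:
    print(sess.run([total, per_column, per_row]))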
import tensorflow as tf

    self.loss_func = self.click_weighted_softmax_cross_entropy_loss
elif self.hparams.loss_func == 'click_weighted_log_loss':
    self.loss_func = self.click_weighted_log_loss
elif self.hparams.loss_func == 'click_weighted_pairwise_loss':
    self.loss_func = self.click_weighted_pairwise_loss
else:  # softmax loss without weighting
    self.loss_func = self.softmax_loss

# Compute rank loss
reshaped_train_labels = tf.transpose(tf.convert_to_tensor(train_labels))  # reshape from [rank_list_size, ?] to [?, rank_list_size]
self.propensity_weights = self.get_normalized_weights(self.logits_to_prob(self.propensity))
self.rank_loss = self.loss_func(train_output, reshaped_train_labels, self.propensity_weights)
pw_list = tf.unstack(self.propensity_weights, axis=1)  # Compute propensity weights
self.click_metrics = self.click_loglikelihood(reshaped_train_labels,
                                              self.propensity, train_output)
tf.summary.scalar('click_metrics', self.click_metrics, collections=['train'])
for i in range(len(pw_list)):
    tf.summary.scalar('Inverse Propensity weights %d' % i, tf.reduce_mean(pw_list[i]), collections=['train'])
tf.summary.scalar('Rank Loss', tf.reduce_mean(self.rank_loss), collections=['train'])

# Compute examination loss
self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output))
self.exam_loss = self.loss_func(self.propensity, reshaped_train_labels, self.relevance_weights)
rw_list = tf.unstack(self.relevance_weights, axis=1)  # Compute propensity weights
tensorflow.unstack
9,051
import tensorflow as tf

if use_bfloat16:
    preprocessed_resized_image = tf.cast(
        preprocessed_resized_image, tf.bfloat16)

tensor_dict[fields.InputDataFields.image] = tf.squeeze(
    preprocessed_resized_image, axis=0)
tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze(
tensorflow.squeeze
9,052
from tensorflow.python.framework import tensor_shape

      return_dims.append(dim_x)
    elif dim_x.value == dim_y.value:
      # The dimensions are compatible, so output is the same size in that
      # dimension.
      return_dims.append(dim_x.merge_with(dim_y))
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return [tensor_shape.TensorShape(return_dims)]

@ops.RegisterShape("AddN")
def _AddNShape(op):
  merged_shape = tensor_shape.unknown_shape()
  for input_ in op.inputs:
    merged_shape = merged_shape.merge_with(input_.get_shape())
  return [merged_shape]

@ops.RegisterShape("Select")
def _SelectShape(op):
  # All three inputs must have the same shape.
  return [op.inputs[0].get_shape()
          .merge_with(op.inputs[1].get_shape())
          .merge_with(op.inputs[2].get_shape())]
tensorflow.python.framework.tensor_shape.unknown_shape
9,053
import tensorflow as tf

cost = rewards ** 2
cost = tf.reduce_sum(cost * weights, axis=1)
tensorflow.reduce_sum
9,054
import tensorflow as tf

                      [0, 0, 0, 0, 1, 1, 1, 1],
                      [0, 0, 0, 0, 1, 1, 1, 1]], dtype=tf.float32)
    masks = tf.stack([mask0, mask1, mask2])
    return masks

  def test_map_labels_to_0_to_n1(self):
    labels = tf.constant([[-1, 2, 5],
                          [0, 9, 1]], dtype=tf.int32)
    labels_0_n = isu.map_labels_to_0_to_n(labels)
    expected_labels_0_n = tf.constant([[-1, 2, 3],
                                       [0, 4, 1]], dtype=tf.int32)
    self.assertAllEqual(labels_0_n.numpy(), expected_labels_0_n.numpy())

  def test_map_labels_to_0_to_n2(self):
    labels = tf.constant([[-1, 1, 2],
                          [1, 1, 2]], dtype=tf.int32)
    labels_0_n = isu.map_labels_to_0_to_n(labels)
    expected_labels_0_n = tf.constant([[-1, 0, 1],
                                       [0, 0, 1]], dtype=tf.int32)
tensorflow.constant
9,055
import tensorflow as tf

    #rf = 1,nx=emb,nf=3*emb
    w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
    b = tf.get_variable("b", [nf], initializer=b_init)
    if rf == 1:  #faster 1x1 conv
        c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, shape_list(x)[:-1]+[nf])
    else:  #was used to train LM
        c = tf.nn.conv1d(x, w, stride=1, padding=pad)+b
    return c

def attn(x, scope, n_state, n_head, train=False, scale=False):
    assert n_state%n_head==0
    with tf.variable_scope(scope):
        #c [-1,n_ctx,3*emb]
        c = conv1d(x, 'c_attn', n_state*3, 1, train=train)
        #q,k,v [-1,n_ctx,emb]
        q, k, v = tf.split(c, 3, 2)
        #q [-1,head,n_ctx,emb] v [-1,head,emb,n_ctx] v [-1,head,n_ctx,emb]
        q = split_heads(q, n_head)
        k = split_heads(k, n_head, k=True)
        v = split_heads(v, n_head)
        #a [-1,head,n_ctx,emb]
        a = _attn(q, k, v, train=train, scale=scale)
        #a [-1,n_ctx,head,emb]
        a = merge_heads(a)
        #a [-1,n_ctx,emb]
        a = conv1d(a, 'c_proj', n_state, 1, train=train)
        a = dropout(a, resid_pdrop, train)
        return a

def mlp(x, scope, n_state, train=False):
tensorflow.split
9,056
import tensorflow as tf
import numpy as np
from collections import deque

def sample(logits):
    noise = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(noise)), 1)

def cat_entropy(logits):
    a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
    ea0 = tf.exp(a0)
    z0 = tf.reduce_sum(ea0, 1, keepdims=True)
    p0 = ea0 / z0
    return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)

def cat_entropy_softmax(p0):
    return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
tensorflow.reduce_max
9,057
from tensorflow.python.framework import constant_op

  @property
  def beta(self):
    """Scale parameter."""
    return self._beta

  def _batch_shape(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.alpha), array_ops.shape(self.beta))

  def _get_batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.alpha.get_shape(), self.beta.get_shape())

  def _event_shape(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    """See the documentation for tf.random_gamma for more details."""
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)]
                                           if self.validate_args else [], x)
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
tensorflow.python.framework.constant_op.constant
9,058
import tensorflow as tf

    # ratio decay when mode = "scheduled"
    gradient_norms = [tf.norm(grad) for grad in model.gradients]
    tf.summary.histogram("gradient_norm", gradient_norms)
    # visualize gradients (in case of explosion)
    tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms))

    return tf.summary.merge_all()
tensorflow.reduce_max
9,059
import tensorflow as tf

else:
    return tf.assert_equal(shape_a, shape_b)
tensorflow.assert_equal
9,060
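A minimal sketch of how the tf.assert_equal op above is typically used, assuming TF 1.x; the assertion only fires at run time when attached to the graph via control_dependencies:

import tensorflow as tf

shape_a = tf.constant([2, 3])
shape_b = tf.constant([2, 3])
assert_op = tf.assert_equal(shape_a, shape_b)

with tf.control_dependencies([assert_op]):
    x = tf.ones([2, 3]) * 2.0  # runs only if the assertion holds

with tf.Session() as sess:
    print(sess.run(x))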
import tensorflow as tf

def get_conv_dimension(filter_list):
    with tf.Graph().as_default():
        with tf.Session() as sess:
tensorflow.Graph
9,061
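A minimal sketch of the tf.Graph().as_default() pattern above: it scopes op construction to a fresh graph so a helper function does not pollute the caller's default graph.

import tensorflow as tf

with tf.Graph().as_default() as g:
    c = tf.constant(42)
    assert c.graph is g  # ops created inside the block belong to g

with tf.Session(graph=g) as sess:
    print(sess.run(c))  # -> 42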
from tensorflow.python.ops import array_ops

        self._target_column.num_label_columns)],
    array_ops.reshape(centered_bias, [-1]))
tensorflow.python.ops.array_ops.reshape
9,062
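The fragment above is too short to run on its own. A minimal sketch of the internal array_ops.reshape, which behaves like the public tf.reshape ([-1] flattens, as with centered_bias above); assumes TF 1.x:

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1, 2], [3, 4]])
flat = array_ops.reshape(x, [-1])  # -> shape (4,)

with tf.Session() as sess:
    print(sess.run(flat))  # -> [1 2 3 4]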
import tensorflow as tf

sess.run(zero_similar.initializer)
print(sess.run(ones_similar))
print(sess.run(zero_similar))

fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
sess.run(fill_var.initializer)
print(sess.run(fill_var))

const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))
sess.run(const_var.initializer)
sess.run(const_fill_var.initializer)
print(sess.run(const_var))
print(sess.run(const_fill_var))

linear_var = tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3))  # Generates [0.0, 0.5, 1.0] includes the end
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3))  # Generates [6, 9, 12] doesn't include the end
sess.run(linear_var.initializer)
sess.run(sequence_var.initializer)
print(sess.run(linear_var))
print(sess.run(sequence_var))

rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
runif_var = tf.random_uniform([row_dim, col_dim], minval=0, maxval=4)
print(sess.run(rnorm_var))
print(sess.run(runif_var))

ops.reset_default_graph()
sess = tf.Session()
my_var = tf.Variable(tf.zeros([1,20]))
tensorflow.linspace
9,063
import tensorflow as tf

y1_f = tf.to_float(y1)
z0_f = tf.to_float(z0)
z1_f = tf.to_float(z1)

# Check the out-of-boundary case.
x0_valid = tf.to_float(
    tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))
x1_valid = tf.to_float(
    tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))
y0_valid = tf.to_float(
    tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))
y1_valid = tf.to_float(
    tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))
z0_valid = tf.to_float(
    tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))
z1_valid = tf.to_float(
    tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))

w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *
tensorflow.less_equal
9,064
import tensorflow as tf

self.char_mat = tf.get_variable(
    "char_mat", initializer=tf.constant(char_mat, dtype=tf.float32))

self.c_mask = tf.cast(self.c, tf.bool)
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)

if opt:
    # we have to hardcode the max batch size here! use the batch size from the generator as this will be used for PG
    N, CL = config.batch_size if not self.demo else config.batch_size, config.char_limit
    self.c_maxlen = tf.reduce_max(self.c_len)
    self.q_maxlen = tf.reduce_max(self.q_len)
    self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen])
    self.q = tf.slice(self.q, [0, 0], [N, self.q_maxlen])
    self.c_mask = tf.slice(self.c_mask, [0, 0], [N, self.c_maxlen])
    self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen])
    self.ch = tf.slice(self.ch, [0, 0, 0], [N, self.c_maxlen, CL])
    self.qh = tf.slice(self.qh, [0, 0, 0], [N, self.q_maxlen, CL])
    self.y1 = tf.argmax(tf.slice(self.y1, [0, 0], [N, self.c_maxlen]), axis=-1)
    self.y2 = tf.argmax(tf.slice(self.y2, [0, 0], [N, self.c_maxlen]), axis=-1)
else:
    self.c_maxlen, self.q_maxlen = config.para_limit, config.ques_limit
tensorflow.reduce_max
9,065
import tensorflow as tf

                                     use_xavier=use_xavier,
                                     stddev=stddev,
                                     wd=weight_decay)

    stride_h, stride_w = stride

    # from slim.convolution2d_transpose
    def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
        dim_size *= stride_size
        if padding == 'VALID' and dim_size is not None:
            dim_size += max(kernel_size - stride_size, 0)
        return dim_size

    # calculate output shape
    batch_size = tf.shape(inputs)[0]
    height = tf.shape(inputs)[1]
    width = tf.shape(inputs)[2]
    out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
    out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
    output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)

    outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
                                     [1, stride_h, stride_w, 1],
                                     padding=padding)
    biases = _variable_on_cpu('biases', [num_output_channels],
                              tf.constant_initializer(0.0))
    outputs = tf.nn.bias_add(outputs, biases)
tensorflow.shape
9,066
import tensorflow as tf "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else:
tensorflow.contrib.tpu.TPUEstimatorSpec
9,067
import tensorflow as tf

logcost = log_quaternion_loss_batch(predictions, labels)
logcost = tf.reduce_sum(logcost, [0])
logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss')
return logcost
tensorflow.multiply
9,068
import tensorflow as tf

with tf.variable_scope(scope, reuse=reuse):
    observations_ph = make_obs_ph("observation")
    stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
    update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
    update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
    update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
    reset_ph = tf.placeholder(tf.bool, (), name="reset")

    eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
    param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
    param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)

    # Unmodified Q.
tensorflow.placeholder
9,069
import tensorflow as tf

if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_train_eval:
    raise ValueError("At least one of `do_train` or `do_eval` must be True.")

bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

tf.gfile.MakeDirs(FLAGS.output_dir)

if FLAGS.recover_dir is not None:
    if FLAGS.use_hvd:
        FLAGS.recover_dir = FLAGS.recover_dir if hvd.rank() == 0 else os.path.join(FLAGS.recover_dir, str(hvd.rank()))
    path_ckpt = os.path.join(FLAGS.output_dir, "checkpoint")
    path_ckpt_input = os.path.join(FLAGS.output_dir, "checkpoint_input")

    if FLAGS.ckpt_no is not None and not tf.gfile.Exists(path_ckpt):
        with tf.gfile.GFile(path_ckpt, "w") as writer:
            writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))
            writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))

    if FLAGS.ckpt_no_input is not None and not tf.gfile.Exists(path_ckpt_input):
        with tf.gfile.GFile(path_ckpt_input, "w") as writer:
            writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))
            writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input)))

if FLAGS.use_hvd and hvd.rank() == 0 and (FLAGS.do_train or FLAGS.do_train_eval):
    (cpath, cname) = os.path.split(FLAGS.bert_config_file)
    tf.gfile.Copy(FLAGS.bert_config_file, os.path.join(FLAGS.output_dir, cname), True)
tensorflow.gfile.Exists
9,070
import tensorflow as tf

    rotations_y = tf.reshape(rotations_y, [-1, 1])
    labeled_boxes = tf.concat([sample['translations_3d'], sample['sizes_3d'], rotations_y], axis=1)

# Get predicted boxes
predicted_boxes = detections['detection_boxes']
if metric.threed:
    rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(
        tf.reshape(detections['rotations_3d'][i], [3, 3]), 1) for i in range(num_boxes)], axis=0)
    rotations_y = tf.reshape(rotations_y, [-1, 1])
    predicted_boxes = tf.concat([detections['translations_3d'], detections['sizes_3d'], rotations_y], axis=1)

labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)
predicted_classes = tf.cast(detections['detection_classes'], tf.int64)
tensorflow.reshape
9,071
import tensorflow as tf

# pylint: disable=g-long-lambda
@test_case.named_parameters(
    dict(
        testcase_name='fixed_len_int',
        make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.int64, (None,))},
        feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}),
    dict(
        testcase_name='fixed_len_string',
        make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.string, (None,))},
        feature_spec={'x': tf.io.FixedLenFeature([], tf.string)}),
    dict(
        testcase_name='fixed_len_float',
        make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.float32, (None,))},
        feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)}),
    dict(
        testcase_name='override',
        make_tensors_fn=_make_tensors_with_override,
        feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)},
        domains={'x': schema_pb2.IntDomain(is_categorical=True)}),
tensorflow.io.FixedLenFeature
9,072
import tensorflow as tf

rep_tensor_comp = tf.concat([rep_tensor, tf.zeros([bs, comp_len, input_dim], tf.float32)], 1)
rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1)

rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim])  # bs,bn,bl,d
rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len])  # bs,bn,bl

# non-linear
rep_map = bn_dense_layer(rep_tensor_split, ivec, True, 0., 'bn_dense_map', activation,
                         False, wd, keep_prob, is_train)  # bs,bn,bl,vec
rep_map_tile = tf.tile(tf.expand_dims(rep_map, 2), [1, 1, block_len, 1, 1])  # bs,bn,bl,bl,vec
# rep_map_dp = dropout(rep_map, keep_prob, is_train)
bn = block_num
bl = block_len

with tf.variable_scope('self_attention'):
    # @2.self-attention in block
    # mask generation
    sl_indices = tf.range(block_len, dtype=tf.int32)
    sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)
    if direction == 'forward':
        direct_mask = tf.greater(sl_row, sl_col)  # bl,bl
    else:
        direct_mask = tf.greater(sl_col, sl_row)  # bl,bl
    direct_mask_tile = tf.tile(
        tf.expand_dims(tf.expand_dims(direct_mask, 0), 0), [bs, bn, 1, 1])  # bs,bn,bl,bl
    rep_mask_tile_1 = tf.tile(tf.expand_dims(rep_mask_split, 2), [1, 1, bl, 1])  # bs,bn,bl,bl
    rep_mask_tile_2 = tf.tile(tf.expand_dims(rep_mask_split, 3), [1, 1, 1, bl])  # bs,bn,bl,bl
    rep_mask_tile = tf.logical_and(rep_mask_tile_1, rep_mask_tile_2)
tensorflow.variable_scope
9,073
import tensorflow as tf

    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s

def _ln(x, g, b, e=1e-5, axes=[1]):
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x-u)/tf.sqrt(s+e)
    x = x*g+b
    return x
tensorflow.nn.sigmoid
9,074
import tensorflow as tf

s_h0, s_h1, s_h2, s_h3 = \
    int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3)
s_w0, s_w1, s_w2, s_w3 = \
    int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3)

def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):
    z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))
    h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)
    import IPython
    IPython.embed()
    h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),
                        [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))
    h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),
                        [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))
tensorflow.reshape
9,075
import tensorflow as tf

x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
x2 = tf.where(z2 > self.epsilon, x2, z2)
ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj))
return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])
tensorflow.zeros_like
9,076
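A minimal sketch of the tf.zeros_like fallback used in the tf.where call above, with made-up input, assuming TF 1.x:

import tensorflow as tf

t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
z = tf.zeros_like(t)  # same shape and dtype as t, all zeros

with tf.Session() as sess:
    print(sess.run(z))  # -> [[0. 0.] [0. 0.]]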
import tensorflow as tf "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if clip: log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6)) else: log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss)
tensorflow.reshape
9,077
import tensorflow as tf

# Create the policy
# first return value corresponds to deterministic actions
# policy_out corresponds to stochastic actions, used for training
# logp_pi is the log probability of actions taken by the policy
self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
# Monitor the entropy of the policy,
# this is not used for training
self.entropy = tf.reduce_mean(self.policy_tf.entropy)

self.obs_ph, self.actions_ph, self.deterministic_actions_ph = self._get_pretrain_placeholders()

# Use two Q-functions to improve performance by reducing overestimation bias.
qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
                                                 create_qf=True, create_vf=True)
qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph,
tensorflow.reduce_mean
9,078
import tensorflow as tf

eps = 1e-6

# Notation: (var) = batch variable, (var)s = sequence variable,
# (var)_i = variable index by action at step i

# shape is [n_envs * (n_steps + 1)]
if continuous:
    value = train_model.value_flat
else:
    value = tf.reduce_sum(train_model.policy_proba * train_model.q_value, axis=-1)

rho, rho_i_ = None, None
if continuous:
    action_ = strip(train_model.proba_distribution.sample(), self.n_envs, self.n_steps)
    distribution_f = tf.contrib.distributions.MultivariateNormalDiag(
        loc=strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps),
        scale_diag=strip(train_model.proba_distribution.logstd, self.n_envs, self.n_steps))
tensorflow.reduce_sum
9,079
import tensorflow as tf

  def state_update_aggregation_modes(self):
    """See base class."""
    return {
        self.NORM_STATE_UPDATE_KEY: encoding_stage.StateAggregationMode.STACK
    }

  def initial_state(self):
    """See base class."""
    return {self.FACTOR_STATE_KEY: tf.constant(1.0)}

  # pylint: disable=g-doc-args,g-doc-return-or-yield
  def update_state(self, state, state_update_tensors):
    """Updates the state (see base class).

    This method illustrates how the implementation can handle state update
    based on a single encoding, or based on a multiple encodings collectively.
tensorflow.constant
9,080
import tensorflow as tf

    >>> samples.dtype
    dtype('float64')
    >>> m.dtype = tf.float32
    >>> samples = m.compute_prior_samples(test_points, 1, 2)
    >>> samples.dtype
    dtype('float32')
    """
    mu, var = self.build_prior_mean_var(test_points, num_latent, True)
    jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06
    L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter)
    V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples]
    V = tf.random_normal(V_shape, dtype=L.dtype)
    samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
    return tf.transpose(samples)

@autoflow((tf.float64, [None, None]), (tf.float64, [None, None]),
          (tf.float64, [None, None]))
def compute_posterior_mean_var(self, X, Y, test_points):
    """Computes the means and variances of the posterior(s).

    This is just an autoflowed version of
tensorflow.shape
9,081
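The snippet above sizes its jitter and noise tensors from the dynamic shape of `mu`; a minimal sketch of that tf.shape pattern, assuming TensorFlow 1.x with shapes unknown at graph-construction time:

import tensorflow as tf

mu = tf.placeholder(tf.float64, [None, None])
n = tf.shape(mu)[0]  # dynamic leading dimension, an int32 scalar tensor
noise = tf.random_normal(tf.stack([n, 3]), dtype=mu.dtype)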
from tensorflow.python.framework import tensor_shape filter_shape = tensor_util.constant_value(op.inputs[1]) if filter_shape is not None: return [tensor_shape.TensorShape(filter_shape.tolist())] else: return [tensor_shape.unknown_shape(ndims=4)] @ops.RegisterShape("DepthwiseConv2dNativeBackpropInput") def _DepthwiseConv2dNativeBackpropInputShape(op): """Shape function for the DepthwiseConv2dNativeBackpropInput op.""" input_shape = tensor_util.constant_value(op.inputs[0]) if input_shape is not None: return [tensor_shape.TensorShape(input_shape.tolist())] else: return [tensor_shape.unknown_shape(ndims=4)] @ops.RegisterShape("MaxPoolGrad") @ops.RegisterShape("MaxPoolGradWithArgmax") def _MaxPoolGradShape(op): """Shape function for the MaxPoolGrad op.""" orig_input_shape = op.inputs[0].get_shape().with_rank(4) return [orig_input_shape] @ops.RegisterStatistics("Conv2D", "flops") def _calc_conv_flops(graph, node): """Calculates the compute resources needed for Conv2D."""
tensorflow.python.framework.tensor_shape.unknown_shape
9,082
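A minimal sketch of the fallback used above: when the static value of the shape input is unavailable, the shape function returns a rank-4 shape with every dimension unknown:

from tensorflow.python.framework import tensor_shape

# Rank is known (4, for NHWC conv tensors); the dimensions are not.
shape = tensor_shape.unknown_shape(ndims=4)
print(shape)  # (?, ?, ?, ?)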
import tensorflow as tf n_lstm_layers = self.options['lstm'].get('n_layers', 1) cell_clip = self.options['lstm'].get('cell_clip') proj_clip = self.options['lstm'].get('proj_clip') use_skip_connections = self.options['lstm']['use_skip_connections'] if use_skip_connections: print("USING SKIP CONNECTIONS") else: print("NOT USING SKIP CONNECTIONS") # the sequence lengths from input mask if self.use_character_inputs: mask = tf.reduce_any(self.ids_placeholder > 0, axis=2) else: mask = self.ids_placeholder > 0 sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) batch_size = tf.shape(sequence_lengths)[0] # for each direction, we'll store tensors for each layer self.lstm_outputs = {'forward': [], 'backward': []} self.lstm_state_sizes = {'forward': [], 'backward': []} self.lstm_init_states = {'forward': [], 'backward': []} self.lstm_final_states = {'forward': [], 'backward': []} update_ops = [] for direction in ['forward', 'backward']: if direction == 'forward': layer_input = self.embedding else:
tensorflow.cast
9,083
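A standalone sketch of the mask-to-length computation above (TensorFlow 1.x assumed): cast a boolean mask to int32 and sum over the time axis:

import tensorflow as tf

ids = tf.constant([[5, 3, 0], [7, 0, 0]])
mask = ids > 0
sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)

with tf.Session() as sess:
    print(sess.run(sequence_lengths))  # [2 1]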
import tensorflow as tf surrogate_type: Either 'xent' or 'hinge', specifying which upper bound should be used for indicator functions. Returns: A `Tensor` of shape [num_labels] or [num_labels, num_anchors]. """ maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) loss_on_negatives = losses_utils.weighted_surrogate_loss( labels, logits, surrogate_type, positive_weights=0.0) / maybe_log2 return tf.reduce_sum(weights * loss_on_negatives, 0)
tensorflow.cast
9,084
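And a sketch of the dtype-matching cast above: a float32 scalar is cast to the base dtype of `logits` before being used as a divisor (TensorFlow 1.x assumed):

import tensorflow as tf

logits = tf.placeholder(tf.float64, [None])
maybe_log2 = tf.cast(tf.log(2.0), logits.dtype.base_dtype)  # now float64
scaled = logits / maybe_log2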
import tensorflow as tf images = tf.reshape(images, shape=images_shape) gpu_compute_stage_ops.append(gpu_compute_stage_op) else: # Minor hack to avoid H2D copy when using synthetic data images = tf.truncated_normal( host_images.get_shape(), dtype=input_data_type, stddev=1e-1, name='synthetic_images') images = tf.contrib.framework.local_variable( images, name='gpu_cached_images') labels = host_labels with tf.device(self.devices[device_num]): # Rescale to [0, 1) images *= 1. / 256 # Rescale to [-1,1] instead of [0, 1) images = tf.subtract(images, 0.5) images = tf.multiply(images, 2.0) if self.data_format == 'NCHW': images = tf.transpose(images, [0, 3, 1, 2]) if input_data_type != data_type: images = tf.cast(images, data_type) network = ConvNetBuilder( images, input_nchan, phase_train, self.data_format, data_type)
tensorflow.device
9,085
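A minimal sketch of the tf.device pinning shown above, placing the per-tower rescaling ops on one device; the device string and shapes are illustrative, TensorFlow 1.x assumed:

import tensorflow as tf

with tf.device('/gpu:0'):
    images = tf.random_uniform([8, 224, 224, 3])  # stand-in input batch
    images = tf.subtract(images, 0.5)
    images = tf.multiply(images, 2.0)             # rescale to [-1, 1)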
import tensorflow as tf self.conv3 = tf.layers.conv2d(self.pool2, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2) self.conv4 = tf.layers.conv2d(self.pool3, self.config.cifar10_cnn["num_filters"], self.config.cifar10_cnn["filter_size"], padding='same', activation=tf.nn.relu) self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)
tensorflow.layers.conv2d
9,086
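A self-contained sketch of one conv + pool block in the style above; the filter count and kernel size stand in for the config values (TensorFlow 1.x layers API):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
conv = tf.layers.conv2d(x, filters=64, kernel_size=3,
                        padding='same', activation=tf.nn.relu)
pool = tf.layers.max_pooling2d(conv, pool_size=2, strides=2)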
import tensorflow as tf saver = tf.train.Saver(tf.global_variables()) with tf.Session() as sess: ckpt = tf.train.get_checkpoint_state('./model_pretrain') if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): print("loading checkpoint...") saver.restore(sess, ckpt.model_checkpoint_path)
tensorflow.train.get_checkpoint_state
9,087
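A runnable variant of the restore pattern above; note that a Saver needs at least one variable to exist. The directory name follows the snippet, everything else is illustrative:

import tensorflow as tf

v = tf.get_variable('v', shape=[1])  # something for the Saver to restore
saver = tf.train.Saver(tf.global_variables())

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('./model_pretrain')
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("loading checkpoint...")
        saver.restore(sess, ckpt.model_checkpoint_path)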
import tensorflow as tf
if rf == 1: #faster 1x1 conv
    c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, shape_list(x)[:-1]+[nf])
else: #was used to train LM
    c = tf.nn.conv1d(x, w, stride=1, padding=pad)+b
return c

def attn(x, scope, n_state, n_head, train=False, scale=False):
    assert n_state%n_head==0
    with tf.variable_scope(scope):
        #c [-1,n_ctx,3*emb]
        c = conv1d(x, 'c_attn', n_state*3, 1, train=train)
        #q,k,v [-1,n_ctx,emb]
        q, k, v = tf.split(c, 3, 2)
        #q [-1,head,n_ctx,emb] k [-1,head,emb,n_ctx] v [-1,head,n_ctx,emb]
        q = split_heads(q, n_head)
        k = split_heads(k, n_head, k=True)
tensorflow.variable_scope
9,088
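A minimal sketch of the tf.variable_scope pattern attn() relies on above: parameters created inside a named scope get that prefix, and reuse lets repeated calls share them. Names and sizes are illustrative:

import tensorflow as tf

def tiny_layer(x, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        w = tf.get_variable('w', [int(x.shape[-1]), 16])
        return tf.matmul(x, w)  # the variable is named '<scope>/w'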
import tensorflow as tf 'near_surface_samples': _float_feature(d['near_surface_samples']), 'grid': _float_feature(d['grid']), 'world2grid': _float_feature(d['world2grid']), 'surface_point_samples': _float_feature(d['surface_point_samples']) } example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def full_featurespec(): return { 'bounding_box_samples': tf.io.FixedLenFeature([100000, 4], tf.float32), 'depth_renders': tf.io.FixedLenFeature([20, 224, 224, 1], tf.float32), 'mesh_name': tf.io.FixedLenFeature([], tf.string), 'near_surface_samples': tf.io.FixedLenFeature([100000, 4], tf.float32), 'grid': tf.io.FixedLenFeature([32, 32, 32], tf.float32), 'world2grid': tf.io.FixedLenFeature([4, 4], tf.float32), 'surface_point_samples': tf.io.FixedLenFeature([10000, 6], tf.float32) } def parse_tf_example(example_proto): d = tf.io.parse_single_example(example_proto, full_featurespec()) return (d['bounding_box_samples'], d['depth_renders'], d['mesh_name'], d['near_surface_samples'], d['grid'], d['world2grid'], d['surface_point_samples']) def _example_dict_tf_func_wrapper(mesh_orig_path): mesh_orig_path = mesh_orig_path.decode(sys.getdefaultencoding())
tensorflow.io.FixedLenFeature
9,089
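A reduced sketch of the parse path above: one FixedLenFeature entry and a parse function, keeping only the world2grid field for brevity:

import tensorflow as tf

spec = {'world2grid': tf.io.FixedLenFeature([4, 4], tf.float32)}

def parse(example_proto):
    d = tf.io.parse_single_example(example_proto, spec)
    return d['world2grid']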
import tensorflow as tf out=tf.matmul(l3, self.w4)+self.b4 return out def valid_inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1) l2 = tf.matmul(l1, self.w2)+self.b2 l2=tf.nn.relu(l2) l3=tf.matmul(l2, self.w3)+self.b3 l3=tf.nn.relu(l3) out=tf.matmul(l3, self.w4)+self.b4 return out def softmax_loss(self,predicts,labels): predicts=tf.nn.softmax(predicts)
tensorflow.nn.relu
9,090
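The layers above all follow the same matmul + bias + tf.nn.relu pattern; a minimal standalone sketch with illustrative shapes (TensorFlow 1.x):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
w1 = tf.get_variable('w1', [784, 256])
b1 = tf.get_variable('b1', [256])
l1 = tf.nn.relu(tf.matmul(x, w1) + b1)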
import tensorflow as tf
self.action = tf.squeeze(self.normal_dist.sample(1), axis=0)
self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])
tensorflow.clip_by_value
9,091
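A toy sketch of the clipping step above: keep a sampled action inside the environment's bounds (the bounds here are illustrative):

import tensorflow as tf

action = tf.constant([-3.0, 0.5, 4.0])
clipped = tf.clip_by_value(action, -2.0, 2.0)

with tf.Session() as sess:
    print(sess.run(clipped))  # [-2.   0.5  2. ]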
import tensorflow as tf y = analysis_transform(x_coori,x) return tf.squeeze(y,axis=0)
tensorflow.squeeze
9,092
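A minimal sketch of the tf.squeeze call above, dropping the singleton batch axis the transform added; shapes are illustrative:

import tensorflow as tf

y = tf.zeros([1, 16, 16, 192])  # batched transform output
y = tf.squeeze(y, axis=0)       # shape [16, 16, 192]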
import tensorflow as tf def main(_): tf.logging.set_verbosity(tf.logging.INFO) bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.gfile.MakeDirs(FLAGS.output_dir) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir,
tensorflow.gfile.MakeDirs
9,093
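A one-call sketch of the tf.gfile.MakeDirs use above: create the output directory, parents included, with no error if it already exists. The path is illustrative:

import tensorflow as tf

tf.gfile.MakeDirs('/tmp/output_dir')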
import tensorflow as tf """ matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices] blocked_rows = tf.Dimension(0) blocked_cols = tf.Dimension(0) batch_shape = tf.TensorShape(None) for matrix in matrices: full_matrix_shape = matrix.get_shape().with_rank_at_least(2) batch_shape = batch_shape.merge_with(full_matrix_shape[:-2]) blocked_rows += full_matrix_shape[-2] blocked_cols += full_matrix_shape[-1] ret_columns_list = [] for matrix in matrices: matrix_shape = tf.shape(matrix) ret_columns_list.append(matrix_shape[-1]) ret_columns = tf.add_n(ret_columns_list) row_blocks = [] current_column = 0 for matrix in matrices: matrix_shape = tf.shape(matrix) row_before_length = current_column current_column += matrix_shape[-1] row_after_length = ret_columns - current_column row_blocks.append(tf.pad( tensor=matrix, paddings=tf.concat( [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32), [(row_before_length, row_after_length)]], axis=0))) blocked = tf.concat(row_blocks, -2)
tensorflow.add_n
9,094
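A toy sketch of the tf.add_n step above, summing a list of per-matrix column counts into a single total:

import tensorflow as tf

ret_columns_list = [tf.constant(2), tf.constant(3), tf.constant(4)]
ret_columns = tf.add_n(ret_columns_list)

with tf.Session() as sess:
    print(sess.run(ret_columns))  # 9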
import tensorflow as tf parallel_iterations=32) final_loss = tf.reduce_mean(loss) return final_loss def contra_traj_lossV1(pred, tgt, temp=10.0): # Trajectory-wise contrastive loss traj_pred = tf.reduce_mean(pred, axis=1) traj_tgt = tf.reduce_mean(tgt, axis=1) p1, p2 = tf.split(traj_pred, 2, axis=0) t1, t2 = tf.split(traj_tgt, 2, axis=0) soft_sign = tf.tanh((t1 - t2) * temp) loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2))) loss = tf.reduce_mean(loss) return loss def horizon_sumV1(input, horizon=12): bs, epi_len = input.shape[:2] new_w = epi_len - horizon + 1 weights = np.zeros([epi_len, new_w]) for i in range(new_w): weights[i:i + horizon, i] = 1.0
tensorflow.tanh
9,095
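A numeric sketch of the temperature-scaled soft sign above: tf.tanh((t1 - t2) * temp) saturates toward +1/-1 as the gap between the two trajectories grows:

import tensorflow as tf

t1 = tf.constant([1.0, 0.2])
t2 = tf.constant([0.5, 0.9])
soft_sign = tf.tanh((t1 - t2) * 10.0)

with tf.Session() as sess:
    print(sess.run(soft_sign))  # approx [ 1. -1.]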
from tensorflow.python.ops import variable_scope """ with variable_scope.variable_scope(name, 'mean_iou', [predictions, labels]):
tensorflow.python.ops.variable_scope.variable_scope
9,096
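A minimal sketch of the scope call above: variable_scope(name, default_name, values) falls back to the default name when `name` is None. The body here is a placeholder:

from tensorflow.python.ops import variable_scope

with variable_scope.variable_scope(None, 'mean_iou', []):
    pass  # metric variables and update ops would be created here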
import tensorflow as tf

    def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

    scaffold_fn = tpu_scaffold
else:
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)

output_spec = None
if mode == tf.estimator.ModeKeys.PREDICT:
    output_spec = tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode, predictions=masked_lm_example_loss, scaffold_fn=scaffold_fn)  # output the scores for the masked words
return output_spec

return model_fn
tensorflow.logging.info
9,097
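A reduced sketch of the variable-logging loop above (TensorFlow 1.x logging API):

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("**** Trainable Variables ****")
for var in tf.trainable_variables():
    tf.logging.info("  name = %s, shape = %s", var.name, var.shape)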
import tensorflow as tf self.assertEqual(len(grads), len(vars_)) for grad, var in zip(grads, vars_): if grad is not None: self.assertEqual(grad.shape, var.shape) def test_training_graph(self): """Test model training in graph mode.""" with tf.Graph().as_default(): config = config_.get_hparams_cifar_38() config.add_hparam("n_classes", 10) config.add_hparam("dataset", "cifar-10") x = tf.random_normal( shape=(self.config.batch_size,) + self.config.input_shape)
tensorflow.Graph
9,098
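A minimal sketch of the graph-mode test setup above: build ops inside a fresh tf.Graph so the test does not leak state into the default graph. The shape is illustrative:

import tensorflow as tf

with tf.Graph().as_default():
    x = tf.random_normal(shape=(2, 32, 32, 3))  # stand-in input batch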
import tensorflow as tf return tf.reduce_mean(tf.square(uP)) def conv2d(img, w, b, strides=[1, 1, 1, 1], is_dilated=False): if is_dilated: layer = tf.nn.atrous_conv2d(img, w, rate=2, padding='SAME') + b else: layer = tf.nn.conv2d(img, w, strides=strides, padding='SAME') + b return layer def dropout(layer, keep_prob=0.9, is_training=True, name=None, selu=False): if selu: return dropout_selu(layer, 1.0 - keep_prob, name=name, training=is_training) if is_training: return tf.nn.dropout(layer, keep_prob=keep_prob, name=name) else: return tf.add(layer, 0, name=name) def norm(layer, norm_type='batch_norm', decay=0.9, id=0, is_training=True, activation_fn=tf.nn.relu, prefix='conv_'): if norm_type != 'batch_norm' and norm_type != 'layer_norm': return tf.nn.relu(layer) with tf.variable_scope('norm_layer_%s%d' % (prefix, id)) as vs: if norm_type == 'batch_norm': if is_training: try: layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs) # updates_collections=None
tensorflow.nn.dropout
9,099
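A condensed sketch of the dropout() helper above (the selu branch omitted): apply tf.nn.dropout during training only, passing activations through untouched at eval time:

import tensorflow as tf

def maybe_dropout(layer, keep_prob=0.9, is_training=True, name=None):
    if is_training:
        return tf.nn.dropout(layer, keep_prob=keep_prob, name=name)
    return tf.identity(layer, name=name)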