Column      Type     Value range
seed        string   length 25 to 2.89k characters
seed_api    string   length 14 to 102 characters
index       int64    0 to 14.8k
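Each row that follows is a (seed, seed_api, index) triple: a TensorFlow code snippet, the fully qualified API it exercises, and a running row id. As a minimal sketch of how such records could be consumed, assuming a JSON-lines export named records.jsonl (the file name and export format are assumptions, not part of the dataset), one might iterate them as follows:

import json

# Minimal sketch: walk a JSON-lines dump that follows the schema above.
# The path "records.jsonl" is hypothetical; only the three field names
# (seed, seed_api, index) come from the column description.
with open("records.jsonl", "r", encoding="utf-8") as handle:
    for line in handle:
        record = json.loads(line)
        snippet = record["seed"]        # multi-line TensorFlow code fragment
        api_name = record["seed_api"]   # e.g. "tensorflow.reduce_sum"
        row_index = record["index"]     # integer row id, 0 to ~14.8k
        print(f"{row_index:>6}  {api_name}  ({len(snippet)} chars)")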
import tensorflow as tf
  if is_training:
    # I.e., 0.1 dropout
    output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

  logits = tf.matmul(output_layer, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)
  if task_name != "sts-b":
    probabilities = tf.nn.softmax(logits, axis=-1)
    predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
  else:
    probabilities = logits
    logits = tf.squeeze(logits, [-1])
    predictions = logits
    per_example_loss = tf.square(logits - labels)
  loss = tf.reduce_mean(per_example_loss)
  return (loss, per_example_loss, probabilities, logits, predictions)

def model_fn_builder(config, num_labels, init_checkpoint, learning_rate,
tensorflow.reduce_sum
9,200
import tensorflow as tf num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec(
tensorflow.logging.info
9,201
import tensorflow as tf with tf.variable_scope(self.model_scope): # loss & extra evaluation metrics logits = self.forward_train(images) self.maskable_var_names = [var.name for var in self.maskable_vars] loss, metrics = self.calc_loss(labels, logits, self.trainable_vars) if FLAGS.enbl_dst: loss += self.helper_dst.calc_loss(logits, logits_dst) tf.summary.scalar('loss', loss) for key, value in metrics.items(): tf.summary.scalar(key, value) # learning rate schedule self.global_step = tf.train.get_or_create_global_step() lrn_rate, self.nb_iters_train = self.setup_lrn_rate(self.global_step) # overall pruning ratios of trainable & maskable variables pr_trainable = calc_prune_ratio(self.trainable_vars) pr_maskable = calc_prune_ratio(self.maskable_vars)
tensorflow.summary.scalar
9,202
import tensorflow as tf out = conv(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out"%scope, training=training, ema=ema, init=init) h_stack1, h_stack2 = tf.split(out, 2, 3) sigmoid_out = tf.sigmoid(h_stack2) out = (h_stack1 * sigmoid_out) out_shp = out.get_shape().as_list() if out_shp[1:-1] < in_shp[1:-1]: x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1], strides=[1, dim[2][0], dim[2][1], 1], padding='SAME') elif out_shp[1:-1] > in_shp[1:-1]: warnings.warn("The height and width of the output are larger than the input. There will be no residual connection.") residual = False if out_shp[-1] > in_shp[-1]: x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, int(dim[0] - in_shp[-1])]])
tensorflow.nn.avg_pool
9,203
import tensorflow as tf "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids":
tensorflow.constant
9,204
import tensorflow as tf
    alpha = tf.nn.softmax(out_att)
    context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context')  # (N, D)
tensorflow.expand_dims
9,205
import tensorflow as tf
    # case: train mode (uses stats of the current batch)
    mean = tf.reduce_mean(_x, axis=reduction_axes)
    brodcast_mean = tf.reshape(mean, broadcast_shape)
    std = tf.reduce_mean(tf.square(_x - brodcast_mean) + epsilon, axis=reduction_axes)
    std = tf.sqrt(std)
tensorflow.reshape
9,206
import tensorflow as tf Batch tensor of the new observations. """ if indices is None: indices = tf.range(len(self._batch_env)) observ_dtype = self._parse_dtype(self._batch_env.observation_space) observ = tf.py_func( self._batch_env.reset, [indices], observ_dtype, name='reset') observ = tf.check_numerics(observ, 'observ') reward = tf.zeros_like(indices, tf.float32) done = tf.zeros_like(indices, tf.bool) with tf.control_dependencies([ tf.scatter_update(self._observ, indices, observ), tf.scatter_update(self._reward, indices, reward), tf.scatter_update(self._done, indices, done)]): return tf.identity(observ) @property def observ(self): """Access the variable holding the current observation.""" return self._observ @property def action(self): """Access the variable holding the last received action.""" return self._action @property def reward(self):
tensorflow.identity
9,207
import tensorflow as tf width = tf.shape(inputs)[2] out_height = get_deconv_dim(height, stride_h, kernel_h, padding) out_width = get_deconv_dim(width, stride_w, kernel_w, padding) output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0) outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape, [1, stride_h, stride_w, 1], padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases) if bn: # outputs = batch_norm_for_conv2d(outputs, is_training, # bn_decay=bn_decay, scope='bn') outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training) if activation_fn is not None: # outputs = activation_fn(outputs) outputs = tf.nn.leaky_relu(outputs, alpha=0.2) return outputs
tensorflow.nn.bias_add
9,208
import tensorflow as tf import warnings # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning warnings.simplefilter(action='ignore', category=FutureWarning) warnings.simplefilter(action='ignore', category=Warning) import tensorflow as tf tf.get_logger().setLevel('INFO') tf.autograph.set_verbosity(0) import logging tf.get_logger().setLevel(logging.ERROR) # For ACER def get_by_index(input_tensor, idx): """ Return the input tensor, offset by a certain value :param input_tensor: (TensorFlow Tensor) The input tensor
tensorflow.get_logger
9,209
import tensorflow as tf
      self._beta = None

    if self._scale:
      self._set_default_initializer(self.GAMMA)
      self._gamma = tf.get_variable(
          self.GAMMA,
          shape=self._mean_shape,
          initializer=self._initializers[self.GAMMA])
tensorflow.get_variable
9,210
import tensorflow as tf feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f
tensorflow.python_io.TFRecordWriter
9,211
import tensorflow as tf return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def lm_token_preprocessing(dataset, training): """Concatenates inputs, 0, targets, with masking only for targets.""" del training def concat_and_add_mask(x): inp = x['inputs'] targets = x['targets'] pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0) concat = tf.concat([inp, pad, targets], axis=0) mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0) x['inputs'] = concat x['targets'] = concat x['mask'] = mask return x dataset = dataset.map(concat_and_add_mask) return dataset @gin.configurable(module='trax.data', denylist=['hparams']) def bair_robot_pushing_hparams(hparams=None, video_num_input_frames=1,
tensorflow.zeros_like
9,212
import tensorflow as tf
    scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
    output = facts * tf.expand_dims(scores, -1)
tensorflow.expand_dims
9,213
import tensorflow as tf cand_features.append(cand_states) cand_choices.append(cand_choice) cand_scoress.append(cand_scores) n_cands_per_sample = [v.shape[0] for v in cand_features] cand_features = np.concatenate(cand_features, axis=0).astype(np.float32, copy=False) cand_choices = np.asarray(cand_choices).astype(np.int32, copy=False) cand_scoress = np.concatenate(cand_scoress, axis=0).astype(np.float32, copy=False) n_cands_per_sample = np.asarray(n_cands_per_sample).astype(np.int32, copy=False) return cand_features, n_cands_per_sample, cand_choices, cand_scoress def padding(output, n_vars_per_sample, fill=-1e8): n_vars_max = tf.reduce_max(n_vars_per_sample) output = tf.split( value=output, num_or_size_splits=n_vars_per_sample, axis=1, ) output = tf.concat([ tf.pad( x, paddings=[[0, 0], [0, n_vars_max - tf.shape(x)[1]]], mode='CONSTANT', constant_values=fill) for x in output ], axis=0)
tensorflow.reduce_max
9,214
import tensorflow as tf
        }
        v_grad_clip_fn = opt.get_gradient_clip_fn(hparams)
        v_grads_and_vars = v_grad_clip_fn(grads_and_vars)
        v_grads, _ = zip(*v_grads_and_vars)
        v_grads_true = tf.clip_by_value(grads,
                                        hparams["kwargs"]["clip_value_min"],
                                        hparams["kwargs"]["clip_value_max"])
tensorflow.clip_by_value
9,215
import tensorflow as tf
    end_points[scope] = net
    logits = slim.conv2d(net, 1, kernel_size=1, stride=1, padding='VALID',
                         normalizer_fn=None, activation_fn=None)
    logits = tf.reshape(logits, [-1, 1])
    end_points['logits'] = logits
    return logits, end_points
tensorflow.reshape
9,216
import tensorflow as tf def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
tensorflow.train.Features
9,217
import tensorflow as tf batch_size = shape[0] height = shape[1] width = shape[2] channels = shape[3] res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_) res = [elem[0, :, :, :] for elem in res] res = [tf.image.random_flip_left_right(elem) for elem in res] res = [tf.reshape(elem, [1, height, width, channels]) for elem in res] res = tf.concat(axis=0, values=res) return res # build a one hot representation corresponding to the integer tensor # the one-hot dimension is appended to the integer tensor shape def as_one_hot(input_, n_indices):
tensorflow.concat
9,218
import tensorflow as tf
  order_m = tf.abs(order_m)
  zeros = tf.zeros_like(order_m)
tensorflow.zeros_like
9,219
import tensorflow as tf
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
tensorflow.logging.info
9,220
import tensorflow as tf for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) tf.summary.scalar("regularization_loss", model.regularization_loss) tf.summary.scalar("stop_token_loss", model.stop_token_loss) tf.summary.scalar("loss", model.loss) tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [tf.norm(grad) for grad in model.gradients] tf.summary.histogram("gradient_norm", gradient_norms) tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize # gradients (in case of explosion) return tf.summary.merge_all() def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss): values = [ tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss", simple_value=before_loss), tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss",
tensorflow.summary.histogram
9,221
import tensorflow as tf if decoder.old_maxout: # for back-compatibility with old models output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX', padding='SAME', strides=[2]) output_ = tf.squeeze(output_, axis=2) else: output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1)) if decoder.pred_embed_proj: # intermediate projection to embedding size (before projecting to vocabulary size) # this is useful to reduce the number of parameters, and
tensorflow.split
9,222
import tensorflow as tf outputs_to_scales_to_logits[output][ 'logits_%.2f' % image_scale] = outputs_to_logits[output] # Merge the logits from all the multi-scale inputs. for output in sorted(model_options.outputs_to_num_classes): # Concatenate the multi-scale logits for each output type. all_logits = [ tf.expand_dims(logits, axis=4) for logits in outputs_to_scales_to_logits[output].values() ] all_logits = tf.concat(all_logits, 4) merge_fn = ( tf.reduce_max if model_options.merge_method == 'max' else tf.reduce_mean)
tensorflow.expand_dims
9,223
import tensorflow as tf sparse_ops._take_many_sparse_from_tensors_map) # pylint: enable=protected-access class SparseTensorsMapTest(tf.test.TestCase): def _SparseTensorPlaceholder(self, dtype=None): if dtype is None: dtype = tf.int32 return tf.SparseTensor( tf.placeholder(tf.int64), tf.placeholder(dtype), tf.placeholder(tf.int64)) def _SparseTensorValue_5x6(self, permutation): ind = np.array([ [0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]).astype(np.int64) val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
tensorflow.placeholder
9,224
import tensorflow as tf # Handle BN scope reuse if self.reuse: self.scope_reuse = tf.AUTO_REUSE else: self.scope_reuse = None self.param_initializer = { 'moving_mean': tf.constant_initializer(0., dtype=self.dtype), 'moving_variance': tf.constant_initializer(1., dtype=self.dtype), 'gamma': tf.constant_initializer(0.1, dtype=self.dtype) } self.param_trainable = { 'moving_mean': False,
tensorflow.constant_initializer
9,225
import tensorflow as tf
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)

    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
tensorflow.reshape
9,226
import tensorflow as tf
    states_tiled = tf.tile(states[:, None], [1, num_tasks, 1])  # B x B x D
    states_tiled = tf.reshape(states_tiled,
tensorflow.reshape
9,227
import tensorflow as tf z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(x_t[i:i + self._p], '') vx_keys, r = tf.cond( tf.greater(vx.lookup(u), -1), true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)), false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0), tf.constant(1, dtype=tf.int64, name='constant')) ) vx.insert(u, r) for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(z_t[i:i + self._p], '') vz_keys, r = tf.cond( tf.greater(vz.lookup(u), -1), true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)), false_fn=lambda: ( tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0), tf.constant(1, dtype=tf.int64)) ) vz.insert(u, r) kk = tf.Variable(0, dtype=tf.int64) for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'): for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'): to_add = tf.cond( tf.greater(vz.lookup(vx_keys[i]), -1),
tensorflow.string_join
9,228
import tensorflow as tf @layer def sigmoid_cross_entropy_layer(tensor, target, **opts): out = tf.nn.sigmoid_cross_entropy_with_logits(logits=tensor, labels=target) return out @layer def mean_loss_by_example_layer(tensor, sequence_length, **opts): loss = tf.div( tf.reduce_sum(tensor, axis=1), tf.cast(sequence_length, dtype=tf.float32) ) out = tf.reduce_mean(loss) tf.summary.scalar('cost', out) return out @layer
tensorflow.reduce_sum
9,229
import tensorflow as tf Training: {spacer}Positive count: {train_pos_ct} {spacer}Batch size: {train_batch_size} {multiplier} {spacer}Batch count per epoch: {train_batch_ct} Eval: {spacer}Positive count: {eval_pos_ct} {spacer}Batch size: {eval_batch_size} {multiplier} {spacer}Batch count per epoch: {eval_batch_ct}""" _TRAIN_FEATURE_MAP = { movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string), movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string), rconst.MASK_START_INDEX: tf.FixedLenFeature([1], dtype=tf.string), "labels": tf.FixedLenFeature([], dtype=tf.string), } _EVAL_FEATURE_MAP = { movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string), movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string), rconst.DUPLICATE_MASK: tf.FixedLenFeature([], dtype=tf.string) }
tensorflow.FixedLenFeature
9,230
import tensorflow as tf if chaining_stop_gradient: attns = tf.stop_gradient(attns) states = tf.stop_gradient(states) decoder_outputs = tf.stop_gradient(decoder_outputs) if chaining_strategy == 'concat_attns': attention_states[0] = tf.concat([attention_states[0], attns], axis=2) elif chaining_strategy == 'concat_states': attention_states[0] = tf.concat([attention_states[0], states], axis=2) elif chaining_strategy == 'sum_attns': attention_states[0] += attns elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'): if chaining_strategy == 'map_attns': x = attns elif chaining_strategy == 'map_outputs': x = decoder_outputs else:
tensorflow.concat
9,231
import tensorflow as tf def body(self, features): observations = features["inputs_raw"] observations = tf.cast(observations, tf.float32) flat_observations = tf.layers.flatten(observations) with tf.variable_scope("policy"): x = flat_observations for size in self.hparams.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) logits = tf.layers.dense(x, self.hparams.problem.num_actions) logits = tf.expand_dims(logits, axis=1) with tf.variable_scope("value"): x = flat_observations for size in self.hparams.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1) logits = clip_logits(logits, self.hparams) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicy(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs_raw"] # Axis 0 - Batch. # Axis 1 - Input Frames, 4 frames.
tensorflow.layers.dense
9,232
import tensorflow as tf """Discriminator layer""" with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False
tensorflow.get_variable_scope
9,233
import tensorflow as tf
    # We use sampled softmax so we keep output projection separate.
    w = tf.get_variable("proj_w", [24, classes])
tensorflow.get_variable
9,234
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
          batch_size=None, monitors=None, max_steps=None):
    """See trainable.Trainable. Note: Labels must be integer class indices."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks
9,235
import tensorflow as tf #) # Kim et al 2015, +/- 0.05 w_init = tf.random_uniform_initializer( minval=-0.05, maxval=0.05) elif cnn_options['activation'] == 'tanh': # glorot init w_init = tf.random_normal_initializer( mean=0.0, stddev=np.sqrt(1.0 / (width * char_embed_dim)) ) w = tf.get_variable( "W_cnn_%s" % i, [1, width, char_embed_dim, num], initializer=w_init, dtype=DTYPE) b = tf.get_variable( "b_cnn_%s" % i, [num], dtype=DTYPE, initializer=tf.constant_initializer(0.0)) conv = tf.nn.conv2d( inp, w,
tensorflow.get_variable
9,236
import tensorflow as tf loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1) num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2 gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1] gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1]) gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1)) select_targets = tf.gather_nd(targets_list[-1], gather_indcies) select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies) mse_loss_list.append(tf.losses.mean_squared_error(select_targets, select_heatmap, weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(len(pred_outputs) - 1), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN)) else: for pred_ind in list(range(len(pred_outputs))):
tensorflow.gather_nd
9,237
import tensorflow as tf bsz_per_core = tf.shape(features["input_ids"])[0] inp = tf.transpose(features["input_ids"], [1, 0]) seg_id = tf.transpose(features["segment_ids"], [1, 0]) inp_mask = tf.transpose(features["input_mask"], [1, 0]) label = tf.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel(
tensorflow.reshape
9,238
import tensorflow as tf
    init = tf.global_variables_initializer()
tensorflow.global_variables_initializer
9,239
import tensorflow as tf
    mapping_path = os.path.join(self._transformed_metadata_dir,
                                self.ASSET_MAP)
    mapping = {}
    if tf.io.gfile.exists(mapping_path):
      with tf.io.gfile.GFile(mapping_path) as f:
        mapping = json.loads(f.read())
tensorflow.io.gfile.exists
9,240
import tensorflow as tf self.click_metrics=self.click_loglikelihood(reshaped_train_labels,\ self.propensity,train_output) tf.summary.scalar('click_metrics',self.click_metrics,collections=['train']) for i in range(len(pw_list)): tf.summary.scalar('Inverse Propensity weights %d' % i, tf.reduce_mean(pw_list[i]), collections=['train']) tf.summary.scalar('Rank Loss', tf.reduce_mean(self.rank_loss), collections=['train']) # Compute examination loss self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output)) self.exam_loss = self.loss_func(self.propensity, reshaped_train_labels, self.relevance_weights) rw_list = tf.unstack(self.relevance_weights, axis=1) # Compute propensity weights for i in range(len(rw_list)): tf.summary.scalar('Relevance weights %d' % i, tf.reduce_mean(rw_list[i]), collections=['train']) tf.summary.scalar('Exam Loss', tf.reduce_mean(self.exam_loss), collections=['train']) # Gradients and SGD update operation for training the model. self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss # Select optimizer self.optimizer_func = tf.train.AdagradOptimizer
tensorflow.unstack
9,241
import tensorflow as tf
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d", len(predict_examples))
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    if FLAGS.use_tpu:
      # Warning: According to tpu_estimator.py Prediction on TPU is an
tensorflow.logging.info
9,242
import tensorflow as tf def weight_decay(penalty_type, penalty): """Add weight decay. Args: model: TensorflowGraph. Returns: A scalar tensor containing the weight decay cost. Raises: NotImplementedError: If an unsupported penalty type is requested. """ variables = [] # exclude bias variables for v in tf.trainable_variables(): if v.get_shape().ndims == 2: variables.append(v) with tf.name_scope('weight_decay'): if penalty_type == 'l1': cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables]) elif penalty_type == 'l2': cost = tf.add_n([tf.nn.l2_loss(v) for v in variables]) else: raise NotImplementedError('Unsupported penalty_type %s' % penalty_type) cost *= penalty #tf.scalar_summary('Weight Decay Cost', cost) return cost
tensorflow.trainable_variables
9,243
from tensorflow.python.framework import ops the return type is `quint8`. """ with ops.op_scope([x], name, "Tanh") as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops._tanh(x, name=name) ops.RegisterShape("Abs")(common_shapes.unchanged_shape) ops.RegisterShape("Ceil")(common_shapes.unchanged_shape) ops.RegisterShape("Conj")(common_shapes.unchanged_shape) ops.RegisterShape("Cos")(common_shapes.unchanged_shape) ops.RegisterShape("Exp")(common_shapes.unchanged_shape) ops.RegisterShape("Floor")(common_shapes.unchanged_shape) ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
9,244
import tensorflow as tf return tf.reshape(tf.concat(axis=1, values=h), [-1, nh]) else: return tf.reshape(tf.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = tf.matmul(x, wx) + tf.matmul(h, wh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u
tensorflow.split
9,245
import tensorflow as tf
            # TODO (jk): removed dilations=dilations to accommodate r1.4
        else:
            activities = tf.nn.conv3d(
                data,
tensorflow.nn.conv3d
9,246
import tensorflow as tf def sussillo_reg(self): states = self.states reg = 0 for state in states: dJr = tf.matmul(tf.nn.relu(state), tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)) reg += tf.reduce_sum(tf.square(dJr)) return reg / (self.N_steps * self.N_batch) # train the model using Adam def train(self, sess, generator, learning_rate=.001, training_iters=50000, batch_size=64, display_step=10,weight_save_step=100, save_weights_path= None, generator_function= None, training_weights_path = None):
tensorflow.square
9,247
import tensorflow as tf sess.run([y[0].op, z[0].op]) def testNoInput(self): with self.test_session(): x, = tf.py_func(lambda: 42.0, [], [tf.float64]) self.assertAllClose(x.eval(), 42.0) def testCleanup(self): for _ in xrange(1000): g = tf.Graph() with g.as_default(): c = tf.constant([1.], tf.float32) _ = tf.py_func(lambda x: x + 1, [c], [tf.float32]) self.assertTrue(script_ops._py_funcs.size() < 100) def testError(self): with self.test_session(): def bad1(): # Structured numpy arrays aren't supported. return np.array([], dtype=[("foo", np.float32)])
tensorflow.constant
9,248
import tensorflow as tf
    if embeddings is not None:
      flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
tensorflow.multiply
9,249
from tensorflow.python.framework import tensor_shape
  """Shape function for the SparseSegmentMeanGrad op."""
  input_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape().with_rank(1)
  unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
  unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
      tensor_shape.scalar())
  output_dim0 = tensor_util.ConstantValue(op.inputs[3])
  if output_dim0 is not None:
    dim0 = output_dim0[0]
  else:
tensorflow.python.framework.tensor_shape.scalar
9,250
import tensorflow as tf states[name] = tf.gather(params=self.states_memory[name], indices=indices) internals = dict() for name in sorted(self.internals_memory): internals[name] = tf.gather(params=self.internals_memory[name], indices=indices) actions = dict() for name in sorted(self.actions_memory): actions[name] = tf.gather(params=self.actions_memory[name], indices=indices) terminal = tf.gather(params=self.terminal_memory, indices=indices) reward = tf.gather(params=self.reward_memory, indices=indices) if self.include_next_states: assert util.rank(indices) == 1 next_indices = (indices + 1) % self.capacity next_states = dict() for name in sorted(self.states_memory): next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)
tensorflow.gather
9,251
import tensorflow as tf predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
tensorflow.logging.info
9,252
import tensorflow as tf pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32) clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx) clf_h = tf.reshape(clf_h, [-1, 2, n_embd]) if train and clf_pdrop > 0: shape = shape_list(clf_h) shape[1] = 1 clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape) clf_h = tf.reshape(clf_h, [-1, n_embd]) clf_logits = clf(clf_h, 1, train=train) clf_logits = tf.reshape(clf_logits, [-1, 2]) clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y) return clf_logits, clf_losses, lm_losses
tensorflow.nn.dropout
9,253
import tensorflow as tf
    bboxes = tf.placeholder(tf.float32)
    bboxes_val = [[10, 10, 20, 22]]

    gt_boxes = tf.placeholder(tf.float32)
    gt_boxes_val = [[11, 13, 34, 31]]
tensorflow.placeholder
9,254
from tensorflow.python.ops import math_ops
  """Compute the mean intersection-over-union via the confusion matrix."""
  sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
tensorflow.python.ops.math_ops.reduce_sum
9,255
import tensorflow as tf tf.app.flags.DEFINE_float( 'negative_ratio', 3., 'Negative ratio in the loss function.') tf.app.flags.DEFINE_float( 'match_threshold', 0.56, 'Matching threshold in the loss function.') tf.app.flags.DEFINE_float( 'neg_threshold', 0.4, 'Matching threshold for the negtive examples in the loss function.') # optimizer related configuration tf.app.flags.DEFINE_float( 'weight_decay', 0.0005, 'The weight decay on the model weights.') tf.app.flags.DEFINE_float( 'momentum', 0.9, 'The momentum for the MomentumOptimizer and RMSPropOptimizer.') tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') tf.app.flags.DEFINE_float(
tensorflow.app.flags.DEFINE_float
9,256
import tensorflow as tf with tf.variable_scope(scope): batch_size, height, width, num_channels = x.get_shape().as_list() f = conv(x, scope='f_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func) f = tf.layers.max_pooling2d(f, pool_size=2, strides=2, padding='SAME') print('attention f dims: ' + str(f.get_shape().as_list()))
tensorflow.layers.max_pooling2d
9,257
import tensorflow as tf
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
tensorflow.nn.dropout
9,258
import tensorflow as tf target_init_op = [ tf.assign(target, source) for target, source in zip(target_params, source_params) ] # Control flow is used because sess.run otherwise evaluates in nondeterministic order # and we first need to compute the policy action before computing q values losses with tf.control_dependencies([policy_train_op]): train_values_op = value_optimizer.minimize(values_losses, var_list=values_params) self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy'] # All ops to call during one training step self.step_ops = [policy_loss, qf1_loss, qf2_loss, value_loss, qf1, qf2, value_fn, logp_pi,
tensorflow.control_dependencies
9,259
import tensorflow as tf mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode,
tensorflow.metrics.mean
9,260
import tensorflow as tf
        mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)

        # landm loss (smooth L1)
        mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
        loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
                                     tf.boolean_mask(landm_pred, mask_landm_b))
        loss_landm = tf.reduce_mean(loss_landm)

        # localization loss (smooth L1)
        mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
        loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
                                   tf.boolean_mask(loc_pred, mask_pos_b))
tensorflow.reduce_mean
9,261
from tensorflow.python.ops import math_ops
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)
tensorflow.python.ops.math_ops.to_float
9,262
from tensorflow.python.ops import array_ops # logic. with ops.device(None): if all(tensor.shape == tensor_shape.scalar() for tensor in tensors): with ops.device(tensors[0].device): values = array_ops.stack(tensors) with ops.device(device): return array_ops.unstack(values) else: with ops.device(tensors[0].device): sizes = array_ops.stack( [array_ops.shape(tensor)[0] for tensor in tensors]) values = array_ops.concat(tensors, axis=0) with ops.device(device): sizes = array_ops.unstack(sizes) return list(array_ops.split(values, sizes, axis=0)) def _scheduled_stamp_resource_op_runner(batch, stamp): """Runs a batch operation on a stamped resource.""" if not batch: return arg_keys = set(batch[0].args.keys())
tensorflow.python.ops.array_ops.concat
9,263
import tensorflow as tf
        # initialize filter
        W = tf.get_variable(
            name='W',
            shape=[filter_size, num_feature, 1, num_filter],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())

        # convolve w and input
        conv = tf.nn.conv2d(
tensorflow.contrib.layers.xavier_initializer_conv2d
9,264
import tensorflow as tf
        input_.targets,
        tf.ones([self.batch_size, self.num_steps], dtype=data_type()),
        average_across_timesteps=False,
        average_across_batch=True)

    # Update the cost
    self._cost = tf.reduce_sum(loss)
    self._final_state = state

    if not is_training:
      return
tensorflow.reduce_sum
9,265
import tensorflow as tf
                tf.nn.conv2d(input_var, w, use_cudnn_on_gpu=True, data_format='NCHW',
                             strides=self.strides, padding=self.padding),
                b, data_format='NCHW', name=name)
        else:
            return tf.nn.bias_add(
                tf.nn.conv2d(input_var, w, data_format='NHWC',
                             strides=self.strides, padding=self.padding),
                b, data_format='NHWC', name=name)

    def get_variables(self):
        return {'w': self.w, 'b': self.b}
tensorflow.nn.conv2d
9,266
import tensorflow as tf else: #tanh by default lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif cell_type == 'GRU': if activation == 'linear': gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.identity) cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu': gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.nn.relu) cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) else: gru=tf.nn.rnn_cell.GRUCell(state_size) cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) else: if activation == 'linear': cell_basic = tf.contrib.rnn.BasicRNNCell(state_size,activation=tf.identity) cell_drop=tf.contrib.rnn.DropoutWrapper(cell_basic,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu': cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.nn.relu) cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32, input_size=num_input, input_keep_prob=input_prob, state_keep_prob=state_prob) else: #tanh by default
tensorflow.nn.rnn_cell.GRUCell
9,267
import tensorflow as tf # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(np.int64(15), v.eval()) def testSomeErrors(self): with tf.Graph().as_default(): v0 = tf.Variable([10.0], name="v0") v1 = tf.Variable([20.0], name="v1") v2 = tf.Variable([20.0], name="v2") v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1])) # By default the name used for "v2" will be "v1" and raise an error. with self.assertRaisesRegexp(ValueError, "same name: v1"): tf.train.Saver([v0, v1, v2])
tensorflow.Variable
9,268
import tensorflow as tf
tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tensorflow.app.flags.DEFINE_string
9,269
import tensorflow as tf
def _decode_record(record, name_to_features):
  """Decodes a record to a TensorFlow example."""
  example = tf.parse_single_example(record, name_to_features)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
tensorflow.parse_single_example
9,270
import tensorflow as tf
    # session info
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = False
tensorflow.ConfigProto
9,271
import tensorflow as tf out = tf.matmul(self.fc1, w) + b self.fc2 = tf.nn.relu(out) # fc3 with tf.variable_scope('fc3'): w = tf.get_variable('w', [self.fc2.get_shape()[1], num_classes], initializer=initializer, regularizer=regularizer) b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(1.0)) self.fc3 = tf.matmul(self.fc2, w) + b # Calculate Mean cross-entropy loss with tf.name_scope("loss"): self.predictions = tf.argmax(self.fc3, 1, name="predictions") losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y) regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) self.loss = tf.reduce_mean(losses) + sum(regularization_losses) # Accuracy with tf.name_scope("accuracy"): correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tensorflow.argmax
9,272
import tensorflow as tf saver = tf.train.Saver() x, prediction, output_class, new_sess = self.saveAndRestoreModel( self.buildLstmLayer(), sess, saver, is_dynamic_rnn=False) test_inputs, expected_output = self.getInferenceResult( x, output_class, new_sess) # Test Toco-converted model. result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False) self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2)) @test_util.enable_control_flow_v2 def testDynamicRnnMultiRnnCell(self): sess = tf.compat.v1.Session(config=CONFIG) x, prediction, output_class = self.buildModel( self.buildLstmLayer(), is_dynamic_rnn=True) self.trainModel(x, prediction, output_class, sess) saver = tf.train.Saver() x, prediction, output_class, new_sess = self.saveAndRestoreModel( self.buildLstmLayer(), sess, saver, is_dynamic_rnn=True) test_inputs, expected_output = self.getInferenceResult( x, output_class, new_sess)
tensorflow.compat.v1.Session
9,273
import tensorflow as tf
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
tensorflow.where
9,274
import tensorflow as tf """ # TODO(user): gradient clipping (see Minimize) if optimizer == 'adagrad': train_op = tf.train.AdagradOptimizer(learning_rate) elif optimizer == 'adam': train_op = tf.train.AdamOptimizer(learning_rate) elif optimizer == 'momentum': train_op = tf.train.MomentumOptimizer(learning_rate, momentum) elif optimizer == 'rmsprop': train_op = tf.train.RMSPropOptimizer(learning_rate, momentum) elif optimizer == 'sgd': train_op = tf.train.GradientDescentOptimizer(learning_rate) else: raise NotImplementedError('Unsupported optimizer %s' % optimizer) return train_op
tensorflow.train.RMSPropOptimizer
9,275
import tensorflow.contrib.graph_editor as ge - 'speed': checkpoint all outputs of convolutions and matmuls. these ops are usually the most expensive, so checkpointing them maximizes the running speed (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory) - 'memory': try to minimize the memory usage (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint) - 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint ''' # print("Calling memsaving gradients with", checkpoints) if not isinstance(ys,list): ys = [ys] if not isinstance(xs,list): xs = [xs] bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True) debug_print("bwd_ops: %s", bwd_ops) # forward ops are all ops that are candidates for recomputation fwd_ops = ge.get_forward_walk_ops([x.op for x in xs], inclusive=True, within_ops=bwd_ops) debug_print("fwd_ops: %s", fwd_ops) # exclude ops with no inputs fwd_ops = [op for op in fwd_ops if op.inputs]
tensorflow.contrib.graph_editor.get_backward_walk_ops
9,276
import tensorflow as tf # checkpoint related configuration tf.app.flags.DEFINE_string( 'checkpoint_path', './model/resnet50',#None, 'The path to a checkpoint from which to fine-tune.') tf.app.flags.DEFINE_string( 'checkpoint_model_scope', '', 'Model scope in the checkpoint. None if the same as the trained model.') tf.app.flags.DEFINE_string( 'model_scope', 'xdet_resnet', 'Model scope name used to replace the name_scope in checkpoint.') tf.app.flags.DEFINE_string( 'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None 'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.') tf.app.flags.DEFINE_boolean( 'ignore_missing_vars', True, 'When restoring a checkpoint would ignore missing variables.') tf.app.flags.DEFINE_boolean( 'run_on_cloud', True, 'Wether we will train on cloud (pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").') tf.app.flags.DEFINE_string( 'cloud_checkpoint_path', 'resnet50/model.ckpt', 'The path to a checkpoint from which to fine-tune.') FLAGS = tf.app.flags.FLAGS def input_pipeline():
tensorflow.app.flags.DEFINE_boolean
9,277
import tensorflow as tf pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2) # Optimizer with tf.name_scope('optimizer'): if model_str == 'gcn_ae': opt = OptimizerAE(preds=model.reconstructions, labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False), [-1]), pos_weight=pos_weight, norm=norm) elif model_str == 'gcn_vae': opt = OptimizerVAE(preds=model.reconstructions, labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False), [-1]),
tensorflow.sparse_tensor_to_dense
9,278
import tensorflow as tf logit_w = tf.get_variable('W', shape=[dnn_output_size, 1], initializer=tf.truncated_normal_initializer(stddev=1.0 / dnn_output_size, dtype=dtype), dtype=dtype) logit_b = tf.get_variable('b', shape=[1], initializer=tf.constant_initializer(0.0), dtype=dtype) logits = tf.squeeze(tf.nn.bias_add(tf.matmul(dnn_output, logit_w), logit_b), squeeze_dims=[1]) prediction = tf.nn.sigmoid(logits) prediction_inspect = tf.reshape(prediction, [batch_size, rnn_nunroll]) prediction_final = tf.squeeze(tf.slice(prediction_inspect, [0, rnn_nunroll - 1], [-1, 1]), squeeze_dims=[1]) print('logit: {}'.format(logits.get_shape())) # Compute loss if mode != 'gen': neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets) if target_weight_strategy == 'rect': avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods) else: neg_log_lhoods = tf.multiply(neg_log_lhoods, target_weights) # be careful to have at least one weight be nonzero # should we be taking the mean elem-wise by batch? i think this is a big bug avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights) neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll])
tensorflow.nn.sigmoid_cross_entropy_with_logits
9,279
import tensorflow as tf
      output: []
      for output in model_options.outputs_to_num_classes
  }

  with tf.variable_scope(tf.get_variable_scope(), reuse=None):
    outputs_to_scales_to_logits = multi_scale_logits(
        images,
        model_options=model_options,
tensorflow.get_variable_scope
9,280
from tensorflow.python.layers import convolutional as conv_layers input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding=mode, data_format=self.channel_pos, use_bias=False) else: # Special padding mode for ResNet models if d_height == 1 and d_width == 1: conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding='SAME', data_format=self.channel_pos, use_bias=False) else:
tensorflow.python.layers.convolutional.conv2d
9,281
import tensorflow as tf
tf.app.flags.DEFINE_float('ws_lrn_rate_ft', 3e-4, 'WS: learning rate for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_feval', 25, 'WS: # of iterations for fast evaluation')
tf.app.flags.DEFINE_float('ws_prune_ratio_exp', 3.0, 'WS: pruning ratio\'s exponent term')
tensorflow.app.flags.DEFINE_integer
9,282
import tensorflow as tf # load model. model = importlib.import_module(model) analysis_transform = model.AnalysisTransform(latent_points) hyper_encoder = model.HyperEncoder() hyper_decoder = model.HyperDecoder() entropy_bottleneck = EntropyBottleneck() conditional_entropy_model = SymmetricConditional() checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform, hyper_encoder=hyper_encoder, hyper_decoder=hyper_decoder, estimator=entropy_bottleneck) status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir)) x = tf.convert_to_tensor(x_color, "float32") x_coori = tf.convert_to_tensor(x_coori, "float32") def loop_analysis(element): x = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) y = analysis_transform(x_coori,x) return tf.squeeze(y,axis=0) element = [x,x_coori]
tensorflow.train.latest_checkpoint
9,283
import tensorflow as tf Returns: out_string: A tf.tensor of dtype string. If string_tensor contains the empty string, out_string will contain a random integer casted to a string. Otherwise string_tensor is returned unchanged. """ empty_string = tf.constant('', dtype=tf.string, name='EmptyString') random_source_id = tf.as_string( tf.random_uniform(shape=[], maxval=2 ** 63 - 1, dtype=tf.int64)) out_string = tf.cond( tf.equal(string_tensor, empty_string), true_fn=lambda: random_source_id, false_fn=lambda: string_tensor) return out_string def _get_features_dict(input_dict): """Extracts features dict from input dict.""" source_id = _replace_empty_string_with_random_number( input_dict[fields.InputDataFields.source_id]) hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS)
tensorflow.equal
9,284
import tensorflow as tf def contra_step_lossV3(pred, tgt, margin=1.0): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1)
tensorflow.split
9,285
import tensorflow as tf mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts))
tensorflow.shape
9,286
import tensorflow as tf def simple_block_attention( rep_tensor, rep_mask, block_len=5, scope=None, direction=None, keep_prob=1., is_train=None, wd=0., activation='elu', hn=None): assert direction is not None def scaled_tanh(x, scale=5.): return scale * tf.nn.tanh(1. / scale * x) bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2] ivec = hn or rep_tensor.get_shape().as_list()[2] input_dim = rep_tensor.get_shape().as_list()[2] with tf.variable_scope(scope or 'block_simple'): # @1. split sequence with tf.variable_scope('split_seq'): block_num = tf.cast(tf.ceil(tf.divide(tf.cast(sl, tf.float32), tf.cast(block_len, tf.float32))), tf.int32) comp_len = block_num * block_len - sl rep_tensor_comp = tf.concat([rep_tensor, tf.zeros([bs, comp_len, input_dim], tf.float32)], 1) rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1) rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim]) # bs,bn,bl,d rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len]) # bs,bn,bl # non-linear
tensorflow.variable_scope
9,287
import tensorflow as tf config.add_hparam("n_classes", 10) config.add_hparam("dataset", "cifar-10") # Reconstruction could cause numerical error, use double precision for tests config.dtype = tf.float64 config.fused = False # Fused batch norm does not support tf.float64 # Reduce the batch size for tests because the OSS version runs # in constrained GPU environment with 1-2GB of memory. config.batch_size = 2 shape = (config.batch_size,) + config.input_shape self.model = revnet.RevNet(config=config) self.x = tf.random_normal(shape=shape, dtype=tf.float64) self.t = tf.random_uniform( shape=[config.batch_size], minval=0, maxval=config.n_classes, dtype=tf.int64) self.config = config def tearDown(self): del self.model del self.x del self.t
tensorflow.random_uniform
9,288
import tensorflow as tf
        self._saver = None
        self.global_step = None

        tf.set_random_seed(seed)

        # restore checkpoint
tensorflow.set_random_seed
9,289
import tensorflow as tf
    def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
        return gtboxes_and_label_h[:, :int(max(num_objects)), :].astype(np.float32), \
               gtboxes_and_label_r[:, :int(max(num_objects)), :].astype(np.float32)

    def main(self):
        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
            num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
            global_step = slim.get_or_create_global_step()
            lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu*cfgs.BATCH_SIZE)
            tf.summary.scalar('lr', lr)
tensorflow.device
9,290
import tensorflow as tf
  if not isinstance(data, dict):
    data = {'data': data}
  if 'length' not in data:
    example = data[list(data.keys())[0]]
    data['length'] = (
        tf.zeros((tf.shape(example)[0],), tf.int32) + tf.shape(example)[1])
  return data


def train(model_fn, datasets, logdir, config):
tensorflow.shape
9,291
import tensorflow as tf tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set') tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs') tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs') tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every') tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization') tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ') tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information') tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode') tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size') tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ') tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set') tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info') tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps') FLAGS = tf.app.flags.FLAGS slim = tf.contrib.slim AUTOENCODER = 'ae' PREDICTIVE = 'pred' DENOISING = 'noise' CHECKPOINT_NAME = '-9999.chpt' EMB_SUFFIX = '_embedding' def is_stopping_point(current_epoch, epochs_to_train, stop_every=None, stop_x_times=None,
tensorflow.app.flags.DEFINE_integer
9,292
import tensorflow as tf def _add_losses(self, sigma_rpn=3.0): with tf.variable_scope('loss_' + self._tag): # RPN, class loss rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2]) rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1]) # 得到前景和背景anchor的index rpn_select = tf.where(tf.not_equal(rpn_label, -1)) rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2]) rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1]) rpn_cross_entropy = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label)) # RPN, bbox loss
tensorflow.not_equal
9,293
import tensorflow as tf
            self.predictions = tf.argmax(self.fc3, 1, name="predictions")
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y)
            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            self.loss = tf.reduce_mean(losses) + sum(regularization_losses)
tensorflow.get_collection
9,294
import tensorflow as tf def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def create_model(config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings, task_name,): """Creates a classification model from_scratch.""" _true_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), dtype=tf.int32) with tf.variable_scope("baseline"): with tf.variable_scope("embeddings"): # Perform embedding lookup on the word ids. (word_embedding_output, output_embedding_table) = modeling.embedding_lookup( input_ids=input_ids,
tensorflow.reduce_sum
9,295
import tensorflow as tf
  probabilities = tf.nn.softmax(logits, axis=-1)
  log_probs = tf.nn.log_softmax(logits, axis=-1)

  one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

  per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
tensorflow.one_hot
9,296
import tensorflow as tf tf.train.start_queue_runners() result, batch_size = session.run(output) self.assertAllEqual([[3, 5]], result) self.assertAllEqual([1], batch_size) def test_two(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): batch_size = tf.shape(a)[0] return a + b, tf.tile([batch_size], [batch_size]) output0 = f(tf.constant([1]), tf.constant([2])) output1 = f(tf.constant([2]), tf.constant([3])) tp = pool.ThreadPool(2) f0 = tp.apply_async(session.run, [output0]) f1 = tp.apply_async(session.run, [output1]) # Make sure both inputs are in the batcher before starting it. time.sleep(_SLEEP_TIME) tf.train.start_queue_runners() result0, batch_size0 = f0.get() result1, batch_size1 = f1.get() self.assertAllEqual([3], result0)
tensorflow.constant
9,297
import tensorflow as tf
                tf.argmax(self.predictions_action, 1)
            )
            self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
            tf.scalar_summary('accuracy_action', self.accuracy_action)

            correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
                                                    tf.argmax(self.predictions_arguments, 2))
            self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
            tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
tensorflow.cast
9,298
from tensorflow.python.ops import array_ops
  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
tensorflow.python.ops.array_ops.shape
9,299