Columns:
  seed      string, lengths 25 to 2.89k   (source code snippet)
  seed_api  string, lengths 14 to 102     (fully qualified TensorFlow API the snippet exercises)
  index     int64, 0 to 14.8k             (row index)

Each row below lists its seed snippet, then its seed_api, then its index.
import tensorflow as tf

op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])
restore_op.append(op)

loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction="none")

def _replica_loss(labels, logits):
    loss = loss_fn(labels, logits)
    return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)

def _train_step(inputs, labels, training):
    def _step_fn(inputs, labels):
        logit, embedding_vector = sok_sparse_demo(inputs, training=training)
        loss = _replica_loss(labels, logit)
        emb_var, other_var = sok.split_embedding_variable_from_others(sok_sparse_demo.trainable_variables)
        grads = tf.gradients(loss, emb_var + other_var,
                             colocate_gradients_with_ops=True,
                             unconnected_gradients=tf.UnconnectedGradients.NONE)
        emb_grads, other_grads = grads[:len(emb_var)], grads[len(emb_var):]
        if "plugin" in args.optimizer:
            emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
        else:
            with sok.OptimizerScope(emb_var):
                emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))
        with tf.control_dependencies([*emb_grads]):
            # in case NCCL runs concurrently via SOK and horovod
            other_grads = strategy.reduce("sum", other_grads)
        other_train_op = dense_opt.apply_gradients(zip(other_grads, other_var))
tensorflow.gradients
9,100
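Aside (not a dataset row): a minimal, self-contained sketch of the tf.gradients call this row is labeled with, assuming TF1-style graph mode via tf.compat.v1; shapes and values are illustrative only.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[3])
y = tf.reduce_sum(x * x)
# tf.gradients returns one gradient tensor per listed input: dy/dx = 2x.
(dy_dx,) = tf.gradients(y, [x])

with tf.Session() as sess:
    print(sess.run(dy_dx, feed_dict={x: [1.0, 2.0, 3.0]}))  # [2. 4. 6.]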
from tensorflow.contrib.learn.python.learn.datasets import base

                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True):
  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = base.maybe_download(TRAIN_IMAGES, train_dir, SOURCE_URL + TRAIN_IMAGES)
  train_images = extract_images(local_file)
tensorflow.contrib.learn.python.learn.datasets.base.Datasets
9,101
import tensorflow as tf

    input_size=config.hidden_size,
    dropout=1 - config.keep_prob if is_training else 0)
params_size_t = self._cell.params_size()
self._rnn_params = tf.get_variable(
    "lstm_params",
    initializer=tf.random_uniform(
        [params_size_t], -config.init_scale, config.init_scale),
    validate_shape=False)
c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32)
tensorflow.random_uniform
9,102
import tensorflow as tf

        'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],
        'target_paddings': [[1, 1, 1, 1], [0, 0, 1, 1]],
        'norm_wer_errors': [[1], [0]],
        'norm_wer_words': [[0], [4]],
    }
    metrics_dict = mdl.CreateDecoderMetrics()
    mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)
    self.assertEqual(1 + 0, metrics_dict['wer'].total_value)
    self.assertEqual(0 + 4, metrics_dict['wer'].total_weight)
    self.assertEqual(1 + 0, metrics_dict['norm_wer'].total_value)
    self.assertEqual(0 + 4, metrics_dict['norm_wer'].total_weight)

  def testBProp(self):
    with self.session(use_gpu=False):
      tf.set_random_seed(93820985)
      p = self._testParams()
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      mdl.BProp()
      tf.global_variables_initializer().run()
      test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())
      mdl.train_op.run()

  def testBPropSmoothDecay(self):
    with self.session(use_gpu=False):
      tf.set_random_seed(93820985)
      p = self._testParams()
      p.train.lr_schedule = (
          schedule.ContinuousLearningRateSchedule.Params().Set(
tensorflow.set_random_seed
9,103
from tensorflow.python.keras.utils.generic_utils import register_keras_serializable

        return outputs


@register_keras_serializable(package='Vitis', name='AveragePooling2D')
class VitisAveragePooling2D(tf.keras.layers.AveragePooling2D):
    """Vitis version of AveragePooling2D layer.
tensorflow.python.keras.utils.generic_utils.register_keras_serializable
9,104
import tensorflow as tf

        h = h + b
    return h

def conv2d(x, shape, name, bias=False, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        W = weight_variable(shape)
        h = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        if bias:
            b = bias_variable([shape[-1]])
            h = h + b
    return h

def deconv2d(x, shape, output_shape, name, bias=False, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        W = weight_variable(shape)
        h = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding=padding)
        if bias:
            b = bias_variable([shape[-2]])
            h = h + b
    return h

def conv3d(x, shape, name, bias=False, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        W = weight_variable(shape)
        h = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding=padding)
        if bias:
            b = bias_variable([shape[-1]])
            h = h + b
    return h
tensorflow.nn.conv2d_transpose
9,105
import tensorflow as tf

    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
tensorflow.shape
9,106
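Aside (not a dataset row): the row above reads a dimension at graph-run time with tf.shape(facts)[1]; a small sketch of the static-vs-dynamic shape distinction, assuming TF2 eager mode.

import tensorflow as tf

x = tf.zeros([2, 5, 8])
print(x.shape)      # static shape known at trace time: (2, 5, 8)
print(tf.shape(x))  # dynamic shape as a tensor: tf.Tensor([2 5 8], ...)
# tf.shape is the one to use when a dimension (e.g. batch size) is None statically.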
import tensorflow as tf

    flat_x = tf.layers.flatten(x)
    if self.use_epochs:
        epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32)
        # Randomly set epoch to 0 in some cases as that's the inference value.
tensorflow.zeros
9,107
import tensorflow as tf

      else:
        remainder = tf.matmul(remainder, sliced_core)
    else:
      if remainder is not None:
        # Add remainder from the previous collapsed cores to the current core.
        sliced_core = tf.einsum('ab,bid->aid', remainder, sliced_core)
        remainder = None
      new_tt_cores.append(sliced_core)

  if remainder is not None:
    # The remainder obtained from collapsing the last cores.
    new_tt_cores[-1] = tf.einsum('aib,bd->aid', new_tt_cores[-1], remainder)
    remainder = None

  # TODO: infer the output ranks and shape.
  return TensorTrain(new_tt_cores)


def _are_tt_cores_valid(tt_cores, shape, tt_ranks):
  """Check if dimensions of the TT-cores are consistent and the dtypes coincide.

  Args:
    tt_cores: a tuple of `Tensor` objects
    shape: An np.array, a tf.TensorShape (for tensors), a tuple of
tensorflow.einsum
9,108
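Aside (not a dataset row): the 'ab,bid->aid' contraction above multiplies a carried 2-D remainder into the left rank index of a TT-core; a minimal tf.einsum sketch with the same subscripts, shapes invented for illustration.

import tensorflow as tf

remainder = tf.random.normal([2, 3])  # shape [a, b]
core = tf.random.normal([3, 4, 5])    # shape [b, i, d]
merged = tf.einsum('ab,bid->aid', remainder, core)
print(merged.shape)                   # (2, 4, 5)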
import tensorflow as tf

                      cols[3] / height,
                      cols[2] / width], axis=1)
    # add batch dimension (assume batch_size==1)
    # assert image.get_shape()[0] == 1
    boxes = tf.expand_dims(boxes, dim=0)
    image = tf.image.draw_bounding_boxes(image, boxes)  # draw the ground-truth boxes on the image
    return tf.summary.image('ground_truth', image)

  def _add_act_summary(self, tensor):
    tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
    tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                      tf.nn.zero_fraction(tensor))

  def _add_score_summary(self, key, tensor):
    tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)

  def _add_train_summary(self, var):
    tf.summary.histogram('TRAIN/' + var.op.name, var)

  # Custom Layers #
  def _reshape_layer(self, bottom, num_dim, name):
    input_shape = tf.shape(bottom)
    with tf.variable_scope(name):
tensorflow.nn.zero_fraction
9,109
import tensorflow as tf

        relative_pos_gen.make_relative_att_ids(seq_len=5, batch_size=2))

  def test_make_relative_att_ids_batch_size_2_tensor(self):
    dummy_batch = tf.ones([2, 5])
    relative_pos_gen = feature_utils.RelativePositionGenerator(max_distance=3)
tensorflow.ones
9,110
from tensorflow.contrib.eager.python.examples.revnet import config as config_

    dev = tf.DeviceSpec.from_string(device).device_type.lower()
    name = "%s_%s_batch_%d_%s" % (label, dev, batch_size, data_format)
    extras = {"examples_per_sec": batch_size / avg_time}
    self.report_benchmark(
        iters=num_iters, wall_time=avg_time, name=name, extras=extras)

  def _benchmark_eager_apply(self, label, device_and_format, defun=False,
                             execution_mode=None):
    config = config_.get_hparams_imagenet_56()
    with tfe.execution_mode(execution_mode):
      device, data_format = device_and_format
      model = revnet.RevNet(config=config)
      if defun:
        # TODO(apassos): reenable after cond lets you return None
        model.call = tfe.defun(model.call)
      batch_size = 64
      num_burn = 5
      num_iters = 10
      with tf.device(device):
tensorflow.contrib.eager.python.examples.revnet.config.get_hparams_imagenet_56
9,111
import tensorflow as tf

    p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
                lambda: tf.print('csrt acc ', [pct]),
                lambda: tf.no_op())
    with tf.control_dependencies([p]):
        return tf.reduce_mean(loss)
tensorflow.control_dependencies
9,112
import tensorflow as tf

@registry.register_model
class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase):
  """Small cnn network with categorical output."""

  def body(self, features):
    observations = features["inputs"]
    x = tf.transpose(observations, [0, 2, 3, 1, 4])
    x_shape = common_layers.shape_list(x)
    x = tf.reshape(x, x_shape[:-2] + [-1])
    dropout = getattr(self.hparams, "dropout_ppo", 0.0)
    with tf.variable_scope("feed_forward_cnn_small"):
      x = tf.cast(x, tf.float32) / 255.0
      x = tf.nn.dropout(x, rate=dropout)
      x = tf.layers.conv2d(
          x, 32, (4, 4), strides=(2, 2), name="conv1",
          activation=common_layers.belu, padding="SAME")
      x = tf.nn.dropout(x, rate=dropout)
      x = tf.layers.conv2d(
          x, 64, (4, 4), strides=(2, 2), name="conv2",
          activation=common_layers.belu, padding="SAME")
      x = tf.nn.dropout(x, rate=dropout)
      x = tf.layers.conv2d(
          x, 128, (4, 4), strides=(2, 2), name="conv3",
          activation=common_layers.belu, padding="SAME")
      flat_x = tf.layers.flatten(x)
      flat_x = tf.nn.dropout(flat_x, rate=dropout)
      x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1")
tensorflow.layers.conv2d
9,113
import tensorflow as tf

  degree_l = []
  order_m = []
  for degree in range(0, max_band + 1):
    for order in range(-degree, degree + 1):
      degree_l.append(degree)
      order_m.append(order)
  return (tf.convert_to_tensor(value=degree_l),
          tf.convert_to_tensor(value=order_m))


def _evaluate_legendre_polynomial_pmm_eval(m, x):
  pmm = tf.pow(1.0 - tf.pow(x, 2.0), tf.cast(m, dtype=x.dtype) / 2.0)
  ones = tf.ones_like(m)
  pmm *= tf.cast(
      tf.pow(-ones, m) * double_factorial(2 * m - 1), dtype=pmm.dtype)
  return pmm


def _evaluate_legendre_polynomial_loop_cond(x, n, l, m, pmm, pmm1):
  return tf.cast(tf.math.count_nonzero(n <= l), tf.bool)
tensorflow.cast
9,114
import tensorflow as tf

    sequence_lengths = tf.convert_to_tensor(sequence_lengths)
    with tf.control_dependencies(
        [tf.assert_equal(sequence.shape[1], sequence_lengths.shape[0])]):
      return tf.reverse_sequence(
          sequence, sequence_lengths, seq_axis=0, batch_axis=1)
tensorflow.reverse_sequence
9,115
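Aside (not a dataset row): tf.reverse_sequence reverses only the first lengths[i] steps of each batch entry; a minimal sketch (batch-major here, whereas the row above is time-major with seq_axis=0, batch_axis=1).

import tensorflow as tf

seq = tf.constant([[1, 2, 3, 0],
                   [4, 5, 0, 0]])
lens = tf.constant([3, 2])
print(tf.reverse_sequence(seq, lens, seq_axis=1, batch_axis=0))
# [[3 2 1 0]
#  [5 4 0 0]]  -- padding beyond each length is left in place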
import tensorflow as tf

    with tf.variable_scope(target_modality.name):
      log_info("Transforming body output with %s.top", target_modality.name)
      last_only = (
          target_modality.top_is_pointwise and
          self.hparams.mode == tf.estimator.ModeKeys.PREDICT and
          not self.hparams.force_full_predict)
      if not last_only:
        logits = target_modality.top(body_output, features["targets"])
      else:
        # Take body outputs for the last position only, and targets too.
        last_position_body_output = tf.expand_dims(
            body_output[:, -1, :, :], axis=[1])
        last_position_targets = tf.expand_dims(
            features["targets"][:, -1:, :, :], axis=[1])
        logits = target_modality.top(last_position_body_output,
                                     last_position_targets)
    return logits

  def top(self, body_output, features):
    if isinstance(body_output, dict):
      logits = {}
      for k, v in body_output.iteritems():
        logits[k] = self._top_single(v, features)
      return logits
tensorflow.expand_dims
9,116
import tensorflow as tf

    # Project the embeddings to space dimensions for visualization
    tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)


def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
        tf.summary.scalar("before_loss", model.before_loss)
        tf.summary.scalar("after_loss", model.after_loss)
tensorflow.variable_scope
9,117
import tensorflow as tf tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate(
tensorflow.logging.info
9,118
import tensorflow as tf

    interpreter.set_tensor(in_node['index'], images)
    interpreter.invoke()
    prediction = interpreter.get_tensor(out_node['index'])
    result = tf.argmax(prediction, axis=1).numpy()
    print('accuracy={:.4f}'.format(np.sum(result == y_test) / y_test.shape[0]))


if __name__ == '__main__':
    convert_tflite()
tensorflow.argmax
9,119
import tensorflow as tf

# Initialize session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
tensorflow.global_variables_initializer
9,120
import tensorflow as tf

            dimensions being batch_size and window_size.
        :param value_if_masked: Value to fill for masked positions.
        :param expected_extra_dims: Expected extra dimensions.
        :returns: Tensor of same shape as expanded_tensor, but with
            `value_if_masked` filled in masked dimensions.
        """
        mask_shape = list(map(int, self.mask.shape))
        graph_typecheck.assert_shape(expanded_tensor, mask_shape + expected_extra_dims)
        value_if_masked = expanded_tensor.dtype.as_numpy_dtype(value_if_masked)
        if_masked_tensor = tf.fill(expanded_tensor.shape, value_if_masked)
        mask = self.mask
        for i in range(2, 2 + len(expected_extra_dims)):
            mask = tf.expand_dims(mask, axis=i)
        mask = tf.tile(mask, [1, 1] + expected_extra_dims)
        return tf.where(mask, expanded_tensor, if_masked_tensor)


def initial_layer(
    window_feature: WindowFeatures, *, clip_magnitude=10.0, include_flux_and_time=False
) -> tf.Tensor:
    features = tf.expand_dims(window_feature.dflux_dt(clip_magnitude=clip_magnitude), 2)
    if include_flux_and_time:
        dflux = tf.expand_dims(window_feature.dflux, 2)
        dtime = tf.expand_dims(window_feature.dtime, 2)
        features = tf.concat([features, dflux, dtime],
tensorflow.expand_dims
9,121
import tensorflow as tf

                 k_h=4, k_w=4, d_h=2, d_w=2, stddev=0.02, data_format='NHWC', epsilon=1e-9):
        with tf.variable_scope(name):
            assert data_format == 'NHWC'
            self.v = tf.get_variable('v', [k_h, k_w, out_dim, input_dim],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
            self.g = tf.get_variable('g', [out_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.b = tf.get_variable('b', [out_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.strides = [1, d_h, d_w, 1]
            self.epsilon = epsilon

    def __call__(self, input_var, name=None, **kwargs):
        shapes = tf.shape(input_var)
        shapes = tf.stack([shapes[0],
                           shapes[1] * self.strides[1],
                           shapes[2] * self.strides[2],
                           tf.shape(self.b)[0]])

        def _init():
            v_norm = tf.nn.l2_normalize(self.v, axis=[0, 1, 3])
            t = tf.nn.conv2d_transpose(input_var, v_norm,
                                       output_shape=shapes,
                                       strides=self.strides,
                                       padding='SAME',
                                       data_format='NHWC')
            mu, var = tf.nn.moments(t, axes=[0, 1, 2])
            std = tf.sqrt(var + self.epsilon)
            return [tf.assign(self.g, 1 / std), tf.assign(self.b, -1. * mu / std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
tensorflow.shape
9,122
import tensorflow as tf

            start_logits = tf.squeeze(
                conv(self._attention(tf.concat([self.enc[1], self.enc[2]], axis=-1),
                                     name="attn1"), 1, bias=False, name="start_pointer"), -1)
            end_logits = tf.squeeze(
                conv(self._attention(tf.concat([self.enc[1], self.enc[3]], axis=-1),
                                     name="attn2"), 1, bias=False, name="end_pointer"), -1)
        else:
            start_logits = tf.squeeze(
tensorflow.concat
9,123
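Aside (not a dataset row): the pointer code above concatenates two encoder blocks along the feature axis before scoring; a minimal tf.concat sketch showing that only the concatenated axis may differ in size.

import tensorflow as tf

a = tf.ones([2, 3, 4])
b = tf.zeros([2, 3, 6])
c = tf.concat([a, b], axis=-1)  # all axes but the last must match
print(c.shape)                  # (2, 3, 10)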
import tensorflow as tf

    # Start by setting a seed for repeatability
    tf.set_random_seed(hparams.tacotron_random_seed)

    # Set up data feeder
    coord = tf.train.Coordinator()
    with tf.variable_scope("datafeeder") as scope:
        feeder = Feeder(coord, metadat_fpath, hparams)
tensorflow.train.Coordinator
9,124
import tensorflow as tf

    if use_tpu:
      def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

  tf.logging.info("**** Trainable Variables ****")
  for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
tensorflow.train.init_from_checkpoint
9,125
import tensorflow as tf

tf.app.flags.DEFINE_float('ws_iter_ratio_end', 0.5, 'WS: iteration ratio (at ending time)')
tf.app.flags.DEFINE_float('ws_mask_update_step', 500, 'WS: step size for updating the pruning mask')


def calc_prune_ratio(vars_list):
  """Calculate the overall pruning ratio for the given list of variables.

  Args:
  * vars_list: list of variables

  Returns:
  * prune_ratio: overall pruning ratio of the given list of variables
  """
  nb_params_nnz = tf.add_n([tf.count_nonzero(var) for var in vars_list])
  nb_params_all = tf.add_n([tf.size(var) for var in vars_list])
  prune_ratio = 1.0 - tf.cast(nb_params_nnz, tf.float32) / tf.cast(nb_params_all, tf.float32)
  return prune_ratio


class WeightSparseLearner(AbstractLearner):  # pylint: disable=too-many-instance-attributes
  """Weight sparsification learner."""

  def __init__(self, sm_writer, model_helper):
    """Constructor function.

    Args:
    * sm_writer: TensorFlow's summary writer
tensorflow.size
9,126
import tensorflow as tf

      # Parse 'None' into None.
      input_partition_dims = [
          None if x == 'None' else _maybe_transpose(x)
          for x in input_partition_dims
      ]
  else:
    tpu_cluster_resolver = None

  # Sets up config for TPUEstimator.
  tpu_config = tf.contrib.tpu.TPUConfig(
      params.train.iterations_per_loop,
      num_cores_per_replica=num_cores_per_replica,
      input_partition_dims=input_partition_dims,
      per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2  # pylint: disable=line-too-long
  )

  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
tensorflow.contrib.tpu.TPUConfig
9,127
import tensorflow as tf

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
tensorflow.shape
9,128
from tensorflow.python.ops import array_ops

    model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
    params_size_t = model.params_size()
    input_data = variables.Variable(
        array_ops.ones([seq_length, batch_size, num_units]))
    input_h = variables.Variable(
        array_ops.ones([num_layers, batch_size, num_units]))
    input_c = variables.Variable(
        array_ops.ones([num_layers, batch_size, num_units]))
    params = variables.Variable(
        array_ops.ones([params_size_t]), validate_shape=False)
    output, output_h, output_c = model(
        is_training=True,
        input_data=input_data,
        input_h=input_h,
        input_c=input_c,
        params=params)
    all_grads = gradients_impl.gradients(
        [output, output_h, output_c],
tensorflow.python.ops.array_ops.ones
9,129
import tensorflow as tf

    h = embed(X, we)  # h=[-1,n_ctx,emb]
    for layer in range(n_layer):
        h = block(h, 'h%d' % layer, train=train, scale=True)

    # h=[-1,n_ctx,emb]  lm_h=[-1,emb]
    lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
    lm_logits = tf.matmul(lm_h, we, transpose_b=True)
    lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
    lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1] - 1])
    lm_losses = tf.reduce_sum(lm_losses * M[:, 1:], 1) / tf.reduce_sum(M[:, 1:], 1)

    clf_h = tf.reshape(h, [-1, n_embd])
    pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
    clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32) * n_ctx + pool_idx)
    clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
    if train and clf_pdrop > 0:
        shape = shape_list(clf_h)
        shape[1] = 1
        clf_h = tf.nn.dropout(clf_h, 1 - clf_pdrop, shape)
    clf_h = tf.reshape(clf_h, [-1, n_embd])
    clf_logits = clf(clf_h, 1, train=train)
    clf_logits = tf.reshape(clf_logits, [-1, 2])
tensorflow.reshape
9,130
import tensorflow as tf "r").extractall(tmp_dir) if dataset[1][0] == "tmx": cleaning_requested = "tmx" in datatypes_to_clean tmx_filename = os.path.join(tmp_dir, dataset[1][1]) if tmx_filename.endswith(".gz"): with gzip.open(tmx_filename, "rb") as tmx_file: _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile, do_cleaning=cleaning_requested) else: with tf.gfile.Open(tmx_filename) as tmx_file: _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile, do_cleaning=cleaning_requested) elif dataset[1][0] == "tsv": _, src_column, trg_column, glob_pattern = dataset[1] filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern)) if not filenames: # Capture *.tgz and *.tar.gz too. mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
tensorflow.gfile.Open
9,131
import tensorflow as tf

  x, n, l, m, pmm, pmm1 = tf.while_loop(
      cond=_evaluate_legendre_polynomial_loop_cond,
      body=_evaluate_legendre_polynomial_loop_body,
      loop_vars=[x, n, l, m, pmm, pmm1])
  return pmm1


def _evaluate_legendre_polynomial_branch(l, m, x, pmm):
  pmm1 = x * (2.0 * tf.cast(m, dtype=x.dtype) + 1.0) * pmm
  # if l == m + 1 return pmm1, otherwise lift to the next band.
  res = tf.where(
      tf.equal(l, m + 1), pmm1,
      _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1))
  return res


def evaluate_legendre_polynomial(degree_l: TensorLike,
                                 order_m: TensorLike,
                                 x: TensorLike) -> TensorLike:
  degree_l = tf.convert_to_tensor(value=degree_l)
  order_m = tf.convert_to_tensor(value=order_m)
  x = tf.convert_to_tensor(value=x)
tensorflow.equal
9,132
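Aside (not a dataset row): the branch above selects between pmm1 and a loop result with tf.where(tf.equal(...)); a minimal element-wise sketch of that select pattern.

import tensorflow as tf

l = tf.constant([2, 3, 4])
m = tf.constant([1, 1, 1])
taken_if_true = tf.constant([10.0, 20.0, 30.0])
taken_if_false = tf.constant([-1.0, -2.0, -3.0])
out = tf.where(tf.equal(l, m + 1), taken_if_true, taken_if_false)
print(out)  # [10. -2. -3.]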
from tensorflow.python.framework import ops as _ops

                     (self._name, self._shape))
    assert self._send_tpu_core != self._recv_tpu_core, (
        "TPU send/recv must be cross-core: %s and %s" %
        (send_device, recv_device))

  def Send(self, tensor):
    """Sends a tensor through the channel."""
    assert tensor.dtype == self._dtype
    assert not self._send_called, (
        "Send called multiple times for %s" % self._name)
    self._send_called = True
    if self._send_tpu_core == -1:
      return _Send(tensor, self._name, self._send_device, self._recv_device)
    else:
      with _ops.device(self._send_device):
        return _XlaSend(
            tensor, tensor_name=self._name, name="Send_" + self._name)

  def Recv(self):
    """Receives a tensor from the channel."""
    if self._send_tpu_core == -1:
      return _Recv(self._dtype, self._name, self._send_device, self._recv_device)
    else:
      with _ops.device(self._recv_device):
        return _XlaRecv(
            self._dtype, tensor_name=self._name,
tensorflow.python.framework.ops.device
9,133
import tensorflow as tf

    tf.compat.v2.summary.scalar(
        name="orig_task_optimal", data=orig_opt_frac, step=global_step)

    # How often is the relabelled goal optimal?
    # The relabel_indices are [B, 1], so we need to remove the extra dim.
    relabel_is_opt = tf.squeeze(relabel_indices) == orig_indices
    relabel_opt_frac = tf.reduce_mean(tf.cast(relabel_is_opt, tf.float32))
    tf.compat.v2.summary.scalar(
        name="relabel_task_optimal", data=relabel_opt_frac, step=global_step)
tensorflow.squeeze
9,134
import tensorflow as tf

        # Compute Pearson correlation
        pearson = contrib_metrics.streaming_pearson_correlation(
            logits, label_ids, weights=is_real_example)

        # Compute MSE
        # mse = tf.metrics.mean(per_example_loss)
        mse = tf.metrics.mean_squared_error(
            label_ids, logits, weights=is_real_example)

        loss = tf.metrics.mean(
            values=per_example_loss, weights=is_real_example)
tensorflow.metrics.mean_squared_error
9,135
import tensorflow as tf

        input_mask=inp_mask)
    summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)

    with tf.variable_scope("logits"):
        logits = tf.layers.dense(summary, 1,
                                 kernel_initializer=xlnet_model.get_initializer())
        logits = tf.reshape(logits, [bsz_per_core, 4])

        one_hot_target = tf.one_hot(label, 4)
        per_example_loss = -tf.reduce_sum(
            tf.nn.log_softmax(logits) * one_hot_target, -1)
        total_loss = tf.reduce_mean(per_example_loss)

    return total_loss, per_example_loss, logits
tensorflow.one_hot
9,136
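Aside (not a dataset row): the snippet above builds its softmax cross-entropy by hand from tf.one_hot and tf.nn.log_softmax; a minimal sketch of the same computation.

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1, 0.1],
                      [0.1, 0.2, 3.0, 0.1]])
labels = tf.constant([0, 2])
one_hot_target = tf.one_hot(labels, depth=4)  # shape [2, 4]
per_example_loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1)
total_loss = tf.reduce_mean(per_example_loss)
# matches tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)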
import tensorflow as tf

            with tf.variable_scope('fully_connected'):
                aux_logits = self._add_fully_connected(X, (ch,), K, no_reg=True)
        return aux_logits

    def _compute_predictions(self, logits, classes):
        probs = tf.nn.softmax(logits)
        preds = tf.argmax(logits, axis=1, output_type=tf.int32)
        corrects = tf.equal(preds, classes)
        return (probs, corrects)

    def _compute_loss(self, logits, aux_logits_list, classes, **knobs):
tensorflow.nn.softmax
9,137
import tensorflow as tf

        add_weight_decay(params['weight_decay'])
        regularization_loss = tf.losses.get_regularization_loss()

        # create localization and classification losses
        losses = ssd.loss(labels, params)
        tf.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss'])
        tf.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss'])
        tf.summary.scalar('regularization_loss', regularization_loss)
        tf.summary.scalar('localization_loss', losses['localization_loss'])
        tf.summary.scalar('classification_loss', losses['classification_loss'])
        total_loss = tf.losses.get_total_loss(add_regularization_losses=True)

        if mode == tf.estimator.ModeKeys.EVAL:
tensorflow.summary.scalar
9,138
import tensorflow as tf

                + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
        else:
            new_state = ((1 - self.alpha) * state) \
                + self.alpha * (
                    tf.matmul(
                        tf.nn.relu(state),
                        self.W_rec * self.rec_Connectivity,
                        transpose_b=True, name="1")
                    + tf.matmul(
                        rnn_in,
                        self.W_in * self.input_Connectivity,
                        transpose_b=True, name="2")
                    + self.b_rec) \
                + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)

        return new_state
tensorflow.matmul
9,139
import tensorflow as tf

        output = inputs * self.kernel
        if self.use_bias:
            output = output + self.bias
        if self.activation is not None:
            output = self.activation(output)
        return output


nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
ColWiseMultLayer = lambda name: Lambda(
    lambda l: l[0] * (tf.matmul(tf.reshape(l[1], (-1, 1)),
                                tf.ones((1, l[0].get_shape()[1]), dtype=l[1].dtype))),
    name=name)
tensorflow.is_nan
9,140
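Aside (not a dataset row): the nan2zeroLayer above maps NaNs to zeros with tf.where; a minimal sketch (tf.is_nan is tf.math.is_nan in TF2).

import tensorflow as tf

x = tf.constant([1.0, float('nan'), 3.0])
clean = tf.where(tf.math.is_nan(x), tf.zeros_like(x), x)
print(clean)  # [1. 0. 3.]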
from tensorflow.python.ops import array_ops

        self._batch_ndims_is_0, 2, 1 + self.batch_ndims)
    event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,))
    new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape))
    x = array_ops.reshape(x, shape=new_shape)
    return x
tensorflow.python.ops.array_ops.reshape
9,141
import tensorflow as tf

      print('Using ranking trajectory loss ', config.contra_horizon)
      contra_loss, cstr_pct = contra_traj_lossV9(
          pred, target[name], horizon=config.contra_horizon, margin=config.margin)
      objectives.append((Objective(name, contra_loss, min, include, exclude)))
    elif name == 'reward' and config.r_loss == 'l2':
      pred = heads[name](features)
      l2_loss = tf.compat.v1.losses.mean_squared_error(target[name], pred)
      # l2_loss = tf.nn.l2_loss(pred - target[name])
      objectives.append((Objective(name, l2_loss, min, include, exclude)))
    else:
      if not config.aug_same and config.aug:
        recon_feat = tf.concat([features, target['aug']], -1)
        print('Use recon feature ', name, recon_feat)
        logprob = heads[name](recon_feat).log_prob(target[name])
        # logprob = heads[name](features).log_prob(target['ori_img'])
      else:
        logprob = heads[name](features).log_prob(target[name])
      objectives.append(Objective(name, logprob, max, include, exclude))

  objectives = [o._replace(value=tf.reduce_mean(o.value)) for o in objectives]
  return objectives, cstr_pct
tensorflow.concat
9,142
import tensorflow as tf

tf.app.flags.DEFINE_float(
    'warmup_learning_rate', 0.00001,
    'The start warm-up learning rate to avoid NAN.')
tf.app.flags.DEFINE_integer(
    'warmup_steps', 100,
    'The total steps to warm-up.')
tensorflow.app.flags.DEFINE_integer
9,143
import tensorflow as tf

        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

      scaffold_fn = tpu_scaffold
    else:
      tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
tensorflow.logging.info
9,144
import tensorflow as tf

  with tf.Graph().as_default():
    # create a TF session for the current graph
    config = tf.ConfigProto()
    if FLAGS.enbl_multi_gpu:
tensorflow.ConfigProto
9,145
import tensorflow as tf

    # split
    input_images_split = tf.split(input_images, len(gpus))
    input_score_maps_split = tf.split(input_score_maps, len(gpus))
    input_geo_maps_split = tf.split(input_geo_maps, len(gpus))
    input_training_masks_split = tf.split(input_training_masks, len(gpus))

    tower_grads = []
    reuse_variables = None
    for i, gpu_id in enumerate(gpus):
        # with tf.device('/gpu:%d' % gpu_id):
        with tf.name_scope('model_%d' % gpu_id) as scope:
            iis = input_images_split[i]
            isms = input_score_maps_split[i]
            igms = input_geo_maps_split[i]
            itms = input_training_masks_split[i]
            total_loss, model_loss = tower_loss(iis, isms, igms, itms, reuse_variables)
            batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
            reuse_variables = True
            grads = opt.compute_gradients(total_loss)
tensorflow.name_scope
9,146
import tensorflow as tf

def softmax_mask(val, mask):
    return -INF * (1 - tf.cast(mask, tf.float32)) + val

def pointer(inputs, state, hidden, mask, scope="pointer"):
    with tf.variable_scope(scope):
        u = tf.concat([tf.tile(tf.expand_dims(state, axis=1),
                               [1, tf.shape(inputs)[1], 1]), inputs], axis=2)  # [N,PL,2d]
        s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope="s0"))
        s = dense(s0, 1, use_bias=False, scope="s")
        s1 = softmax_mask(tf.squeeze(s, [2]), mask)  # [N,PL]
        a = tf.expand_dims(tf.nn.softmax(s1), axis=2)  # [N,PL,1]
        res = tf.reduce_sum(a * inputs, axis=1)
        return res, s1  # attention_sum probability

def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"):
    with tf.variable_scope(scope):
        d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
        s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0"))
        s = dense(s0, 1, use_bias=False, scope="s")
tensorflow.squeeze
9,147
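Aside (not a dataset row): the pointer code above uses tf.squeeze(s, [2]) to drop the size-1 score axis; a minimal sketch showing that only the listed axes are removed.

import tensorflow as tf

s = tf.zeros([4, 7, 1])
print(tf.squeeze(s, [2]).shape)  # (4, 7): axis 2 had size 1 and is dropped
print(tf.squeeze(s).shape)       # (4, 7): with no axis list, all size-1 axes go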
import tensorflow as tf

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
tensorflow.matmul
9,148
import tensorflow as tf

    def _optimize(self, loss, step, **knobs):
        opt_momentum = knobs['opt_momentum']        # Momentum optimizer momentum
        grad_clip_norm = knobs['grad_clip_norm']    # L2 norm to clip gradients by

        # Compute learning rate, gradients
        tf_trainable_vars = tf.trainable_variables()
        lr = self._get_learning_rate(step, **knobs)
        grads = tf.gradients(loss, tf_trainable_vars)
        self._mark_for_monitoring('lr', lr)

        # Clip gradients
        if grad_clip_norm > 0:
            grads = [tf.clip_by_norm(x, grad_clip_norm) for x in grads]
tensorflow.gradients
9,149
import tensorflow as tf

        self.sess.run(self.train_op, feed_dict={S: s})
        if self.t_replace_counter % self.t_replace_iter == 0:
            self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
        self.t_replace_counter += 1
tensorflow.assign
9,150
import tensorflow as tf

def sparse_conv2d_matmul(x, w, blk_indices, strides, padding):
    """
    Performs 2D convolution using matrix multiplication on a sparse feature map.
    Naive python implementation of sparse convolution using gather and scatter.

    :param x:           [Tensor] [N, H, W, C]. Input activation tensor, dtype float32.
    :param w:           [Tensor] [I, J, C, K]. Convolution kernel, dtype float32.
    :param blk_indices: [Tensor] [M, h, w, 3]. Block indices of rectangles.
    :param strides:     [list]   List of 4 int, convolution strides.
    :param padding:     [string] `VALID` or `SAME`, padding method for sparse convolution.

    :return             [Tensor] [N, H', W', C]. Convolution results.
    """
    blk_indices_ = tf.reshape(blk_indices, [-1, 3])
    blk_shape = tf.shape(blk_indices)
    ksize = tf.shape(w)

    # Calculate the block strides.
    bstrides = _calc_block_strides(blk_shape, ksize, strides)

    # Calculate the output size.
    x_shape = tf.shape(x)
    out_shape = calc_out_size_4d(x_shape, ksize, strides, padding)

    # Pad input.
    x_ = _pad_input(
        x, ksize, strides, padding,
        bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides)
tensorflow.reshape
9,151
import tensorflow as tf

        mode=mode,
        predictions=predictions_dict,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=export_outputs)

# Create serving input function
def serving_input_fn():
    feature_placeholders = {
        TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
    }
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }
    features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis=[2])
    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)

# Create custom estimator's train and evaluate function
def train_and_evaluate(output_dir, use_keras):
    if use_keras:
        estimator = make_keras_estimator(output_dir)
    else:
        estimator = tf.estimator.Estimator(model_fn=simple_rnn, model_dir=output_dir)
tensorflow.expand_dims
9,152
import tensorflow as tf

                                 [self.batch_size, s_h0, s_w0, nf0],
                                 name='d_h3', d_h=ns1, d_w=ns1))
            print(h3.get_shape())
            h4 = deconv2d(tf.concat([h3, skip_h0], 3),
                          [self.batch_size, s_h, s_w, self.c_dim],
                          name='d_h4', d_h=ns0, d_w=ns0)
            return h4

        with tf.variable_scope("deconv") as scope:
            output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)
            scope.reuse_variables()
            truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)

        self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3
        print(tgtimg_z.get_shape())
        self.out = output_h4
        self.out2 = truthoutput_h4
        print(self.out.get_shape())

        self.recon1 = tf.nn.l2_loss(tgtimg - self.out)
        self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)
        self.loss = self.recon1 + self.recon2 + self.simloss
        if ablation_type == "None":
            self.loss = self.recon1 + self.recon2 + self.simloss
tensorflow.reduce_mean
9,153
from tensorflow.python.training import training_util

        self._training_initial_clusters, self._num_clusters, self._random_seed,
        self._covariance_type, self._params)
    incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses)
    training_op = with_dependencies([training_op, incr_step], loss)
    training_hooks = [_InitializeClustersHook(
tensorflow.python.training.training_util.get_global_step
9,154
import tensorflow as tf

            gtboxes_and_label_h = get_horizen_minAreaRectangle(
                tf.reshape(gtboxes_and_label_batch[start:end], [-1, 9]))
            gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])
            gtboxes_and_label_q = tf.reshape(gtboxes_and_label_batch[start:end],
                                             [cfgs.BATCH_SIZE, -1, 9])

            num_objects = num_objects_batch[start:end]
            num_objects = tf.cast(tf.reshape(num_objects, [cfgs.BATCH_SIZE, -1, ]), tf.float32)

            img_h = img_h_batch[start:end]
            img_w = img_w_batch[start:end]

            inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_q,
                                num_objects, img_h, img_w])
tensorflow.reshape
9,155
import tensorflow as tf

pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
two = tf.constant(2, dtype=tf.float64, name="two")
one = tf.constant(1, dtype=tf.float64, name="one")
zero = tf.constant(0, dtype=tf.float64, name="zero")


def gradsafe_sqrt(x, clip_low=1e-18, name=None):
    with tf.name_scope(name, "gradsafe_sqrt"):
        return tf.sqrt(tf.clip_by_value(x, clip_low, x))


def argus_integral_phalf(m_low, m_high, m0, c):
    """
    Only valid for argus_pdf with p=0.5! Otherwise need to
    do numerical integral.
    """
    def F(m_bound, name=None):
tensorflow.clip_by_value
9,156
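Aside (not a dataset row): gradsafe_sqrt above clips its argument away from zero before the sqrt so the gradient stays finite; a minimal sketch of that clamp (the upper bound here is the dtype max rather than x itself).

import tensorflow as tf

x = tf.constant([0.0, 1e-30, 4.0], dtype=tf.float64)
safe = tf.sqrt(tf.clip_by_value(x, 1e-18, x.dtype.max))
print(safe)  # [1e-09 1e-09 2e+00] -- no 0/0 in the sqrt gradient at x == 0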
import tensorflow as tf

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
tensorflow.logging.info
9,157
import tensorflow as tf
from tensorflow import keras as tf_keras

# prevent Keras from using up all gpu memory
if tf.executing_eagerly():
    gpus = tf.config.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
else:
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    set_session(tf.Session(config=config))
tensorflow.config.experimental.set_memory_growth
9,158
import tensorflow as tf

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    if "plugin" in args.optimizer:
        init_op = tf.group(init_op, emb_opt.initializer)

    save_op = list()
    for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):
        control_inputs = [save_op[-1]] if save_op else None
        with tf.control_dependencies(control_inputs):
            if args.save_params:
                filepath = r"./embedding_variables/"
                utils.try_make_dirs(filepath)
                op = sok_saver.dump_to_file(embedding_layer.embedding_variable, filepath)
            else:
                op = tf.constant(1.0)
tensorflow.control_dependencies
9,159
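Aside (not a dataset row): the save loop above chains each dump op on the previous one via tf.control_dependencies; a minimal graph-mode sketch of the ordering guarantee, using tf.compat.v1.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.Variable(0.0)
bump = tf.assign_add(v, 1.0)
with tf.control_dependencies([bump]):
    read = tf.identity(v)  # read only runs after bump has run

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read))  # 1.0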
import tensorflow as tf
import utils as ut
import input as inp
import visualization as vis
import matplotlib.pyplot as plt
import time
import sys
import getch
import model_interpreter as interpreter
import network_utils as nut
import math
from tensorflow.contrib.tensorboard.plugins import projector
from Bunch import Bunch

tf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')
tf.app.flags.DEFINE_string('input_name', '', 'input folder')
tf.app.flags.DEFINE_string('test_path', '', 'test set folder')
tf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')
tf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'
                           'WhatWhereAe (ww) U-netAe (u)')
tf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')
tf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')
tf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')
tf.app.flags.DEFINE_float('epsilon', 0.000001,
                          'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')
tf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')
tf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')
tf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')
tensorflow.app.flags.DEFINE_string
9,160
import tensorflow as tf

            idx1 = cell_arch[bi][0]
            idx2 = cell_arch[bi][2]
            block_use = tf.one_hot(idx1, ni, dtype=tf.int32) + \
                        tf.one_hot(idx2, ni, dtype=tf.int32)
            block_uses.append(block_use)
        block_uses = tf.add_n(block_uses)
        unused_indices = tf.reshape(tf.cast(tf.where(tf.equal(block_uses, 0)), tf.int32), [-1])
        num_out_blocks = tf.size(unused_indices)

        # Select only unused blocks
        with tf.variable_scope('select'):
            stacked_blocks = tf.stack(cell_inputs + blocks)
            out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0)
tensorflow.size
9,161
import tensorflow as tf

        self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
        self.t_replace_counter += 1

    def choose_action(self, s):
        s = s[np.newaxis, :]    # single state
        return self.sess.run(self.a, feed_dict={S: s})[0]  # single action

    def add_grad_to_graph(self, a_grads):
        with tf.variable_scope('policy_grads'):
            # ys = policy;
            # xs = policy's parameters;
            # self.a_grads = the gradients of the policy to get more Q
            # tf.gradients will calculate dys/dxs with a initial gradients for ys,
            # so this is dq/da * da/dparams
            self.policy_grads_and_vars = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)

        with tf.variable_scope('A_train'):
            opt = tf.train.RMSPropOptimizer(-self.lr)  # (- learning rate) for ascent policy
            self.train_op = opt.apply_gradients(zip(self.policy_grads_and_vars, self.e_params),
                                                global_step=GLOBAL_STEP)


###############################  Critic  ####################################

class Critic(object):
    def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_iter, a, a_):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.lr = learning_rate
        self.gamma = gamma
        self.t_replace_iter = t_replace_iter
tensorflow.variable_scope
9,162
import tensorflow as tf

        # Reshape x_discrete
        shape_x = common_layers.shape_list(x)
        shape_discrete = shape_x[:-1]
        x_discrete = tf.reshape(x_discrete, shape_discrete)
        x_means = tf.reshape(x_means, shape=shape_x)
        h1 = x + tf.stop_gradient(x_means - x)

        h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
        res = tf.layers.dense(
tensorflow.reshape
9,163
import tensorflow as tf

    num_filters = int(arguments['--num-filters'])
    batch_size = int(arguments['--batch-size'])
    print(' * [INFO] %s model (Filters: %d, Batch Size: %d)' % (
        model, num_filters, batch_size))
    save_path = atari_learn(
        env,
        session,
        num_timesteps=int(arguments['--timesteps']),
        num_filters=num_filters,
        model=model,
        batch_size=batch_size,
        restore=arguments['--restore'],
        checkpoint_dir=arguments['--ckpt-dir'],
        learning_starts=arguments['--learning-starts'])
    reader = tf.train.NewCheckpointReader(save_path)
    W = reader.get_tensor('q_func/action_value/fully_connected/weights')
    print('Largest entry:', np.linalg.norm(W, ord=np.inf))
    print('Frobenius norm:', np.linalg.norm(W, ord='fro'))


if __name__ == "__main__":
    main()
tensorflow.train.NewCheckpointReader
9,164
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

    # Perform burn-in.
    linear_regression.fit(model, burn_in_dataset, optimizer)
tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.fit
9,165
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils

        'accuracy/threshold_0.500000_mean', metrics)
    estimator_test_utils.assert_in_range(
        0.9, 1.0, 'precision/positive_threshold_0.500000_mean', metrics)
    estimator_test_utils.assert_in_range(
        0.9, 1.0, 'recall/positive_threshold_0.500000_mean', metrics)
    self._assertCommonMetrics(metrics)

  def _assertCommonMetrics(self, metrics):
    estimator_test_utils.assert_in_range(_ITERS, _ITERS + 5, 'global_step',
                                         metrics)
    estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy', metrics)
    estimator_test_utils.assert_in_range(0.0, 0.2, 'loss', metrics)
    self.report_benchmark(
        iters=metrics['global_step'],
        extras={k: v for k, v in metrics.items() if k in _METRIC_KEYS})

  def benchmarkMatrixData(self):
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_feature = feature_column.real_valued_column('feature', dimension=4)
    bucketized_feature = feature_column.bucketized_column(
tensorflow.contrib.learn.python.learn.estimators.estimator_test_utils.assert_in_range
9,166
import tensorflow as tf

def _transform(theta, input_dim, out_size, z_near, z_far):
    with tf.variable_scope('_transform'):
        num_batch = input_dim.get_shape().as_list()[0]
        num_channels = input_dim.get_shape().as_list()[4]
        theta = tf.reshape(theta, (-1, 4, 4))
        theta = tf.cast(theta, 'float32')

        out_depth = out_size[0]
        out_height = out_size[1]
        out_width = out_size[2]
        grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far)
        grid = tf.expand_dims(grid, 0)
        grid = tf.reshape(grid, [-1])
        grid = tf.tile(grid, tf.stack([num_batch]))
        grid = tf.reshape(grid, tf.stack([num_batch, 4, -1]))

        # Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1).
        t_g = tf.matmul(theta, grid)
        z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1])
        y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1])
        x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1])

        z_s_flat = tf.reshape(z_s, [-1])
        y_s_flat = tf.reshape(y_s, [-1])
        x_s_flat = tf.reshape(x_s, [-1])
tensorflow.stack
9,167
import tensorflow as tf

    Returns:
        Tensor with same shape as bboxes, but making sure that none of the
        bboxes are outside the image.
    """
    with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
        bboxes = tf.cast(bboxes, dtype=tf.float32)
        imshape = tf.cast(imshape, dtype=tf.float32)

        x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
        width = imshape[1]
        height = imshape[0]

        x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
        x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
        y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
        y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)

        bboxes = tf.concat([x1, y1, x2, y2], axis=1)
        return bboxes


def change_order(bboxes):
    """Change bounding box encoding order.
tensorflow.minimum
9,168
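Aside (not a dataset row): the clip_bboxes helper above clamps coordinates into the image with a tf.maximum(tf.minimum(...)) sandwich; a minimal sketch of that clamp on one coordinate vector.

import tensorflow as tf

x1 = tf.constant([-5.0, 10.0, 700.0])
width = 640.0
clamped = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
print(clamped)  # [  0.  10. 639.]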
import tensorflow as tf

    sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
    log_scales = tf.maximum(tf.reduce_sum(
        l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(
        l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(tf.shape(means), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(
        x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(
        x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
    return tf.concat([tf.reshape(x0, xs[:-1] + [1]),
                      tf.reshape(x1, xs[:-1] + [1]),
                      tf.reshape(x2, xs[:-1] + [1])], 3)
tensorflow.log
9,169
import tensorflow as tf

    sequence_max_length = hidden.shape[1]
    multipliers = tf.concat(
tensorflow.concat
9,170
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell

            cell = PLSTM(encoder.cell_size, reuse=reuse,
                         fact_size=encoder.lstm_fact_size,
                         proj_size=encoder.lstm_proj_size)
        elif encoder.cell_type.lower() == 'dropoutgru':
            cell = DropoutGRUCell(encoder.cell_size, reuse=reuse,
                                  layer_norm=encoder.layer_norm,
                                  input_size=input_size,
                                  input_keep_prob=encoder.rnn_input_keep_prob,
                                  state_keep_prob=encoder.rnn_state_keep_prob)
        else:
            cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm)

        if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru':
            cell = DropoutWrapper(cell,
                                  input_keep_prob=encoder.rnn_input_keep_prob,
                                  output_keep_prob=encoder.rnn_output_keep_prob,
                                  state_keep_prob=encoder.rnn_state_keep_prob,
                                  variational_recurrent=encoder.pervasive_dropout,
                                  dtype=tf.float32,
                                  input_size=input_size)
        return cell

    batch_size = tf.shape(encoder_inputs_)[0]
    time_steps = tf.shape(encoder_inputs_)[1]
tensorflow.contrib.rnn.DropoutWrapper
9,171
import tensorflow as tf

value_estimator = np.zeros(n_particles, dtype=object)

# call proper policy and value estimators for each envs
for i in range(n_particles):
    if ENV_NAME == "Pendulum-v0":
        policy_estimator[i] = PolicyEstimator_Pendulum(entropy_beta=ENTROPY_BETA,
                                                       learning_rate=POLICY_LR, par_idx=i)
        value_estimator[i] = ValueEstimator_Pendulum(learning_rate=VALUE_LR, par_idx=i)
    if ENV_NAME == "MountainCarContinuous-v0":
        policy_estimator[i] = PolicyEstimator_MountainCarContinuous(entropy_beta=ENTROPY_BETA,
                                                                    learning_rate=POLICY_LR, par_idx=i)
        value_estimator[i] = ValueEstimator_MountainCarContinuous(learning_rate=VALUE_LR, par_idx=i)

svpg = SVPG(policy_estimator, independent_flag_svpg, learning_rate=POLICY_LR)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Note, due to randomness in the policy the number of episodes you need varies
    # TODO: Sometimes the algorithm gets stuck, I'm not sure what exactly is happening there.
    stats = advantage_actor_critic(env, policy_estimator, value_estimator, svpg,
                                   NUM_EPISODES, MAX_EPI_STEP, discount_factor=DISCOUNT_FACTOR)
tensorflow.Session
9,172
import tensorflow as tf

        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def valid_inference(self, images):
        images = tf.cast(images, tf.float32) / 255.0
        l1 = tf.matmul(images, self.w1) + self.b1
        l1 = tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2) + self.b2
        l2 = tf.nn.relu(l2)
        l3 = tf.matmul(l2, self.w3) + self.b3
        l3 = tf.nn.relu(l3)
        out = tf.matmul(l3, self.w4) + self.b4
        return out

    def softmax_loss(self, predicts, labels):
        predicts = tf.nn.softmax(predicts)
        labels = tf.one_hot(labels, classnum)
        loss = -tf.reduce_sum(labels * tf.log(predicts))
        return loss

    def optimer(self, loss, lr=0.001):
        train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
tensorflow.nn.relu
9,173
from tensorflow.python.platform import gfile

    sd = save.as_saver_def()
    self.assertTrue(sd.sharded)


class MaxToKeepTest(tf.test.TestCase):

  def testNonSharded(self):
    save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded")
    try:
      gfile.DeleteRecursively(save_dir)
    except OSError:
      pass                      # Ignore
    gfile.MakeDirs(save_dir)

    with self.test_session() as sess:
      v = tf.Variable(10.0, name="v")
      save = tf.train.Saver({"v": v}, max_to_keep=2)
      tf.initialize_all_variables().run()
tensorflow.python.platform.gfile.DeleteRecursively
9,174
import tensorflow as tf

        dists.Exponential, dists.Gamma, dists.InverseGamma, dists.Laplace,
        dists.StudentT, dists.Uniform]
    sample_shapes = [(), (10,), (10, 20, 30)]
    with self.test_session():
      for cls in classes:
        for sample_shape in sample_shapes:
          param_shapes = cls.param_shapes(sample_shape)
          params = dict([(name, tf.random_normal(shape))
                         for name, shape in param_shapes.items()])
          dist = cls(**params)
          self.assertAllEqual(sample_shape, tf.shape(dist.sample()).eval())
          dist_copy = dist.copy()
          self.assertAllEqual(sample_shape,
                              tf.shape(dist_copy.sample()).eval())
          self.assertEqual(dist.parameters, dist_copy.parameters)

  def testCopyExtraArgs(self):
    with self.test_session():
      # Note: we cannot easily test all distributions since each requires
tensorflow.random_normal
9,175
from tensorflow.python.ops import array_ops

        name='expanded_shape')
    expanded = sparse_ops.sparse_reshape(
        tensor, shape=expanded_shape, name='expand')
    if multiple == 1:
      return expanded
    return sparse_ops.sparse_concat(
        dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)

  # Dense.
  expanded = array_ops.expand_dims(
      tensor, dim if (dim >= 0) else (dim - 1), name='expand')
  if multiple == 1:
    return expanded
  ones = array_ops.ones_like(array_ops.shape(tensor))
  tile_multiples = array_ops.concat(
      0, (ones[:dim], (multiple,), ones[dim:]), name='multiples')
  return array_ops.tile(expanded, tile_multiples, name=scope)


def sparse_average_precision_at_k(predictions, labels, k):
  """Computes average precision@k of predictions with respect to sparse labels.

  From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
  for each row is:

    AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items

  A "row" is the elements in dimension [D1, ... DN] of `predictions`, `labels`,
tensorflow.python.ops.array_ops.concat
9,176
import tensorflow as tf

  # Add the summaries from the first clone. These contain the summaries
  # created by model_fn and either optimize_clones() or _gather_clone_loss().
  summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone.scope))

  if total_loss is not None:
    # Add total_loss to summary.
    summaries.add(tf.summary.scalar('total_loss', total_loss))

  if summaries:
    # Merge all summaries together.
    summary_op = tf.merge_summary(list(summaries), name='summary_op')
  else:
    summary_op = None
tensorflow.summary.scalar
9,177
from tensorflow.python import tf2

def _check_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises a warning if either Tensorflow version is less than 2.0 or TF 2.x is
  not enabled.

  If TF 2.x is enabled, but version is < TF 2.3, raises a warning to indicate
  that resources may not be initialized.
  """
  major, minor, _ = tf.version.VERSION.split('.')
  if not (int(major) >= 2 and tf2.enabled()):
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. TransformFeaturesLayer is supported '
        'only for TF 2.x with TF 2.x behaviors enabled and may not work as '
        'intended.', tf.version.VERSION)
  elif int(major) == 2 and int(minor) < 3:
    # TODO(varshaan): Log a more specific warning.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. TransformFeaturesLayer may not work '
        'as intended if the SavedModel contains an initialization op.',
        tf.version.VERSION)
tensorflow.python.tf2.enabled
9,178
import tensorflow as tf

    conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
    relu7 = tf.nn.relu(conv7, name="relu7")
    if FLAGS.debug:
        utils.add_activation_summary(relu7)
    relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

    W8 = utils.weight_variable([1, 1, 4096, 150], name="W8")
    b8 = utils.bias_variable([150], name="b8")
tensorflow.nn.dropout
9,179
import tensorflow as tf

        if bstrides is not None:
            # Here we do not use the standard padding on the right hand side.
            # If the convolution results is larger than expected, the scatter function
            # will not use out-of-boundary points.
            assert bsize is not None, 'Must pass in bsize and bstrides together.'
            h = x_shape[1] + pad_h0 + pad_h1
            w = x_shape[2] + pad_w0 + pad_w1
            pad_h1 += tf.mod(-h + bsize[1], bstrides[1])
            pad_w1 += tf.mod(-w + bsize[2], bstrides[2])
        return tf.pad(x, [[0, 0], [pad_h0, pad_h1], [pad_w0, pad_w1], [0, 0]])
    else:
        if bstrides is not None:
            assert bsize is not None, 'Must pass in bsize and bstrides together.'
            h = x_shape[1]
            w = x_shape[2]
            pad_h1 = tf.mod(-h + bsize[1], bstrides[1])
            pad_w1 = tf.mod(-w + bsize[2], bstrides[2])
            return tf.cond(
                tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)),
tensorflow.pad
9,180
import tensorflow as tf

            try:
                pred_label = tf.argmax(distillation_loss["st_logits"], axis=-1,
                                       output_type=tf.int32)
tensorflow.argmax
9,181
import tensorflow as tf tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_train_eval: tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files,
tensorflow.logging.info
9,182
from tensorflow.python.framework import tensor_shape

                                       array_ops.shape(self.alpha),
                                       array_ops.shape(self.beta))

  def _get_batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.alpha.get_shape(), self.beta.get_shape())

  def _event_shape(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    """See the documentation for tf.random_gamma for more details."""
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
tensorflow.python.framework.tensor_shape.scalar
9,183
from tensorflow.contrib.framework import tensor_util

      tuple.
  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions
9,184
from tensorflow.python.framework import ops

      return features
    return {"": features}

  def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = contrib_variables.get_global_step()
    assert global_step

    features = self._get_feature_dict(features)
    logits = self._logits(features, is_training=True)
    if self._enable_centered_bias:
      centered_bias_step = [self._centered_bias_step(targets, features)]
    else:
      centered_bias_step = []
    with ops.control_dependencies(centered_bias_step):
      loss = self._target_column.loss(logits, targets, features)
    logging_ops.scalar_summary("loss", loss)

    linear_train_step = self._linear_model.get_train_step(loss)
    dnn_train_step = (self._dnn_model.get_train_step(loss) if self._dnn_model
                      else [])

    with ops.control_dependencies(linear_train_step + dnn_train_step):
      with ops.get_default_graph().colocate_with(global_step):
        return state_ops.assign_add(global_step, 1).op, loss

  def _get_eval_ops(self, features, targets, metrics=None):
    raise NotImplementedError
tensorflow.python.framework.ops.control_dependencies
9,185
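A minimal sketch of the ordering guarantee control_dependencies provides, shown via its public alias tf.control_dependencies (TF 1.x assumed; the variable is illustrative):

import tensorflow as tf

counter = tf.Variable(0, name="counter")
increment = tf.assign_add(counter, 1)
# Ops created inside the block only run after `increment` has run.
with tf.control_dependencies([increment]):
    read = tf.identity(counter)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read))  # 1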
import tensorflow as tf
    elif params.learning_rate_decay == "new_warmup_rsqrt_decay":
        step = tf.to_float(global_step)
        warmup_steps = tf.to_float(params.warmup_steps)
        multiplier = params.hidden_size ** -0.5
        decay = params.r0 * multiplier * tf.minimum(
            (step + 1) * (warmup_steps ** -1.0) * (warmup_steps ** -0.5),
            (step + 1) ** -0.5)

        return learning_rate * decay
    elif params.learning_rate_decay == "rnnplus_warmup_decay":
        step = tf.to_float(global_step)
        n = float(len(params.device_list))
        warmup_steps = tf.to_float(params.warmup_steps)

        decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps),
                           tf.minimum(n, n * ((2 * n) ** ((params.s - n * step) /
                                                          (params.e - params.s)))))

        return tf.maximum(learning_rate * decay, 5e-6)
    elif params.learning_rate_decay == "piecewise_constant":
        return tf.train.piecewise_constant(tf.to_int32(global_step),
                                           params.learning_rate_boundaries,
tensorflow.to_float
9,186
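A minimal sketch of tf.to_float in a warmup/decay schedule like the one above (TF 1.x; tf.to_float is a deprecated alias of tf.cast(x, tf.float32); the constants are illustrative):

import tensorflow as tf

global_step = tf.constant(100, dtype=tf.int64)
step = tf.to_float(global_step)  # same as tf.cast(global_step, tf.float32)
warmup_steps = tf.to_float(tf.constant(4000))
decay = tf.minimum((step + 1) * warmup_steps ** -1.5, (step + 1) ** -0.5)

with tf.Session() as sess:
    print(sess.run(decay))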
import tensorflow as tf
tensorflow.zeros
9,187
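The seed for this row is only the import; a minimal sketch of tf.zeros with both a static and a dynamic shape (TF 1.x assumed; values illustrative):

import tensorflow as tf

z_static = tf.zeros([2, 3], dtype=tf.float32)  # shape known at graph-build time
batch = tf.placeholder(tf.int32, shape=[])
z_dynamic = tf.zeros(tf.stack([batch, 3]))     # shape resolved at run time

with tf.Session() as sess:
    print(sess.run(z_dynamic, feed_dict={batch: 4}).shape)  # (4, 3)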
import tensorflow as tf
      x, prediction, output_class = self.buildModel(
          self.buildLstmLayer(), is_dynamic_rnn=True)
      self.trainModel(x, prediction, output_class, sess)

      saver = tf.train.Saver()

      x, prediction, output_class, new_sess = self.saveAndRestoreModel(
          self.buildLstmLayer(), sess, saver, is_dynamic_rnn=True)
tensorflow.train.Saver
9,188
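A minimal save/restore round trip with tf.train.Saver, the pattern the test above relies on (TF 1.x; the checkpoint path is illustrative):

import tensorflow as tf

v = tf.Variable(42.0, name="v")
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    path = saver.save(sess, "/tmp/model.ckpt")

with tf.Session() as sess:
    saver.restore(sess, path)  # restored variables need no initializer
    print(sess.run(v))  # 42.0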
import tensorflow as tf
            total_loss += tf.add_n(self.regularizers, name="regularization")

        # 1st part of minimize: compute_gradient
        self.grads_and_vars = self._optimizer.compute_gradients(total_loss)

        # clip gradients
        clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple)

        # compute norms in case they need to be logged
        self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]
        self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]

        # check that gradients and variables are finite
        grads = [tf.check_numerics(g, "grads is not finite") for (g, v) in clipped_grads_and_vars]
        variables = [tf.check_numerics(v, "variables is not finite") for (g, v) in clipped_grads_and_vars]

        self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)]

        # 2nd part of minimize: apply_gradient
        optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)

        update_ops = tf.group(*self.update_ops)
        self.training_op = tf.group(update_ops, optimizer_step)

    def set_check_ops(self):
        self._check_ops = 1

        # TODO argo2 This is not working anymore with the new session
        # with self.sess.graph.as_default():
tensorflow.global_norm
9,189
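A minimal sketch of tf.global_norm, which treats a list of tensors as one flattened vector (TF 1.x; the numbers are chosen so the result is exact):

import tensorflow as tf

g1 = tf.constant([3.0, 4.0])
g2 = tf.constant([12.0])
# sqrt(3^2 + 4^2 + 12^2) = sqrt(169) = 13
norm = tf.global_norm([g1, g2])

with tf.Session() as sess:
    print(sess.run(norm))  # 13.0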
import tensorflow as tf
    t1, t2 = tf.split(traj_tgt, 2, axis=0)
    soft_sign = tf.tanh((t1 - t2) * temp)
    loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2)))
    loss = tf.reduce_mean(loss)
    return loss


def horizon_sumV1(input, horizon=12):
    bs, epi_len = input.shape[:2]
    new_w = epi_len - horizon + 1
    weights = np.zeros([epi_len, new_w])
    for i in range(new_w):
        weights[i:i + horizon, i] = 1.0
    weights = tf.convert_to_tensor(weights, dtype=tf.float32)
    horizon_sum = tf.matmul(input, weights)
    return horizon_sum


def horizon_sumV2(pred, tgt, horizon=12):
    bs, epi_len = 50, 50
    weights_list = []
    for h in range(1, horizon + 1):
        new_w = epi_len - h + 1
        weights = np.zeros([epi_len, epi_len])
        for i in range(new_w):
            weights[i:i + h, i] = 1.0
        weights_list += [weights]
    weights_tensors = tf.stack([tf.convert_to_tensor(weights, dtype=tf.float32) for weights in weights_list])
tensorflow.matmul
9,190
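A minimal sketch of the banded-matrix trick above: tf.matmul against a matrix of shifted ones computes all sliding-window (horizon) sums in one multiply (TF 1.x; sizes are illustrative):

import numpy as np
import tensorflow as tf

epi_len, horizon = 5, 3
new_w = epi_len - horizon + 1
weights = np.zeros([epi_len, new_w], dtype=np.float32)
for i in range(new_w):
    weights[i:i + horizon, i] = 1.0  # column i sums entries i..i+horizon-1

x = tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0]])  # [batch, epi_len]
sums = tf.matmul(x, tf.constant(weights))

with tf.Session() as sess:
    print(sess.run(sums))  # [[ 6.  9. 12.]]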
import tensorflow as tf
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
tensorflow.constant
9,191
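A minimal sketch of the less obvious tf.constant feature used above: the shape argument reshapes the flat value list at construction time (TF 1.x; values illustrative):

import tensorflow as tf

ids = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=tf.int32)

with tf.Session() as sess:
    print(sess.run(ids))  # [[1 2 3] [4 5 6]]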
import tensorflow as tf


def _load_global_step_from_checkpoint_dir(checkpoint_dir):
  try:
    checkpoint_reader = tf.train.NewCheckpointReader(
        tf.train.latest_checkpoint(checkpoint_dir))
    return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
  except:  # pylint: disable=bare-except
    return 0
tensorflow.train.latest_checkpoint
9,192
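A minimal sketch of the same recovery pattern: tf.train.latest_checkpoint returns None when the directory holds no checkpoint, which the snippet above papers over with a bare except (TF 1.x; the path is illustrative):

import tensorflow as tf

ckpt = tf.train.latest_checkpoint("/tmp/demo_ckpt")
if ckpt is None:
    step = 0  # nothing saved yet
else:
    reader = tf.train.NewCheckpointReader(ckpt)
    step = reader.get_tensor("global_step")  # tf.GraphKeys.GLOBAL_STEP == "global_step"
print(step)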
import tensorflow as tf
      fetches = [lrs.Value(_) for _ in steps]
      values = sess.run(fetches)
      self.assertAllClose([1.0, 0.1, 0.1, 0.01, 0.01, 0.001], values)

  def testBatchSplit(self):

    def Run(num_splits):
      p = self._testParams()
      with self.session(use_gpu=False, graph=tf.Graph()) as sess:
        tf.set_random_seed(93820981)
        p.is_eval = True
        p.input.cur_iter_in_seed = False
        p.input.bucket_batch_limit = [
            b * 2 / num_splits for b in p.input.bucket_batch_limit
        ]
        with cluster_factory.ForTestingWorker(gpus=num_splits):
tensorflow.Graph
9,193
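A minimal sketch of tf.Graph isolation, the reason the test above builds each run inside a fresh graph (TF 1.x):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0)  # this op lives in g, not in the default graph

with tf.Session(graph=g) as sess:
    print(sess.run(x))  # 1.0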
import tensorflow as tf
print("Create model")
model = Model(
    placeholders=placeholders,
    num_feat=num_feat,
    nonzero_feat=nonzero_feat,
    edge_types=edge_types,
    decoders=edge_type2decoder,
)

print("Create optimizer")
with tf.name_scope('optimizer'):
    opt = Optimizer(
        embeddings=model.embeddings,
        latent_inters=model.latent_inters,
        latent_varies=model.latent_varies,
        degrees=degrees,
        edge_types=edge_types,
        edge_type2dim=edge_type2dim,
        placeholders=placeholders,
        batch_size=FLAGS.batch_size,
        margin=FLAGS.max_margin
tensorflow.name_scope
9,194
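A minimal sketch of what tf.name_scope actually does: it prefixes op names for grouping (e.g. in TensorBoard) without affecting variable reuse (TF 1.x):

import tensorflow as tf

with tf.name_scope('optimizer'):
    lr = tf.constant(0.01, name='learning_rate')
print(lr.op.name)  # optimizer/learning_rate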
import tensorflow as tf
        self.block3_5 = self.res_block_3_layers(self.block3_4, [256, 256, 1024], "res4e")  # 14*14
        self.block3_6 = self.res_block_3_layers(self.block3_5, [256, 256, 1024], "res4f")  # 14*14
        # [None 7 7 512]
        self.pool4 = self.max_pool(self.block3_6, 2, 2, "pool4")  # 14*14

        self.block4_1 = self.res_block_3_layers(self.pool4, [512, 512, 2048], "res5a", True)  # 7*7
        self.block4_2 = self.res_block_3_layers(self.block4_1, [512, 512, 2048], "res5b")  # 7*7
        self.block4_3 = self.res_block_3_layers(self.block4_2, [512, 512, 2048], "res5c")  # 7*7

        # upsample layer begins
        self.deconv_1 = self.deconv_bn_relu(self.block4_3, name='deconv_1', kernel_size=3, output_channels=1024,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 14*14
        self.deconv_2 = self.deconv_bn_relu(self.deconv_1, name='deconv_2', kernel_size=3, output_channels=512,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 28*28
        self.deconv_3 = self.deconv_bn_relu(self.deconv_2, name='deconv_3', kernel_size=3, output_channels=256,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 56*56
        self.deconv_4 = self.deconv_bn_relu(self.deconv_3, name='deconv_4', kernel_size=3, output_channels=128,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 112*112
        self.deconv_5 = self.deconv_bn_relu(self.deconv_4, name='deconv_5', kernel_size=3, output_channels=64,
                                            initializer=tf.contrib.layers.variance_scaling_initializer(),
                                            stride=2, bn=True, training=self.is_training)  # 224*224
        # self.final_layer = self.conv_layer(bottom=self.deconv_5, kernal_size=1, in_channels=64, out_channels=3, stride=1, name='final_layer')
        self.final_layer = self.conv_bn_relu(bottom=self.deconv_5, name='final_layer', kernel_size=1, output_channels=3,
                                             initializer=tf.contrib.layers.variance_scaling_initializer(),
                                             bn=False, training=self.is_training, relu=False)

        # self.pool5 = self.avg_pool(self.block4_3, 7, 1, "pool5")
        # self.fc0 = self.fc_layer(self.pool5, 2048, 1024, "fc0")
        # self.relu1 = tf.nn.relu(self.fc0)
        # if train_mode is not None:
        #     self.relu1 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu1, self.dropout), lambda: self.relu1)
tensorflow.contrib.layers.variance_scaling_initializer
9,195
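A minimal sketch of the initializer used throughout the block above; with default arguments tf.contrib.layers.variance_scaling_initializer gives He initialization, i.e. weights scaled so their variance is about 2/fan_in (TF 1.x; the variable shape is illustrative):

import tensorflow as tf

init = tf.contrib.layers.variance_scaling_initializer()  # factor=2.0, mode='FAN_IN'
w = tf.get_variable("w", shape=[3, 3, 64, 128], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.reduce_mean(tf.square(w))))  # ~2 / (3*3*64) ≈ 0.0035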
import tensorflow as tf
          scales_to_logits[MERGED_LOGITS_SCOPE],
          tf.shape(images)[1:3],
          scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
      outputs_to_predictions[output].append(
          tf.expand_dims(tf.nn.softmax(logits), 4))

      if add_flipped_images:
        scales_to_logits_reversed = (
            outputs_to_scales_to_logits_reversed[output])
        logits_reversed = _resize_bilinear(
            tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]),
            tf.shape(images)[1:3],
            scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype)
        outputs_to_predictions[output].append(
            tf.expand_dims(tf.nn.softmax(logits_reversed), 4))

  for output in sorted(outputs_to_predictions):
    predictions = outputs_to_predictions[output]
    # Compute average prediction across different scales and flipped images.
    predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
    outputs_to_predictions[output] = tf.argmax(predictions, 3,
                                               output_type=tf.dtypes.int32)
    outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions)

  return outputs_to_predictions


def predict_labels(images, model_options):
  """Predicts segmentation labels.
tensorflow.nn.softmax
9,196
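A minimal sketch of tf.nn.softmax, which normalizes over the last axis by default, as the prediction-averaging code above assumes (TF 1.x):

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
probs = tf.nn.softmax(logits)

with tf.Session() as sess:
    print(sess.run(probs))  # approx [[0.09 0.24 0.67]]; each row sums to 1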
from tensorflow.python.framework import ops
    """Makes assignments depend on the cached value, if any.

    This prevents undefined behavior with reads not ordered wrt writes.

    Yields:
      None.
    """
    if self._cached_value is not None:
      with ops.control_dependencies([self._cached_value]):
        yield
    else:
      yield

  @property
  def initializer(self):
    return control_flow_ops.group([v.initializer for v in self._vars])
tensorflow.python.framework.ops.control_dependencies
9,197
import tensorflow as tf
        for var in tf.all_variables():
            print(var)

        batch_size = tf.shape(policy.obs_ph)[0]
        n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
        _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
                                outputs=output_actions,
                                givens={update_eps_ph: -1.0, stochastic_ph: True},
tensorflow.where
9,198
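A minimal sketch of the epsilon-greedy selection above: tf.where picks elementwise between a random and a greedy action (TF 1.x; batch size, action count and eps are illustrative):

import tensorflow as tf

batch_size, n_actions, eps = 4, 6, 0.1
deterministic_actions = tf.constant([0, 1, 2, 3], dtype=tf.int64)
random_actions = tf.random_uniform([batch_size], maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform([batch_size]) < eps
# Where the coin flip says "explore", take the random action.
output_actions = tf.where(chose_random, random_actions, deterministic_actions)

with tf.Session() as sess:
    print(sess.run(output_actions))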
import tensorflow as tf
    """Constructs a `AlbertConfig` from a Python dictionary of parameters."""
    config = AlbertConfig(vocab_size=None)
    for (key, value) in six.iteritems(json_object):
      config.__dict__[key] = value
    return config

  @classmethod
  def from_json_file(cls, json_file):
    """Constructs a `AlbertConfig` from a json file of parameters."""
    with tf.gfile.GFile(json_file, "r") as reader:
      text = reader.read()
    return cls.from_dict(json.loads(text))

  def to_dict(self):
    """Serializes this instance to a Python dictionary."""
    output = copy.deepcopy(self.__dict__)
    return output
tensorflow.gfile.GFile
9,199
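A minimal round trip with tf.gfile.GFile, which the config loader above uses because it reads local, GCS (gs://) and HDFS paths through one API (TF 1.x; the path is illustrative):

import json
import tensorflow as tf

with tf.gfile.GFile("/tmp/config.json", "w") as writer:
    writer.write(json.dumps({"hidden_size": 768}))

with tf.gfile.GFile("/tmp/config.json", "r") as reader:
    config = json.loads(reader.read())
print(config["hidden_size"])  # 768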