Columns:
seed: string (length 25–2.89k)
seed_api: string (length 14–102)
index: int64 (0–14.8k)
from tensorflow.python.ops import array_ops predictions = self._logits_to_predictions(logits, proba=False) result.update(self._run_metrics(predictions, targets, metrics, self._get_weight_tensor(features))) return result def _get_predict_ops(self, features): """See base class.""" logits = self._logits(features) return self._logits_to_predictions(logits, proba=True) def _logits_to_predictions(self, logits, proba=False): if self._n_classes < 2: return array_ops.reshape(logits, [-1]) if self._n_classes == 2: logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _get_feature_ops_from_example(self, examples_batch): column_types = layers.create_dict_for_parse_example( (self._get_linear_feature_columns() or []) +
tensorflow.python.ops.array_ops.reshape
10,000
import tensorflow as tf def get_a_cell(state_size,input_prob,state_prob,num_input): if cell_type == 'LSTM': if activation == 'linear': lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) elif activation == 'relu': lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob) else: #tanh by default lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True) cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
tensorflow.nn.rnn_cell.LSTMCell
10,001
import tensorflow as tf ------------------- e sigma*(2*pi)^(1/2) ''' def gaussian_pdf(mean, loc_std, sample): Z = 1.0 / (loc_std * tf.sqrt(2.0 * np.pi)) a = - tf.square(sample - mean) / (2.0 * tf.square(loc_std)) return Z * tf.exp(a) class ACNet: def __init__(self, scope, GRID_SIZE, a_size, trainer,TRAINING, GLOBAL_NET_SCOPE): with tf.variable_scope(str(scope)+'/qvalues'): #The input size may require more work to fit the interface. self.inputs = tf.placeholder(shape=[None,GRID_SIZE,GRID_SIZE, num_channels], dtype=tf.float32) # input state # self.goal_pos = tf.placeholder(shape=[None,2],dtype=tf.float32) self.prev_loc = tf.placeholder(shape=[None,2], dtype=tf.float32) # self.policy, self.next_loc, self.value, self.state_out, self.state_in, self.state_init, self.valids, self.blocking, self.mypos, self.goalpos, self.next_loc_mean = self._build_net(self.inputs, self.inputs_primal, self.prev_loc, RNN_SIZE, TRAINING,a_size) ''' CHANGES - removed target_blocking, blocking layers, blocking_loss - removed imitation gradients and losss - removed valid_loss - removed train_valid
tensorflow.placeholder
10,002
import tensorflow as tf an `AdaptiveEncodingStageInterface`. Returns: A tuple `(server_test_data, decode_params)` where these are: server_test_data: A `list` of `TestData` tuples containing numpy values representing the results of encoding for each element of `input_values`. decode_params: Numpy values of the decode parameters used. These are values that should be used if additional decoding is to be done, such as for `assert_commutes_with_sum`. """ def _adaptive_many_to_one_encode_decode(state): """Implementation of the method for `AdaptiveEncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): shape = input_values[0].shape if state is None: state = stage.initial_state() encode_params, decode_params = stage.get_params(state) with self.session(server_graph) as sess: encode_params, decode_params, state = self.evaluate_tf_py_list( [encode_params, decode_params, state], sess) client_test_data = [] for x in input_values: client_graph = tf.Graph()
tensorflow.Graph
10,003
import tensorflow as tf Returns: A Tensor of size [batch_size], denoting the error between the quaternions. """ assertions = [] assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)), ['The l2 norm of each prediction quaternion vector should be 1.'])) assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), ['The l2 norm of each label quaternion vector should be 1.'])) with tf.name_scope(name):
tensorflow.square
10,004
import tensorflow as tf _SLEEP_TIME = 1.0 class DynamicBatchingTest(tf.test.TestCase): def test_one(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(a, b): batch_size = tf.shape(a)[0] return a + b, tf.tile([batch_size], [batch_size]) output = f(tf.constant([[1, 3]]), tf.constant([2])) tf.train.start_queue_runners() result, batch_size = session.run(output)
tensorflow.shape
10,005
import tensorflow as tf var = var[:FLAGS.visualiza_max] var = tf.concat(tf.unstack(var), axis=0) var = tf.expand_dims(var, dim=0) color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max)
tensorflow.expand_dims
10,006
import tensorflow as tf W_fc1 = weight_variable([bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4], NUM_CLASSES()]) b_fc1 = bias_variable([NUM_CLASSES()]) out = tf.matmul(reshape, W_fc1) + b_fc1 print('fc') print('\t{} --> {}'.format(bottom.name, out.name))
tensorflow.matmul
10,007
import tensorflow as tf scale = keep_prob if mode == "recurrent" and len(args.get_shape().as_list()) == 3: noise_shape = [shape[0], 1, shape[-1]] args = tf.cond(is_train, lambda: tf.nn.dropout( args, keep_prob, noise_shape=noise_shape) * scale, lambda: args) return args def softmax_mask(val, mask): return -INF * (1 - tf.cast(mask, tf.float32)) + val def pointer(inputs, state, hidden, mask, scope="pointer"): with tf.variable_scope(scope): u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [1, tf.shape(inputs)[1], 1]), inputs], axis=2) #[N,PL,2d] s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(tf.squeeze(s, [2]), mask)#[N,PL]
tensorflow.cast
10,008
import tensorflow as tf tf.compat.v2.summary.scalar( name="orig_q_vals", data=tf.reduce_mean(orig_q_vals), step=global_step, ) # What are the average Q values of the relabelled tasks? indices = tf.transpose( tf.stack([orig_indices, tf.squeeze(relabel_indices)], axis=0)) relabel_q_vals = tf.gather_nd(logits_vec, indices) tf.compat.v2.summary.scalar( name="relabel_q_vals", data=tf.reduce_mean(relabel_q_vals), step=global_step, ) max_q = tf.reduce_max(logits_vec, axis=1) tf.compat.v2.summary.scalar(
tensorflow.gather_nd
10,009
import tensorflow as tf pred1 = tf.slice(batch, [0, 0], [num_sam, 1]) def uniform(): batch2 = tf.gather(batch, tf.random.shuffle(index)) pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
tensorflow.random.shuffle
10,010
import tensorflow as tf :param inputdata: :param axis: :param name: :return: """ return tf.squeeze(input=inputdata, axis=axis, name=name) @staticmethod def deconv2d(inputdata, out_channel, kernel_size, padding='SAME', stride=1, w_init=None, b_init=None,
tensorflow.squeeze
10,011
import tensorflow as tf def fwd_gradients_0(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0] return tf.gradients(g, self.dummy_x0_tf)[0]
tensorflow.gradients
10,012
import tensorflow as tf out_depth = out_size[0] out_height = out_size[1] out_width = out_size[2] zero = tf.zeros([], dtype='int32') # 0 <= z < depth, 0 <= y < height & 0 <= x < width. max_z = tf.to_int32(tf.shape(im)[1] - 1) max_y = tf.to_int32(tf.shape(im)[2] - 1) max_x = tf.to_int32(tf.shape(im)[3] - 1) # Converts scale indices from [-1, 1] to [0, width/height/depth]. x = (x + 1.0) * (width_f) / 2.0 y = (y + 1.0) * (height_f) / 2.0 z = (z + 1.0) * (depth_f) / 2.0 x0 = tf.to_int32(tf.floor(x)) x1 = x0 + 1 y0 = tf.to_int32(tf.floor(y)) y1 = y0 + 1 z0 = tf.to_int32(tf.floor(z)) z1 = z0 + 1 x0_clip = tf.clip_by_value(x0, zero, max_x) x1_clip = tf.clip_by_value(x1, zero, max_x) y0_clip = tf.clip_by_value(y0, zero, max_y) y1_clip = tf.clip_by_value(y1, zero, max_y) z0_clip = tf.clip_by_value(z0, zero, max_z) z1_clip = tf.clip_by_value(z1, zero, max_z) dim3 = width
tensorflow.floor
10,013
import tensorflow as tf x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 64, (4, 4), strides=(2, 2), name="conv2", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 128, (4, 4), strides=(2, 2), name="conv3", activation=common_layers.belu, padding="SAME") flat_x = tf.layers.flatten(x) flat_x = tf.nn.dropout(flat_x, rate=dropout) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1") logits = tf.layers.dense( x, self.hparams.problem.num_actions, name="dense2" ) logits = tf.expand_dims(logits, axis=1) logits = clip_logits(logits, self.hparams) value = tf.layers.dense(x, 1, name="value") return {"target_policy": logits, "target_value": value} @registry.register_model class DenseBitwiseCategoricalPolicy(PolicyBase): """Dense network with bitwise input and categorical output."""
tensorflow.layers.dense
10,014
import tensorflow as tf rep_mask_comp = tf.concat([rep_mask, tf.cast(tf.zeros([bs, comp_len], tf.int32), tf.bool)], 1) rep_tensor_split = tf.reshape(rep_tensor_comp, [bs, block_num, block_len, input_dim]) # bs,bn,bl,d rep_mask_split = tf.reshape(rep_mask_comp, [bs, block_num, block_len]) # bs,bn,bl # non-linear
tensorflow.reshape
10,015
import tensorflow as tf encoder_state_ = tf.concat(encoder_states_, axis=1) elif encoder.final_state == 'average': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.final_state == 'average_inputs': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.bidir and encoder.final_state == 'last_both': encoder_state_ = tf.concat([last_forward, last_backward], axis=1) elif encoder.final_state == 'none': encoder_state_ = tf.zeros(shape=[batch_size, 0]) elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state encoder_state_ = last_backward else: # last forward hidden state
tensorflow.reduce_sum
10,016
from tensorflow.python.keras.utils.generic_utils import register_keras_serializable ns_pow = tf.pow(2., ns) ks = tf.round(ns_pow / rec) diffs = tf.math.abs(ks / ns_pow - 1 / rec) n = tf.argmin(diffs) k = ks[n] scale = k / tf.pow(2., tf.cast(n, tf.float32)) scale *= rec return scale @register_keras_serializable( package='Vitis', name='VitisGlobalAveragePooling2D') class VitisGlobalAveragePooling2D(tf.keras.layers.GlobalAveragePooling2D): """Vitis version of GlobalAveragePooling2D layer. This is an Vitis version of average pooling to simulate DPU behaviour which to integer approximations for averaging of specific sizes. """ def __init__(self, **kwargs):
tensorflow.python.keras.utils.generic_utils.register_keras_serializable
10,017
import tensorflow as tf pad a vector with a zero row and gather with input inds """ if pad is None: pad = tf.expand_dims(tf.zeros_like(vecs[0]), 0) else: pad = tf.expand_dims(pad, 0)
tensorflow.zeros_like
10,018
import tensorflow as tf length = tf.reduce_sum(used, axis=1) length = tf.cast(length, tf.int64) return length @staticmethod def last_relevant(outputs, length): # Borrowed from: https://gist.github.com/rockt/f4f9df5674f3da6a32786bcf9fbb6a88 batch_size, max_length, hidden_size = tf.unstack(tf.shape(outputs)) index = tf.range(0, batch_size) * max_length + (tf.cast(length, tf.int32) - 1) flat = tf.reshape(outputs, [-1, hidden_size]) relevant = tf.gather(flat, index) return relevant
tensorflow.cast
10,019
import tensorflow as tf print("***************") print("Training done!!") save_path = saver.save(sess, ckpt_name) print("Model saved in file: %s" % save_path) print ("creating protobuf...") g_1 = tf.get_default_graph() with tf.Session(graph = g_1) as sess: saver = tf.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True) saver.restore(sess, ckpt_name) graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes) tf.train.write_graph(tf.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
tensorflow.get_default_graph
10,020
import tensorflow as tf loss_fn: a callable, that returns loss perturb_norm_length: a `float`, Norm length of adversarial perturbation to be optimized with validatio Returns: adversial loss """ grad, = tf.gradients( loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) grad = tf.stop_gradient(grad) perturb = _scale_l2(grad, perturb_norm_length) return loss_fn(embedded + perturb) def virtual_adversarial_loss(logits, embedded, labels, length,
tensorflow.stop_gradient
10,021
import tensorflow as tf flat_labels = np.concatenate(k_labels, axis=-1).astype(np.float32) n_flat_labels = flat_labels.shape[1] n_rews = rb_data.reward.shape_as_list()[1] safe_rew_labels = np.pad( flat_labels, ((0, 0), (0, n_rews - n_flat_labels)), mode='constant') if as_tensor: return tf.to_float(safe_rew_labels) return safe_rew_labels # Pre-processor layers to remove observation from observation dict returned by # goal-conditioned point-mass environment.
tensorflow.to_float
10,022
from tensorflow.python.ops import gen_resource_variable_ops return None @property def op(self): return self.get().op def _read_variable_op(self): if _enclosing_tpu_context() is None: return self._primary_var.read_value() v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype) return v def read_value(self): return self._read_variable_op() def assign(self, value, use_locking=None, name=None, read_value=False): del use_locking with _handle_graph(self.handle), self._assign_dependencies():
tensorflow.python.ops.gen_resource_variable_ops.read_variable_op
10,023
import tensorflow as tf tf.reshape(means[:, :, :, 0, :], [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3 ], axis=3) centered_inputs = inputs - means inv_stdv = tf.exp(-log_scales) plus_in = inv_stdv * (centered_inputs + 1. / 255.) cdf_plus = tf.nn.sigmoid(plus_in) min_in = inv_stdv * (centered_inputs - 1. / 255.) cdf_min = tf.nn.sigmoid(min_in) log_cdf_plus = plus_in - tf.nn.softplus(plus_in) log_one_minus_cdf_min = -tf.nn.softplus(min_in) cdf_delta = cdf_plus - cdf_min mid_in = inv_stdv * centered_inputs log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in) log_probs = tf.select( inputs < -0.999, log_cdf_plus, tf.select(
tensorflow.nn.sigmoid
10,024
import tensorflow as tf @auto_reuse_variable_scope def discriminator(self, img): with argscope(Conv2D, nl=INLReLU, kernel_shape=4, stride=2): l = (LinearWrap(img) .Conv2D('conv0', NF, nl=LeakyReLU) .Conv2D('conv1', NF * 2) .Conv2D('conv2', NF * 4) .Conv2D('conv3', NF * 8, stride=1) .Conv2D('conv4', 1, stride=1, nl=tf.identity, use_bias=True)()) return l def _build_graph(self, inputs): A, B = inputs with tf.name_scope('preprocess'): A = tf.transpose(A / 128.0 - 1.0, [0, 3, 1, 2]) B = tf.transpose(B / 128.0 - 1.0, [0, 3, 1, 2]) def viz3(name, a, b, c): with tf.name_scope(name): im = tf.concat([a, b, c], axis=3) im = tf.transpose(im, [0, 2, 3, 1]) im = (im + 1.0) * 128 im = tf.clip_by_value(im, 0, 255) im = tf.cast(im, tf.uint8, name='viz') tf.summary.image(name, im, max_outputs=50) # use the initializers from torch with argscope([Conv2D, Deconv2D], use_bias=False,
tensorflow.transpose
10,025
from tensorflow.python.framework import random_seed fail_on_nan_loss=fail_on_nan_loss) def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None): if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'): return checkpoint_path = saver.latest_checkpoint(self._model_dir) eval_dir = os.path.join(self._model_dir, 'eval') with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) eval_dict = self._get_eval_ops(features, targets, metrics or self._get_default_metric_functions()) eval_results, _ = evaluate( graph=g, output_dir=eval_dir,
tensorflow.python.framework.random_seed.set_random_seed
10,026
import tensorflow as tf "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer(), ) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1])
tensorflow.nn.bias_add
10,027
import tensorflow as tf self.loss = tf.squared_difference(self.value_estimate, self.target) self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) self.train_op = self.optimizer.minimize( self.loss, global_step=tf.contrib.framework.get_global_step()) def predict(self, state, sess=None):
tensorflow.contrib.framework.get_global_step
10,028
import tensorflow as tf memory_ = tf.nn.relu( dense(d_memory, hidden, use_bias=False, scope="memory")) outputs = tf.matmul(inputs_, tf.transpose( memory_, [0, 2, 1])) / (hidden ** 0.5) mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1]) logits = tf.nn.softmax(softmax_mask(outputs, mask)) outputs = tf.matmul(logits, memory) res = tf.concat([inputs, outputs], axis=2) with tf.variable_scope("gate"): dim = res.get_shape().as_list()[-1] d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)
tensorflow.matmul
10,029
import tensorflow as tf nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of hops. Specify node set of each hop, including the root. adjcents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent matrix between hops. """ nodes = tf.reshape(nodes, [-1]) nodes_list = [nodes] adj_list = [] for hop_edge_types in edge_types: neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types) next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64) next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1) next_values = weight.values next_shape = [tf.size(nodes), tf.size(next_nodes)] next_adj = tf.sparse.SparseTensor(next_indices, next_values, next_shape) next_adj = tf.sparse.reorder(next_adj) nodes_list.append(next_nodes) adj_list.append(next_adj) nodes = next_nodes return nodes_list, adj_list
tensorflow.sparse.reorder
10,030
import tensorflow as tf else: all_logits.append(results[0]) all_top_1_ops.append(results[1]) all_top_5_ops.append(results[2]) if self.variable_mgr.retain_tower_updates(device_num): # Retain the Batch Normalization updates operations only from the # first tower. Ideally, we should grab the updates from all towers but # these stats accumulate extremely fast so we can ignore the other # stats from the other towers without significant detriment. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) staging_delta_ops = list(self.variable_mgr.staging_delta_ops) if not update_ops: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) enqueue_ops.append(tf.group(*gpu_copy_stage_ops)) if self.variable_mgr.supports_staged_vars(): for staging_ops in self.variable_mgr.staging_vars_on_devices: gpu_compute_stage_ops.extend( [put_op for _, (put_op, _) in six.iteritems(staging_ops)]) enqueue_ops.append(tf.group(*gpu_compute_stage_ops)) if gpu_grad_stage_ops: staging_delta_ops += gpu_grad_stage_ops if staging_delta_ops: enqueue_ops.append(tf.group(*(staging_delta_ops))) if not phase_train: if FLAGS.forward_only: all_logits = tf.concat(all_logits, 0) fetches = [all_logits] + enqueue_ops
tensorflow.group
10,031
import tensorflow as tf #final_state = rnn_outputs[-1] # 得到最后的state cell = tf.contrib.rnn.BasicRNNCell(num_units=state_size) rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state) '''预测,损失,优化''' with tf.variable_scope('softmax'): W = tf.get_variable('W', [state_size, num_classes]) b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0)) '''因为rnn_outputs是三维的,这里需要将其转成2维的, 矩阵运算后再转换回来[batch_size, num_steps, num_classes]''' logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) +b, \ shape=[batch_size, num_steps, num_classes]) predictions = tf.nn.softmax(logits) y_as_list = tf.unstack(y, num=num_steps, axis=1) losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=logits) total_loss = tf.reduce_mean(losses) train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss) '''训练网络'''
tensorflow.reshape
10,032
import tensorflow as tf body_outputs, losses = model.model_fn_sharded(new_features) if not isinstance(losses, dict): # If it's a single extra loss. losses = {"extra": losses} with tf.variable_scope(target_modality.name): new_sharded_logits = target_modality.top_sharded( body_outputs, sharded_features["targets"], dp)
tensorflow.variable_scope
10,033
import tensorflow as tf Haa = param_eta * prec + Waa # Haa = 0.5 * (Haa + TT.transpose(Haa)) HaaInv = tf.matrix_inverse(Haa) # The two terms 'term1' and 'term2' which come from normalizers of the # 1. Original policy distribution # 2. The distribution after completing the square sigma = tf.matrix_inverse(prec) term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma)) if self.beta == 0: term2 = 0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * param_eta * HaaInv)) else: term2 = 0.5 * (param_eta + param_omega) * tf.log(tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv)) dual = param_eta * self.epsilon - param_omega * beta + \ term1 + term2 + tf.reduce_mean( 0.5 * (tf.reduce_sum(tf.matmul(ha, HaaInv) * ha, axis=1) - hss))
tensorflow.matrix_determinant
10,034
from tensorflow.python.client import device_lib else: raise ValueError('Invalid model: {}'.format(FLAGS.model)) if FLAGS.rnn_mode: config.rnn_mode = FLAGS.rnn_mode if FLAGS.num_gpus != 1 or tf.__version__ < '1.3.0': config.rnn_mode = BASIC return config def main(_): if not FLAGS.data_path: raise ValueError('data_path must be set') gpus = [ x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU' ] if FLAGS.num_gpus > len(gpus): raise ValueError('Invalid num_gpus') raw_data = reader.ptb_raw_data(FLAGS.data_path) train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1
tensorflow.python.client.device_lib.list_local_devices
10,035
import tensorflow as tf return inputs, feat # image_size = 32, img_channels = 3, class_num = 10 in cifar10 x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels]) label = tf.placeholder(tf.float32, shape=[None,]) one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num) training_flag = tf.placeholder(tf.bool) learning_rate = tf.placeholder(tf.float32, name='learning_rate') logits, feat = resnet_model_fn(x, training=training_flag) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits)) Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5)) l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num) Total_loss = cost + l2_loss optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(Total_loss) correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/' # annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json'
tensorflow.trainable_variables
10,036
import tensorflow as tf model = build_model(args, self._multilingual_dp.meta, self._multilingual_dp.meta, name=name) return model def example_to_input(self, batch_of_data: dict, mode) -> dict: """ Transform the data examples to model acceptable inputs. Args: batch_of_data: A data tensor with shape [batch, ...] mode: The running mode. Returns: The input data for model. """ src = batch_of_data["feature"] if self._trg_lang_tag_position in ["src", "source"]: src = tf.concat([tf.expand_dims(batch_of_data["trg_lang"], axis=1), src], axis=1) if self._with_src_lang_tag: src = tf.concat([tf.expand_dims(batch_of_data["src_lang"], axis=1), src], axis=1) input_dict = {"src": src, "src_length": deduce_text_length(src, self._multilingual_dp.meta["pad_id"], self._multilingual_dp.meta["padding_mode"])} if self._trg_lang_tag_position in ["trg", "target"]: target_bos = batch_of_data["trg_lang"] else: target_bos = tf.tile([tf.convert_to_tensor( self._multilingual_dp.meta["bos_id"], dtype=tf.int64)], [tf.shape(src)[0]]) if mode == compat.ModeKeys.INFER: input_dict["trg_input"] = target_bos else:
tensorflow.expand_dims
10,037
import tensorflow as tf return tf.where(t < 1, 0.5 * t ** 2, t - 0.5) def MultiBoxLoss(num_class=2, neg_pos_ratio=3): """multi-box loss""" def multi_box_loss(y_true, y_pred): num_batch = tf.shape(y_true)[0] num_prior = tf.shape(y_true)[1] loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4]) landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 10]) class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class]) loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4]) landm_true = tf.reshape(y_true[..., 4:14], [num_batch * num_prior, 10]) landm_valid = tf.reshape(y_true[..., 14], [num_batch * num_prior, 1]) class_true = tf.reshape(y_true[..., 15], [num_batch * num_prior, 1]) # define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore) # landm_valid = 1 (w landm), 0 (w/o landm) mask_pos = tf.equal(class_true, 1) mask_neg = tf.equal(class_true, 0) mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
tensorflow.reshape
10,038
import tensorflow as tf ) # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op) def _build(self, input_batch, is_training=True, test_local_stats=True):
tensorflow.add_to_collection
10,039
import tensorflow as tf out = spinn._bundle(lstm_iter) self.assertEqual(2, len(out)) self.assertEqual(tf.float32, out[0].dtype) self.assertEqual(tf.float32, out[1].dtype) self.assertAllEqual(np.array([[0, 2, 0, -2, 0, 4, 0, -4]]).T, out[0].numpy()) self.assertAllEqual(np.array([[1, 3, -1, -3, 2, 6, -2, -6]]).T, out[1].numpy()) def testUnbunbdle(self): with tf.device(self._test_device): state = [np.array([[0, 1, 2], [3, 4, 5]], dtype=np.float32), np.array([[0, -1, -2], [-3, -4, -5]], dtype=np.float32)] out = spinn._unbundle(state) self.assertEqual(2, len(out)) self.assertEqual(tf.float32, out[0].dtype) self.assertEqual(tf.float32, out[1].dtype) self.assertAllEqual(np.array([[0, 1, 2, 0, -1, -2]]), out[0].numpy()) self.assertAllEqual(np.array([[3, 4, 5, -3, -4, -5]]),
tensorflow.device
10,040
import tensorflow as tf average_across_timesteps=True) res = sess.run(average_loss_per_example) self.assertAllClose(np.asarray([1.609438, 1.609438]), res) loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example( logits, targets, weights, average_across_timesteps=False) res = sess.run(loss_per_sequence)
tensorflow.nn.seq2seq.sequence_loss_by_example
10,041
import tensorflow as tf p = self.params with tf.variable_scope(p.name):
tensorflow.variable_scope
10,042
from tensorflow.contrib.framework import tensor_util pearson_r: A tensor representing the current Pearson product-moment correlation coefficient, the value of `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. update_op: An operation that updates the underlying variables appropriately. Raises: ValueError: If `labels` and `predictions` are of different sizes, or if `weights` is the wrong size, or if either `metrics_collections` or `updates_collections` are not a `list` or `tuple`. """ with variable_scope.variable_scope(name, 'pearson_r', [predictions, labels]): predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) cov, update_cov = streaming_covariance( predictions, labels, weights=weights, name='covariance') var_predictions, update_var_predictions = streaming_covariance( predictions, predictions, weights=weights, name='variance_predictions') var_labels, update_var_labels = streaming_covariance( labels, labels, weights=weights, name='variance_labels') pearson_r = _safe_div(
tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions
10,043
import tensorflow as tf num_classes = labels.get_shape()[-1].value labels = tf.cast(labels, predictions.dtype)
tensorflow.cast
10,044
import tensorflow as tf logger.info(f'load devset-{i} {len(devdata)}') logger.info('Load the first task saver!') saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()]) logger.info('Update all copies! (lazymodel, normalizers_copy)') tf.get_default_session().run(sync_model_to_lazymodel) tf.get_default_session().run(copy_normalizers) logger.info('Loaded normalizers:') load_norm = tf.get_default_session().run(normalizers_parameters) logger.info(load_norm) TASK_NUM = 1
tensorflow.get_default_session
10,045
import tensorflow as tf if self.ctx2out: w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer) h_logits += tf.matmul(context, w_ctx2out)
tensorflow.matmul
10,046
import tensorflow as tf config.max_grad_norm) optimizer = tf.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=tf.train.get_or_create_global_step()) self._new_lr = tf.placeholder( tf.float32, shape=[], name='new_learning_rate') self._lr_update = tf.assign(self._lr, self._new_lr) self.saver = tf.train.Saver(tf.global_variables()) def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return tf.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0., state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return tf.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.) raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode)) def _build_rnn_graph(self, inputs, config, is_training): def make_cell(): cell = self._get_lstm_cell(config, is_training) if is_training and config.keep_prob < 1: cell = tf.contrib.rnn.DropoutWrapper(
tensorflow.contrib.rnn.BasicLSTMCell
10,047
import tensorflow as tf w = variable_with_weight_decay([n_in, output_dim], initializer, l2_strength) variable_summaries(w) if isinstance(bias, float): bias = tf.get_variable("biases", [output_dim], tf.float32, tf.constant_initializer(bias)) variable_summaries(bias) output = tf.nn.bias_add(tf.matmul(x, w), bias) return output def _bn(self, name, x): with tf.variable_scope(name): moving_average_decay = 0.9 decay = moving_average_decay
tensorflow.matmul
10,048
import tensorflow as tf rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None, keep_unselected=True, scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'): bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2] org_ivec = rep_tensor.get_shape().as_list()[2] ivec = hn or org_ivec with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'): # non-linear rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation, False, wd, keep_prob, is_train) # ensure the seletion is right dep_selection = tf.logical_and(rep_mask, dep_selection) head_selection = tf.logical_and(rep_mask, head_selection)
tensorflow.variable_scope
10,049
import tensorflow as tf def build_value(self, _input): with tf.variable_scope('VF'): hidden = tf.layers.dense(inputs=_input, units=self.vf_hidden_size, activation=tf.nn.elu) w = tf.get_variable("weights", (self.vf_hidden_size, 1)) return tf.matmul(hidden, w) def build_loss(self): cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1]) dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1) gcut = tf.stop_gradient(self.g) mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001 dcos = dot / mag
tensorflow.matmul
10,050
import tensorflow as tf inc.eval() save.save(sess, filepath, global_step=2) with self.test_session() as sess: # Build a new graph with different initialization. v0 = tf.Variable(-1.0) # Create a new saver. save = tf.train.Saver({"v0": v0}) tf.initialize_all_variables().run() # Get the most recent checkpoint name from the training history file. name = tf.train.latest_checkpoint(traindir) self.assertIsNotNone(name) # Restore "v0" from that checkpoint. save.restore(sess, name) self.assertEqual(v0.eval(), 2.0)
tensorflow.initialize_all_variables
10,051
import tensorflow as tf logits_clip = getattr(config, "logits_clip", 0.) if logits_clip > 0: min_logit = tf.reduce_min(logits) return tf.minimum(logits - min_logit, logits_clip) else: return logits
tensorflow.minimum
10,052
import tensorflow as tf def apply_gradients(self, *args, **kwargs): return self._optimizer.apply_gradients(*args, **kwargs) def main(argv=None): start1 = time.time() import os os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list if not tf.gfile.Exists(FLAGS.checkpoint_path): tf.gfile.MkDir(FLAGS.checkpoint_path) else: if not FLAGS.restore: tf.gfile.DeleteRecursively(FLAGS.checkpoint_path) tf.gfile.MkDir(FLAGS.checkpoint_path) input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images') input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps') if FLAGS.geometry == 'RBOX': input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps') else: input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps') input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks') global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False) learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.94, staircase=True) # add summary
tensorflow.gfile.MkDir
10,053
import tensorflow as tf tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train']) weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights) tf.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value, collections=['train']) self.train_summary = tf.summary.merge_all(key='train') self.eval_summary = tf.summary.merge_all(key='eval') self.saver = tf.train.Saver(tf.global_variables()) def separate_gradient_update(self): denoise_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
tensorflow.summary.merge_all
10,054
import tensorflow as tf return None for var in self.get_variables_in_scope(): # TODO: different summary types tf.summary.scalar(var.name, tf.reduce_mean(var)) self._summary_added = True def get_variables_in_scope(self): assert self.template._variables_created, "Variables not yet created or undefined." variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.variable_scope_name) return variables @property def template(self): return self._template @property
tensorflow.get_collection
10,055
import tensorflow as tf
tensorflow.expand_dims
10,056
import tensorflow as tf Returns: Variable tensor """ with tf.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) stride_h, stride_w = stride outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases) if bn: outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training) if activation_fn is not None: outputs = tf.nn.leaky_relu(outputs, alpha=0.2) return outputs def conv2d_transpose(inputs,
tensorflow.nn.conv2d
10,057
import tensorflow as tf actor.add_grad_to_graph(critic.a_grads) M = Memory(MEMORY_CAPACITY) saver = tf.train.Saver(max_to_keep=100) if LOAD_MODEL: all_ckpt = tf.train.get_checkpoint_state('./data', 'checkpoint').all_model_checkpoint_paths saver.restore(sess, all_ckpt[-1]) else: if os.path.isdir(DATA_PATH): shutil.rmtree(DATA_PATH) os.mkdir(DATA_PATH) sess.run(tf.global_variables_initializer()) if OUTPUT_GRAPH: tf.summary.FileWriter('logs', graph=sess.graph) var = 3 # control exploration var_min = 0.01 for i_episode in range(MAX_EPISODES): # s = (hull angle speed, angular velocity, horizontal speed, vertical speed, position of joints and joints angular speed, legs contact with ground, and 10 lidar rangefinder measurements.) s = env.reset() ep_r = 0 while True: if RENDER: env.render() a = actor.choose_action(s) a = np.clip(np.random.normal(a, var), -1, 1) # add randomness to action selection for exploration s_, r, done, _ = env.step(a) # r = total 300+ points up to the far end. If the robot falls, it gets -100.
tensorflow.summary.FileWriter
10,058
import tensorflow as tf conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length], stddev=0.1, dtype=tf.float32)) conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME')
tensorflow.nn.conv2d
10,059
import tensorflow as tf task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
tensorflow.contrib.cluster_resolver.TPUClusterResolver
10,060
import tensorflow as tf """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100)
tensorflow.data.TFRecordDataset
10,061
from tensorflow.contrib.learn.python.learn.datasets import base test_path = os.path.join(module_path, 'data', 'text_test.csv') train = base.load_csv_without_header( train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) test = base.load_csv_without_header( test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) return base.Datasets(train=train, validation=None, test=test)
tensorflow.contrib.learn.python.learn.datasets.base.load_csv_without_header
10,062
import tensorflow as tf "step-{}-mel-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss), target_spectrogram=target, max_len=target_length) log("Input at step {}: {}".format(step, sequence_to_text(input_seq))) if step % args.embedding_interval == 0 or step == args.tacotron_train_steps or step == 1: # Get current checkpoint state checkpoint_state = tf.train.get_checkpoint_state(save_dir) # Update Projector log("\nSaving Model Character Embeddings visualization..") add_embedding_stats(summary_writer, [model.embedding_table.name], [char_embedding_meta], checkpoint_state.model_checkpoint_path) log("Tacotron Character embeddings have been updated on tensorboard!") log("Tacotron training complete after {} global steps!".format( args.tacotron_train_steps), slack=True)
tensorflow.train.get_checkpoint_state
10,063
import tensorflow as tf """ assert len(input_tensor.get_shape()) == 2 assert len(idx.get_shape()) == 1 idx_flattened = tf.range(0, input_tensor.shape[0], dtype=tf.int64) * input_tensor.shape[1] + idx offset_tensor = tf.gather(tf.reshape(input_tensor, [-1]), # flatten input idx_flattened) # use flattened indices
tensorflow.range
10,064
import tensorflow as tf with tf.control_dependencies([perturb_for_adaption]): update_scale_expr = tf.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_thres_expr = param_noise_threshold.assign( tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1) deterministic_actions = tf.argmax(policy.q_values, axis=1) batch_size = tf.shape(policy.obs_ph)[0] n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
tensorflow.cond
10,065
import tensorflow.contrib.graph_editor as ge for ts in [ts_filtered, ts_all]: # get all bottlenecks in the graph bottleneck_ts = [] for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all) f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
tensorflow.contrib.graph_editor.get_backward_walk_ops
10,066
import tensorflow as tf with tf.name_scope('pull'): # 获取global参数,复制到local—net self.pull_params_op = tf.group(*[v1.assign(v2) for v1, v2 in zip(self.var_list, globalAC.var_list)]) with tf.name_scope('push'): # 将参数传送到gloabl中去 self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list)) # 其中传送的是local—net的actor和critic的参数梯度grads,具体计算在上面定义 # apply_gradients是tf.train.Optimizer中自带的功能函数,将求得的梯度参数更新到global中 self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0]) self.train_op = tf.group(self.update_params_op, self.inc_step) # GLOBALE_STEP += tf.shape(self.obs)[0] def build_model(self): """ Builds the manager and worker models. """ with tf.variable_scope('FeUdal'): self.build_placeholders() self.build_perception() self.build_manager() self.build_worker() self.build_loss() self.var_list = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name) # for v in self.var_list: # print v.name self.state_in = [self.worker_lstm.state_in[0], self.worker_lstm.state_in[1], self.manager_lstm.state_in[0], self.manager_lstm.state_in[1]
tensorflow.variable_scope
10,067
import tensorflow as tf infer_step, [result, logits, loss], shape_invariants=[ tf.TensorShape([None, None, None, None]), tf.TensorShape([None, None, None, None, None]), tf.TensorShape([]), ], back_prop=False, parallel_iterations=1)
tensorflow.TensorShape
10,068
import tensorflow as tf observ = tf.py_func( self._batch_env.reset, [indices], observ_dtype, name='reset') observ = tf.check_numerics(observ, 'observ') reward = tf.zeros_like(indices, tf.float32) done = tf.zeros_like(indices, tf.bool) with tf.control_dependencies([ tf.scatter_update(self._observ, indices, observ), tf.scatter_update(self._reward, indices, reward), tf.scatter_update(self._done, indices, done)]): return tf.identity(observ) @property def observ(self): """Access the variable holding the current observation.""" return self._observ
tensorflow.scatter_update
10,069
import tensorflow as tf valid_pre = tf.argmax(valid_pre, 1) valid_true = tf.argmax(valid_labels, 1) target_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo', 'class ym', 'class shq', 'class shj', 'class no', 'class yh', 'class fb'] init = tf.initialize_all_variables() config=tf.ConfigProto() config.gpu_options.allow_growth=True #init=tf.initialize_all_variables() def train(train_num=64,test_num=32,lr=1e-4,loop_count=10000,report_step=100,save_step=1000,restore=False):
tensorflow.initialize_all_variables
10,070
import tensorflow as tf for _ in range(buffer_length): buf.append(tf.random_normal((batch_size, size * 2)))
tensorflow.random_normal
10,071
import tensorflow as tf def apply_optimizers(objectives, trainer, config): # Make sure all losses are computed and apply loss scales. processed = [] values = [ob.value for ob in objectives] for ob in objectives: loss = {min: ob.value, max: -ob.value}[ob.goal] loss *= config.loss_scales[ob.name] with tf.control_dependencies(values): loss = tf.identity(loss) processed.append(ob._replace(value=loss, goal=min)) # Merge objectives that operate on the whole model to compute only one # backward pass and to share optimizer statistics. objectives = [] losses = [] for ob in processed: if ob.include == r'.*' and ob.exclude is None: assert ob.goal == min losses.append(ob.value)
tensorflow.identity
10,072
import tensorflow as tf self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3 mean, var = tf.nn.moments(tgtimg_z, axes=[0]) print(var.get_shape()) # self.simloss /= tf.reduce_mean(var) print(tgtimg_z.get_shape()) self.out = output_h4# + contextimg#tf.nn.tanh(h4) self.out2 = truthoutput_h4 self.recon1 = tf.nn.l2_loss(tgtimg - self.out) self.recon2 = tf.nn.l2_loss(tgtimg - self.out2) self.loss = self.recon1 + self.recon2 + self.simloss if ablation_type == "None": self.loss = self.recon1 + self.recon2 + self.simloss elif ablation_type == "L2": self.loss = self.recon1 + self.recon2 elif ablation_type == "L2L3":
tensorflow.nn.l2_loss
10,073
import tensorflow as tf out_decoder2 = tf.reshape(self.Y_hat, [tf.shape(self.Y_hat)[0], -1, n_mels]) dec = conv1d_banks(out_decoder2, K=decoder_num_banks, is_training=self.training) dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same") dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME") dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training)) dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME") dec = tf.layers.batch_normalization(dec, training=self.training) dec = tf.layers.dense(dec, embed_size // 2) for i in range(4): dec = highwaynet( dec, num_units=embed_size // 2, scope="decoder-highwaynet-{}".format(i) ) with tf.variable_scope("decoder-gru", reuse=False): cell = tf.contrib.rnn.GRUCell(embed_size // 2) cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2) outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32) outputs = tf.concat(outputs, 2) self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2) self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y)) self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z)) self.loss = self.loss1 + self.loss2 self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss) # In[3]: tf.reset_default_graph()
tensorflow.contrib.rnn.GRUCell
10,074
import tensorflow as tf N = len(imglist) filenames = tf.constant([k[0] for k in imglist], name='filenames') labels = tf.constant([k[1] for k in imglist], dtype=tf.int32, name='labels') ds = tf.data.Dataset.from_tensor_slices((filenames, labels)) if isTrain: ds = ds.shuffle(N, reshuffle_each_iteration=True).repeat()
tensorflow.data.Dataset.from_tensor_slices
10,075
import tensorflow as tf Args: sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss can be a single Tensor or a 2-tuple (numerator and denominator). Returns: losses: dict<str loss_name, Tensor avg_loss> """ losses = {} for loss_name in sharded_losses[0]: all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses] if isinstance(all_shards[0], tuple): sharded_num, sharded_den = zip(*all_shards) mean_loss = ( tf.add_n(sharded_num) / tf.maximum(1.0, tf.add_n(sharded_den))) else: mean_loss = tf.reduce_mean(all_shards) losses[loss_name] = mean_loss return losses def summarize_features(features, num_shards=1): with tf.name_scope("input_stats"): for (k, v) in six.iteritems(features): if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1: tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards) tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
tensorflow.add_n
10,076
import tensorflow as tf # Hidden 1 images = tf.constant(1.2, tf.float32, shape=[100, 28]) with tf.name_scope("hidden1"): weights = tf.Variable( tf.truncated_normal([28, 128], stddev=1.0 / math.sqrt(float(28))), name="weights") biases = tf.Variable(tf.zeros([128]), name="biases") hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases) # Hidden 2 with tf.name_scope("hidden2"): weights = tf.Variable( tf.truncated_normal([128, 32], stddev=1.0 / math.sqrt(float(128))), name="weights") biases = tf.Variable(tf.zeros([32]), name="biases") hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases) # Linear with tf.name_scope("softmax_linear"): weights = tf.Variable(
tensorflow.name_scope
10,077
import tensorflow as tf for ob in processed: if ob.include == r'.*' and ob.exclude is None: assert ob.goal == min losses.append(ob.value) else: objectives.append(ob) objectives.append(Objective('main', tf.reduce_sum(losses), min, r'.*', None)) # Apply optimizers and collect loss summaries. summaries = [] grad_norms = {} # for ob in processed:
tensorflow.reduce_sum
10,078
import tensorflow as tf with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if clip: log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6)) else: log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
tensorflow.one_hot
10,079
import tensorflow as tf tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, keep_checkpoint_max=20, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores,
tensorflow.contrib.cluster_resolver.TPUClusterResolver
10,080
import tensorflow as tf #X:[n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2,n_ctx,2] X = tf.reshape(X, [-1, n_ctx, 2]) M = tf.reshape(M, [-1, n_ctx]) h = embed(X, we) #h=[-1,n_ctx,emb] for layer in range(n_layer): h = block(h, 'h%d'%layer, train=train, scale=True) #h=[-1,n_ctx,emb] lm_h [-1,emb] lm_h = tf.reshape(h[:, :-1], [-1, n_embd]) lm_logits = tf.matmul(lm_h, we, transpose_b=True) lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1])) lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1]) lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1) clf_h = tf.reshape(h, [-1, n_embd]) pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32) clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx) clf_h = tf.reshape(clf_h, [-1, 2, n_embd]) if train and clf_pdrop > 0: shape = shape_list(clf_h)
tensorflow.matmul
10,081
from tensorflow.python.ops import variable_scope W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size]) self.W_h = W_h encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME") # [batch_size, passage_len, 1, attention_vec_size] encoder_features = tf.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size]) return encoder_features def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, encoder_features, passage_word_idx, passage_mask): options = self.options with variable_scope.variable_scope("attention_decoder"): v = variable_scope.get_variable("v", [options.attention_vec_size]) v = tf.expand_dims(tf.expand_dims(v, axis=0), axis=0) w_c = None if options.use_coverage: with variable_scope.variable_scope("coverage"): w_c = variable_scope.get_variable("w_c", [options.attention_vec_size]) w_c = tf.expand_dims(tf.expand_dims(w_c, axis=0), axis=0) word_t_representation = self.embedding_lookup(word_t) (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder(
tensorflow.python.ops.variable_scope.get_variable
10,082
from tensorflow.contrib import metrics as metrics_lib result = {"loss": metrics_lib.streaming_mean(self._loss( logits, targets, weight_tensor=self._get_weight_tensor(features)))} # Adding default metrics if metrics is None and self._n_classes > 1: metrics = {"accuracy": metrics_lib.streaming_accuracy} if self._n_classes == 2: predictions = math_ops.sigmoid(logits) result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets) if metrics: predictions = self._logits_to_predictions(logits, proba=False) result.update(self._run_metrics(predictions, targets, metrics, self._get_weight_tensor(features))) return result def _get_predict_ops(self, features):
tensorflow.contrib.metrics.streaming_auc
10,083
import tensorflow as tf def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro") recall = tf_metrics.recall(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro") f = tf_metrics.f1(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro") loss = tf.metrics.mean(per_example_loss) return { "eval_precision":precision, "eval_recall":recall, "eval_f": f,
tensorflow.metrics.mean
10,084
import tensorflow as tf image = tensor_dict[fields.InputDataFields.image] preprocessed_resized_image, true_image_shape = model_preprocess_fn( tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0)) if use_bfloat16: preprocessed_resized_image = tf.cast( preprocessed_resized_image, tf.bfloat16) tensor_dict[fields.InputDataFields.image] = tf.squeeze( preprocessed_resized_image, axis=0) tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze( true_image_shape, axis=0) if fields.InputDataFields.groundtruth_instance_masks in tensor_dict: masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] _, resized_masks, _ = image_resizer_fn(image, masks) if use_bfloat16: resized_masks = tf.cast(resized_masks, tf.bfloat16) tensor_dict[fields.InputDataFields. groundtruth_instance_masks] = resized_masks # Transform groundtruth classes to one hot encodings. label_offset = 1 zero_indexed_groundtruth_classes = tensor_dict[ fields.InputDataFields.groundtruth_classes] - label_offset tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot( zero_indexed_groundtruth_classes, num_classes) if use_multiclass_scores: tensor_dict[fields.InputDataFields.groundtruth_classes] = tensor_dict[ fields.InputDataFields.multiclass_scores]
tensorflow.cast
10,085
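A small sketch of tf.cast converting image data to float, as in the record above (TF 1.x session style; the pixel values are illustrative):

import tensorflow as tf

image_uint8 = tf.constant([[0, 128, 255]], dtype=tf.uint8)
image_float = tf.cast(image_uint8, dtype=tf.float32)  # same values, float dtype

with tf.Session() as sess:
    print(sess.run(image_float))  # [[  0. 128. 255.]]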
import tensorflow as tf layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None else: layer = tf.contrib.layers.batch_norm(layer, is_training=False, center=True, scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None elif norm_type == 'layer_norm': # layer_norm # Take activation_fn out to apply lrelu try: layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True, scale=False, scope=vs)) # updates_collections=None except ValueError: layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True, scale=False, scope=vs, reuse=True)) elif norm_type == 'selu': layer = selu(layer)
tensorflow.contrib.layers.layer_norm
10,086
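A hedged sketch of tf.contrib.layers.layer_norm on its own (TF 1.x; the input shape and scope name are illustrative):

import tensorflow as tf

x = tf.random_normal([4, 8])  # [batch, features]
# Normalizes across the feature axis; center/scale toggle the learned beta/gamma.
y = tf.contrib.layers.layer_norm(x, center=True, scale=False, scope="ln")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y).shape)  # (4, 8)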
import tensorflow as tf # Unstack the inputs to obtain a list of batches, one for each time step. chars = tf.unstack(chars, axis=0) for ch in chars: output, state = cell(ch, state) outputs.append(output) # The outputs of this layer are the inputs of the subsequent layer. chars = tf.stack(outputs, axis=0) if training: chars = tf.nn.dropout(chars, self.keep_prob) # Extract the correct output (i.e., hidden state) for each example. All the # character sequences in this batch were padded to the same fixed length so # that they could be easily fed through the above RNN loop. The # `sequence_length` vector tells us the true lengths of the character # sequences, letting us obtain for each sequence the hidden state that was # generated by its non-padding characters. batch_range = [i for i in range(batch_size)]
tensorflow.nn.dropout
10,087
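A minimal sketch of tf.nn.dropout with the TF 1.x keep_prob semantics (the tensor shape below is illustrative, not from the record above):

import tensorflow as tf

chars = tf.ones([2, 3, 16])                    # e.g. [time, batch, units]
dropped = tf.nn.dropout(chars, keep_prob=0.5)  # kept units are scaled by 1/keep_prob

with tf.Session() as sess:
    print(sess.run(dropped).shape)  # (2, 3, 16)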
import tensorflow as tf
    test_inputs: The test inputs for tflite.
    input_tensor: The input tensor of tensorflow graph.
    output_tensor: The output tensor of tensorflow graph.
    use_mlir_converter: Whether or not to use MLIRConverter to convert the
      model.

  Returns:
    The tflite inference result.
  """
  converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
                                                   [output_tensor])
  # Set the MLIR flag before convert(); assigning it after conversion has no
  # effect on the already-converted model.
  converter.experimental_enable_mlir_converter = use_mlir_converter
  tflite = converter.convert()

  interpreter = tf.lite.Interpreter(model_content=tflite)
  try:
    interpreter.allocate_tensors()
tensorflow.lite.TFLiteConverter.from_session
10,088
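A self-contained sketch of tf.lite.TFLiteConverter.from_session (assuming TF 1.13+; the tiny dense graph here is a stand-in for whatever graph the record above converts):

import tensorflow as tf

with tf.Graph().as_default():
    inp = tf.placeholder(tf.float32, [1, 4], name="input")
    out = tf.layers.dense(inp, 2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        converter = tf.lite.TFLiteConverter.from_session(sess, [inp], [out])
        tflite_model = converter.convert()          # bytes of the .tflite flatbuffer
        interpreter = tf.lite.Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()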
import tensorflow as tf self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].shape) def testEmbeddingRNNDecoder(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): inp = [tf.constant(0.5, shape=[2, 2])] * 2 cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) _, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] dec, mem = tf.nn.seq2seq.embedding_rnn_decoder( dec_inp, enc_state, cell, num_symbols=4, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec)
tensorflow.nn.rnn_cell.BasicLSTMCell
10,089
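A minimal sketch of tf.nn.rnn_cell.BasicLSTMCell applied to a single timestep (TF 1.x; batch size and dimensions are illustrative):

import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=2, state_is_tuple=True)
inputs = tf.constant(0.5, shape=[2, 2])                  # [batch, input_dim]
state = cell.zero_state(batch_size=2, dtype=tf.float32)
output, new_state = cell(inputs, state)                  # one step of the recurrence

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(output).shape)  # (2, 2)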
import tensorflow as tf ##################################################################################### # https://github.com/yaroslavvb/memory_util import sys import tensorflow as tf import memory_util memory_util.vlog(1) sess = tf.Session() with sess.as_default(): tensor = tf.range(10) print_op = tf.print("tensors:", tensor, {'2': tensor * 2}, output_stream=sys.stderr) with tf.control_dependencies([print_op]): tripled_tensor = tensor * 3 with memory_util.capture_stderr() as stderr: print(sess.run(tripled_tensor)) print(stderr.getvalue())
tensorflow.print
10,090
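A hedged sketch of tf.print in graph mode (TF 1.13+): the returned op only fires if something that depends on it is fetched, hence the control dependency; the tensor values are illustrative:

import sys
import tensorflow as tf

tensor = tf.range(5)
print_op = tf.print("tensor:", tensor, output_stream=sys.stderr)
with tf.control_dependencies([print_op]):
    doubled = tensor * 2

with tf.Session() as sess:
    sess.run(doubled)   # the print happens as a side effect of fetching `doubled`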
import tensorflow as tf predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running prediction*****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
tensorflow.logging.info
10,091
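A minimal sketch of tf.logging.info (TF 1.x); the counts are made up, and note that INFO messages are suppressed unless verbosity is raised:

import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("***** Running prediction *****")
tf.logging.info("  Num examples = %d", 128)
tf.logging.info("  Batch size = %d", 8)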
import tensorflow as tf # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN)) else: for pred_ind in list(range(len(pred_outputs))): mse_loss_list.append(tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss') tf.summary.scalar('mse', mse_loss) tf.losses.add_loss(mse_loss) # bce_loss_list = [] # for pred_ind in list(range(len(pred_outputs))): # bce_loss_list.append(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred_outputs[pred_ind], labels=targets_list[pred_ind]/255., name='loss_{}'.format(pred_ind)), name='loss_mean_{}'.format(pred_ind))) # mse_loss = tf.multiply(params['mse_weight'] / params['num_stacks'], tf.add_n(bce_loss_list), name='mse_loss') # tf.summary.scalar('mse', mse_loss) # tf.losses.add_loss(mse_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy. loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name])
tensorflow.losses.add_loss
10,092
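A hedged sketch of registering a custom loss with tf.losses.add_loss and retrieving it via the LOSSES collection (TF 1.x; the tensors are synthetic):

import tensorflow as tf

predictions = tf.constant([1.0, 2.0])
targets = tf.constant([1.5, 1.5])
mse_loss = tf.reduce_mean(tf.squared_difference(predictions, targets), name="mse_loss")
tf.losses.add_loss(mse_loss)                      # add to the default LOSSES collection
total_loss = tf.losses.get_total_loss(add_regularization_losses=False)

with tf.Session() as sess:
    print(sess.run(total_loss))  # 0.25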
import tensorflow as tf # Init optimizer opt = tf.train.MomentumOptimizer(lr, opt_momentum, use_locking=True, use_nesterov=True)
tensorflow.train.MomentumOptimizer
10,093
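A minimal sketch of tf.train.MomentumOptimizer with Nesterov momentum (TF 1.x; the toy quadratic loss is illustrative):

import tensorflow as tf

w = tf.Variable(3.0)
loss = tf.square(w)
opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9, use_nesterov=True)
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        sess.run(train_op)
    print(sess.run(w))   # moves toward the minimum at 0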
import tensorflow as tf :return: sample from the MVN of shape N x D """ eps = tf.random_normal(tf.shape(mean), dtype=settings.float_type) # N x P if cov_structure == "diag": sample = mean + tf.sqrt(cov) * eps # N x P elif cov_structure == "full": cov = cov + (tf.eye(tf.shape(mean)[1], dtype=settings.float_type) * settings.numerics.jitter_level)[None, ...] # N x P x P chol = tf.cholesky(cov) # N x P x P
tensorflow.sqrt
10,094
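A hedged sketch of tf.sqrt used in a diagonal-covariance reparameterized sample, mirroring the "diag" branch above (TF 1.x; shapes and variances are illustrative):

import tensorflow as tf

mean = tf.zeros([5, 3])
cov_diag = tf.fill([5, 3], 4.0)              # per-dimension variances
eps = tf.random_normal(tf.shape(mean))
sample = mean + tf.sqrt(cov_diag) * eps      # standard deviation = sqrt(variance)

with tf.Session() as sess:
    print(sess.run(sample).shape)  # (5, 3)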
import tensorflow as tf # Categorical Discrimminator Loss dc_c_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real), logits=d_c_real)) dc_c_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake), logits=d_c_fake)) dc_c_loss = dc_c_loss_fake + dc_c_loss_real # Generator loss generator_g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_fake), logits=d_g_fake)) generator_c_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_fake), logits=d_c_fake)) generator_loss = generator_c_loss + generator_g_loss # Supervised Encoder Loss supervised_encoder_loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y_input, logits=encoder_output_label_)) all_variables = tf.trainable_variables() dc_g_var = [var for var in all_variables if 'dc_g_' in var.name] dc_c_var = [var for var in all_variables if 'dc_c_' in var.name] en_var = [var for var in all_variables if 'e_' in var.name]
tensorflow.ones_like
10,095
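A minimal sketch of tf.ones_like producing "real" targets for a discriminator logit, as in the GAN losses above (TF 1.x; the logits are synthetic):

import tensorflow as tf

d_real_logits = tf.constant([[0.3], [1.2]])
real_targets = tf.ones_like(d_real_logits)   # same shape/dtype, filled with ones
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=real_targets, logits=d_real_logits))

with tf.Session() as sess:
    print(sess.run(loss))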
import tensorflow as tf sigma -- Fermi-Dirac distribution """ if activation == "esp" or activation == "softmax": A = tf.multiply(h, tf.nn.sigmoid(tf.multiply(beta,h)) ) elif activation == "sigmoid": A = tf.nn.sigmoid(tf.multiply(beta,h))
tensorflow.multiply
10,096
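A small sketch of tf.multiply building the elementwise h * sigmoid(beta * h) gate used above (TF 1.x; the inputs are illustrative):

import tensorflow as tf

h = tf.constant([[-1.0, 0.0, 2.0]])
beta = tf.constant(1.0)
gated = tf.multiply(h, tf.nn.sigmoid(tf.multiply(beta, h)))  # elementwise product

with tf.Session() as sess:
    print(sess.run(gated))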
import tensorflow as tf """Instantiates discriminator fromRGB layers of 1x1 convs. Args: params: dict, user passed parameters. Returns: List of fromRGB 1x1 Conv2D layers. """ with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE): # Get fromRGB layer properties. from_rgb = [ params["discriminator_from_rgb_layers"][i][0][:] for i in range(len(params["discriminator_from_rgb_layers"])) ] # Create list to hold toRGB 1x1 convs.
tensorflow.variable_scope
10,097
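A hedged sketch of tf.variable_scope with AUTO_REUSE for variable sharing (TF 1.x; the scope and shapes are illustrative, not from the record above):

import tensorflow as tf

def shared_projection(x):
    # AUTO_REUSE creates the variable on the first call and reuses it afterwards.
    with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
        w = tf.get_variable("w", shape=[4, 2])
        return tf.matmul(x, w)

a = shared_projection(tf.ones([1, 4]))
b = shared_projection(tf.ones([1, 4]))   # reuses "shared/w"

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.reduce_all(tf.equal(a, b))))  # True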
import tensorflow as tf """ Define RNN graph """ def build_multilayer_rnn_graph_with_dynamic_rnn(cell_type, activation,state_size, num_steps, num_layers, input_size_x, input_size_y , learning_rate, lambda_l2_reg,random_seed=0): reset_graph() tf.set_random_seed(random_seed) #make reproducible results input_size_x += input_size_y """Define the graph inputs""" batch_size = tf.placeholder(tf.int32, [], name='batch_size')
tensorflow.set_random_seed
10,098
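A minimal sketch of tf.set_random_seed as a graph-level seed for reproducibility (TF 1.x; the op below is illustrative):

import tensorflow as tf

tf.reset_default_graph()
tf.set_random_seed(42)          # graph-level seed for the random ops defined after it
x = tf.random_normal([3])

with tf.Session() as sess:
    print(sess.run(x))          # identical values on every fresh run of this script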
import tensorflow as tf
    facts = tf.concat(facts, 2)
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    if time_major:
        # (T,B,D) => (B,T,D); tf.array_ops is not a public symbol, use tf.transpose
        facts = tf.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
    # Mask
tensorflow.layers.dense
10,099
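A hedged sketch of stacking tf.layers.dense into the attention-scoring MLP pattern used above (TF 1.x; the batch/time/feature sizes are illustrative):

import tensorflow as tf

din_all = tf.random_normal([2, 10, 32])                          # [batch, time, features]
d1 = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name="f1_att")
d2 = tf.layers.dense(d1, 40, activation=tf.nn.sigmoid, name="f2_att")
scores = tf.layers.dense(d2, 1, activation=None, name="f3_att")  # [batch, time, 1]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(scores).shape)  # (2, 10, 1)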