Columns: seed (string, lengths 25 to 2.89k), seed_api (string, lengths 14 to 102), index (int64, 0 to 14.8k).
import tensorflow as tf

r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])

# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled)
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))

def _DoPredictions(self, in_size, mats, class_weights=None):
    """Takes in an array of states and calculates predictions.

    Get the cross-entropy for each example in the vector self._xent.
tensorflow.zeros
10,100
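A minimal sketch of the tensorflow.zeros call this row exercises (TF 1.x graph mode assumed; shapes are illustrative):

import tensorflow as tf

# tf.zeros builds a constant tensor of the given shape filled with zeros.
mask_fill = tf.zeros([4, 7])                   # float32 zeros, shape [4, 7]
int_fill = tf.zeros([4, 7], dtype=tf.int32)    # explicit dtype
with tf.Session() as sess:
    print(sess.run(mask_fill).sum())           # 0.0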
import tensorflow as tf

init = tf.global_variables_initializer()
if FLAGS.pretrained_model_path is not None:
    variable_restore_op = slim.assign_from_checkpoint_fn(
        FLAGS.pretrained_model_path,
        slim.get_trainable_variables(),
        ignore_missing_vars=True)

config = tf.ConfigProto()
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
custom_op.parameter_map["use_off_line"].b = True  # run training on the Ascend AI processor
config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # disable the remap pass
if FLAGS.allow_mix_precision:
    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
if FLAGS.auto_tune:
    custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")

with tf.Session(config=config) as sess:
    if FLAGS.restore:
        print('continue training from previous checkpoint')
        ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        saver.restore(sess, ckpt)
    else:
        sess.run(init)
        if FLAGS.pretrained_model_path is not None:
            variable_restore_op(sess)

    data_generator = icdar.get_batch(num_workers=FLAGS.num_readers,
                                     input_size=FLAGS.input_size,
tensorflow.compat.as_bytes
10,101
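A minimal sketch of tensorflow.compat.as_bytes (TF 1.x assumed):

import tensorflow as tf

# tf.compat.as_bytes encodes a str (or passes through bytes) as UTF-8 bytes,
# which is what the string-valued fields of the parameter_map above expect.
assert tf.compat.as_bytes("allow_mix_precision") == b"allow_mix_precision"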
import tensorflow as tf

test_acc /= test_iteration  # average accuracy
summary = tf.Summary(value=[
    tf.Summary.Value(tag='test_loss', simple_value=test_loss),
    tf.Summary.Value(tag='test_accuracy', simple_value=test_acc)])
return test_acc, test_loss, summary
tensorflow.Summary.Value
10,102
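A minimal sketch of hand-building a summary from tensorflow.Summary.Value entries (TF 1.x assumed; the tag, value, and log directory are illustrative):

import tensorflow as tf

# A tf.Summary proto can be assembled by hand and handed to a FileWriter,
# bypassing the summary-op machinery entirely.
summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=0.42)])
writer = tf.summary.FileWriter('/tmp/logs')
writer.add_summary(summary, global_step=100)
writer.close()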
import tensorflow as tf

def _build_net(self):  # we use parameter sharing among agents
    with tf.variable_scope(self.name):
        # ------------------ all inputs ------------------------
        self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S')    # input global state
        self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1')          # input state for agent1
        self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_')  # input next global state
        self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_')        # input next state for agent1
        self.R = tf.placeholder(tf.float32, [None, ], name='R')                     # input reward
        self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a')           # input action one-hot for agent1
        self.done = tf.placeholder(tf.float32, [None, ], name='done')               # input done flag
        self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
        self.q_target = tf.placeholder(tf.float32, [None, ], name='q_tot_target')

        w_initializer, b_initializer = tf.random_normal_initializer(0., 0.1), tf.constant_initializer(0.0)

        # ------------------ build evaluate_net ------------------
        with tf.variable_scope('eval_net'):
tensorflow.placeholder
10,103
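A minimal sketch of tensorflow.placeholder (TF 1.x graph mode assumed; the shape is illustrative):

import tensorflow as tf

# A placeholder declares a graph input; its value is supplied via feed_dict.
s = tf.placeholder(tf.float32, [None, 3], name='s')
doubled = 2.0 * s
with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={s: [[1., 2., 3.]]}))  # [[2. 4. 6.]]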
import tensorflow as tf
tensorflow.variable_scope
10,104
import tensorflow as tf

num_episodes = tf.minimum(x=num_episodes, y=self.episode_count)
assignment = tf.assign(
    ref=self.episode_indices[:self.episode_count - num_episodes],
    value=self.episode_indices[num_episodes: self.episode_count]
)

# Decrement episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
    assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes)

# Assign new observations.
with tf.control_dependencies(control_inputs=(assignment,)):
    assignments = list()
    for name in sorted(states):
        assignments.append(tf.scatter_update(
            ref=self.states_memory[name],
            indices=indices,
            updates=states[name]
        ))
    for name in sorted(internals):
        assignments.append(tf.scatter_update(
            ref=self.internals_memory[name],
            indices=indices,
            updates=internals[name]
        ))
    for name in sorted(actions):
        assignments.append(tf.scatter_update(
            ref=self.actions_memory[name],
tensorflow.scatter_update
10,105
import tensorflow as tf

with tf.name_scope(name):
    logcost = log_quaternion_loss_batch(predictions, labels)
    logcost = tf.reduce_sum(logcost, [0])
    logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss')
tensorflow.reduce_sum
10,106
import tensorflow as tf

        self.assertAllEqual(p.eval(), data2.eval())

    def testInitRequiredAssignAdd(self):
        with self.test_session():
            p = tf.Variable(tf.fill([1024, 1024], 1), tf.int32)
            a = tf.assign_add(p, tf.fill([1024, 1024], 0))
            with self.assertRaisesOpError("use uninitialized"):
                a.op.run()

    def testInitRequiredAssignSub(self):
        with self.test_session():
            p = tf.Variable(tf.fill([1024, 1024], 1), tf.int32)
            a = tf.assign_sub(p, tf.fill([1024, 1024], 0))
            with self.assertRaisesOpError("use uninitialized"):
                a.op.run()

    # NOTE(mrry): See also
    #   dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign
    #   data race and must run without TSAN.
    def testParallelUpdateWithLocking(self):
        with self.test_session() as sess:
            zeros_t = tf.fill([1024, 1024], 0.0)
tensorflow.fill
10,107
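A minimal sketch of tensorflow.fill (TF 1.x assumed; the shape is illustrative):

import tensorflow as tf

# tf.fill creates a tensor of the given shape filled with one scalar value;
# unlike tf.constant, the shape may also be a dynamic tensor.
ones_mat = tf.fill([2, 3], 1)   # int32 tensor of ones
with tf.Session() as sess:
    print(sess.run(ones_mat))   # [[1 1 1] [1 1 1]]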
import tensorflow as tf

hn = tf.identity(hn, name='hn')  # add the output pre-activation value to the graph for later retrieval

# Create a saver for the model
if save_model == True:
    saver = tf.train.Saver()

# ----------------- Initialize the cost and gradients -----------------
tensorflow.train.Saver
10,108
import tensorflow as tf

    tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
    tmp = tf.tanh((tmp1 + tmp2) + b)

    # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
    v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp')  # (B,T) shape
    key_masks = mask  # [B, 1, T]
    # key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
    v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings)  # [B, 1, T]
    alphas = tf.nn.softmax(v_dot_tmp, name='alphas')  # (B,T) shape

    # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
    # output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
    output = facts * tf.expand_dims(alphas, -1)
    output = tf.reshape(output, tf.shape(facts))
    # output = output / (facts.get_shape().as_list()[-1] ** 0.5)
    if not return_alphas:
        return output
    else:
        return output, alphas

def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM',
                  softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
        print("querry_size mismatch")
        query = tf.concat(values=[
            query,
tensorflow.shape
10,109
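A minimal sketch of tensorflow.shape (TF 1.x assumed):

import tensorflow as tf

# tf.shape returns the *runtime* shape as an int32 tensor, so it also works
# when some dimensions (e.g. the batch) are unknown at graph-build time.
x = tf.placeholder(tf.float32, [None, 5])
batch = tf.shape(x)[0]
with tf.Session() as sess:
    print(sess.run(batch, feed_dict={x: [[0.] * 5] * 7}))  # 7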
import tensorflow as tf

csshape = confidence_scores.get_shape()
self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape))

# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
wvs = tf.pack(self._inputs)
wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
                      tf.reshape(wvs, [-1, in_size]))
wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
wvsum = tf.reduce_sum(wvs_weighted_reshaped, 0)

pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])

# Make a prediction for each tweet.
def GetWordPred(o_):
    logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
    return tf.nn.softmax(logits)

preds = GetWordPred(wvsum)
tensorflow.reduce_sum
10,110
import tensorflow as tf

    means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod

if self.hparams.soft_em:
    nearest_idx = tf.stack(
        [
            tf.multinomial(-dist[:, i, :], num_samples=self.hparams.num_samples)
            for i in range(self.hparams.num_blocks)
        ],
        axis=1)
    nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
    nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
    if self.hparams.random_top_k > 1:
        _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
        nearest_idx = tf.gather(
            top_k_idx,
            tf.random_uniform(
                [1],
                minval=0,
                maxval=self.hparams.random_top_k - 1,
                dtype=tf.int32),
            axis=-1)
    else:
        if self.hparams.use_scales:
            dist /= tf.reshape(self.hparams.scales,
                               [1, 1, self.hparams.moe_num_experts])
        nearest_idx = tf.argmax(-dist, axis=-1)
tensorflow.nn.top_k
10,111
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test

def _resize_image(image, height, width):
    image = array_ops.expand_dims(image, 0)
    image = image_ops.resize_bilinear(image, [height, width])
    return array_ops.squeeze(image, [0])

def _create_tfrecord_dataset(tmpdir):
    if not gfile.Exists(tmpdir):
        gfile.MakeDirs(tmpdir)

    data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)

    keys_to_features = {
        'image/encoded':
tensorflow.python.ops.image_ops.resize_bilinear
10,112
import tensorflow as tf

self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))
# grads are the parameter gradients of the local net's actor and critic, computed above;
# apply_gradients is a built-in tf.train.Optimizer method that applies those gradients to the global net
self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0])
self.train_op = tf.group(self.update_params_op, self.inc_step)
# GLOBAL_STEP += tf.shape(self.obs)[0]

def build_model(self):
tensorflow.group
10,113
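A minimal sketch of tensorflow.group (TF 1.x assumed; the variables are illustrative):

import tensorflow as tf

# tf.group bundles several ops into one op with no output; running it runs
# all of them, which is handy for "update params + bump counter" pairs.
v = tf.Variable(0)
w = tf.Variable(10)
step = tf.group(v.assign_add(1), w.assign_add(2))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(step)
    print(sess.run([v, w]))  # [1, 12]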
from tensorflow.python.ops import gen_resource_variable_ops

def read_value(self):
    return self._read_variable_op()

def assign(self, value, use_locking=None, name=None, read_value=False):
    del use_locking
    with _handle_graph(self.handle), self._assign_dependencies():
        value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
        assign_op = gen_resource_variable_ops.assign_variable_op(
            self.handle, value_tensor, name=name)
        if read_value:
            return self._read_variable_op()
    return assign_op

def assign_add(self, delta, use_locking=None, name=None, read_value=True):
    del use_locking
tensorflow.python.ops.gen_resource_variable_ops.assign_variable_op
10,114
import tensorflow as tf

    trainable=False)
self._moving_second_moment = tf.get_variable(
    "moving_second_moment",
    shape=self._mean_shape,
    collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                 tf.GraphKeys.VARIABLES],
    initializer=tf.ones_initializer(),
    trainable=False)

self._moving_variance = tf.sub(self._moving_second_moment,
                               tf.square(self._moving_mean),
                               name="moving_variance")
tensorflow.ones_initializer
10,115
import tensorflow as tf

        loss=total_loss,
        train_op=train_op,
        training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.EVAL:

    def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                  masked_lm_weights, next_sentence_example_loss,
                  next_sentence_log_probs, next_sentence_labels):
        """Computes the loss and accuracy of the model."""
        masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
            masked_lm_log_probs, axis=-1, output_type=tf.int32)
        masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
        masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
        masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
        masked_lm_accuracy = tf.metrics.accuracy(
            labels=masked_lm_ids,
            predictions=masked_lm_predictions,
            weights=masked_lm_weights)
        masked_lm_mean_loss = tf.metrics.mean(
            values=masked_lm_example_loss, weights=masked_lm_weights)

        next_sentence_log_probs = tf.reshape(
            next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
        next_sentence_predictions = tf.argmax(
            next_sentence_log_probs, axis=-1, output_type=tf.int32)
        next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
tensorflow.reshape
10,116
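A minimal sketch of tensorflow.reshape (TF 1.x assumed):

import tensorflow as tf

# tf.reshape changes the shape without touching the data; a single -1 lets
# TensorFlow infer that dimension, as in the [-1] flattens above.
x = tf.range(12)
m = tf.reshape(x, [3, 4])
flat = tf.reshape(m, [-1])
with tf.Session() as sess:
    print(sess.run(m).shape, sess.run(flat).shape)  # (3, 4) (12,)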
from tensorflow.python.ops import check_ops

      validate_args: Python `Boolean`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `Boolean`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result
        is undefined. When `False`, an exception is raised if one or more of the
        statistic's batch members are undefined.
      name: `String` name prefixed to Ops created by this class.
    """
    parameters = locals()
    with ops.name_scope(name, values=[rate]) as ns:
        with ops.control_dependencies([check_ops.assert_positive(rate)]
                                      if validate_args else []):
            self._rate = array_ops.identity(rate, name="rate")
    super(Poisson, self).__init__(
        dtype=self._rate.dtype,
        is_continuous=False,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._rate],
        name=ns)
tensorflow.python.ops.check_ops.assert_positive
10,117
import tensorflow as tf

add_moving_summary(recon_loss_A, recon_loss_B, self.g_loss, self.d_loss)

def _get_optimizer(self):
    lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
    return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)

def get_data(datadir, isTrain=True):
tensorflow.train.AdamOptimizer
10,118
import tensorflow as tf

def parse_function(example_proto):
    features = {'member/name': tf.io.FixedLenFeature([], tf.string),
                'member/encoded': tf.io.FixedLenFeature([], tf.string),
                'member/age': tf.io.FixedLenFeature([], tf.int64),
                'member/height': tf.io.VarLenFeature(tf.float32),
                'member/prefer_prods': tf.io.VarLenFeature(tf.int64)}
    features = tf.io.parse_single_example(example_proto, features)
    images = tf.image.decode_png(features['member/encoded'], channels=3)
    # Note: the PNG originally has 4 channels, but the processing below fails
    # on 4-channel input, so the line above drops it to 3 channels first.
tensorflow.io.VarLenFeature
10,119
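A minimal sketch of tensorflow.io.VarLenFeature (TF 1.x assumed; the feature key and the serialized proto are illustrative placeholders):

import tensorflow as tf

# VarLenFeature parses a variable-length field into a tf.SparseTensor;
# fixed-size fields use FixedLenFeature, as parse_function above shows.
spec = {'member/height': tf.io.VarLenFeature(tf.float32)}
# parsed = tf.io.parse_single_example(serialized_proto, spec)
# parsed['member/height'] is a tf.SparseTensor; densify with:
# heights = tf.sparse.to_dense(parsed['member/height'])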
import tensorflow as tf

        # Final evaluation if training is finished.
        if current_step >= hparams.num_epochs * train_steps_per_epoch:
            eval_results = image_classifier.evaluate(
                input_fn=imagenet_eval.input_fn,
                steps=eval_steps,
                hooks=eval_hooks)
            tf.logging.info('Evaluation results: %s' % eval_results)
    while current_step < hparams.num_epochs * train_steps_per_epoch:
        image_classifier.train(
            input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
        current_step += train_steps_per_eval
        tf.logging.info('Starting evaluation at step=%d.' % current_step)
        eval_results = image_classifier.evaluate(
            input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
        tf.logging.info('Evaluation results: %s' % eval_results)
elif mode == 'predict':
    for checkpoint in _get_next_checkpoint():
        tf.logging.info('Starting prediction ...')
        time_hook = model_lib.SessionTimingHook()
        eval_hooks.append(time_hook)
        result_iter = image_classifier.predict(
            input_fn=imagenet_eval.input_fn,
            hooks=eval_hooks,
            checkpoint_path=checkpoint,
            yield_single_examples=False)
        results = list(itertools.islice(result_iter, eval_steps))
        tf.logging.info('Inference speed = {} images per second.'.format(
            time_hook.compute_speed(len(results) * eval_batch_size)))
elif mode == 'train':
    current_step = _load_global_step_from_checkpoint_dir(model_dir)
    total_step = int(hparams.num_epochs * train_steps_per_epoch)
    if current_step < total_step:
tensorflow.logging.info
10,120
import tensorflow as tf

        [layer_input[0]['observation'], layer_input[1]], axis=1)

    return tf.keras.layers.Lambda(f)
tensorflow.keras.layers.Lambda
10,121
import tensorflow as tf

tf.logging.info("*** Input Validation Files ***")
tensorflow.logging.info
10,122
import tensorflow as tf with tf.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput(
tensorflow.summary.scalar
10,123
import tensorflow as tf

self._initializers = {
    "w": tf.contrib.layers.xavier_initializer(),
}
self._regularizers = {
    'w': tf.contrib.layers.l2_regularizer(config.l2)
}

self._construct_placeholders()
self._construct_weights()
self._construct()
tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0])
self.summary = tf.summary.merge_all()

def _construct(self):
    """
    Construct the model; main part of it goes here
    """
    self.v = DenseLayer(1, False, tf.nn.relu,
                        initializers=self._initializers,
                        regularizers=self._regularizers,
                        name='OutputVector')
    self.score = tf.squeeze(self.v(self._cur_user * self._cur_item))
    negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative))
tensorflow.get_collection
10,124
import tensorflow as tf

        c = self._activation(self._candidate_linear([inputs, r_state]))
        u = (1.0 - att_score) * u
        new_h = u * state + (1 - u) * c
        return new_h, new_h

def prelu(_x, scope=''):
    """parametric ReLU activation"""
    with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
        _alpha = tf.get_variable("prelu_" + scope,
                                 shape=_x.get_shape()[-1],
                                 dtype=_x.dtype,
                                 initializer=tf.constant_initializer(0.1))
        _zero = tf.constant(0, dtype=_x.dtype)
        # return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
        return tf.maximum(_zero, _x) + _alpha * tf.minimum(_zero, _x)

def calc_auc(raw_arr):
    """Summary

    Args:
        raw_arr (TYPE): Description

    Returns:
        TYPE: Description
    """
tensorflow.minimum
10,125
import tensorflow as tf

height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
tensorflow.random_uniform
10,126
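A minimal sketch of tensorflow.random_uniform (TF 1.x assumed; the batch of fake images is illustrative):

import tensorflow as tf

# tf.random_uniform draws from U[minval, maxval); here a fake image batch.
fake_images = tf.random_uniform((2, 224, 224, 3), minval=0.0, maxval=1.0)
with tf.Session() as sess:
    print(sess.run(fake_images).shape)  # (2, 224, 224, 3)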
from tensorflow.python.ops import variables

    expected_unclipped = np.array([(2.0 - 3.0 * 0.1), (3.0 - 3.0 * 0.1)])
    self.assertAllCloseAccordingToType(
        2.0 * expected_unclipped / np.linalg.norm(expected_unclipped),
        var0_out[1])
    # var1 is not in the var list, so it should not be clipped
    self.assertAllCloseAccordingToType([4.0 - 3.0 * 0.01, 5.0 - 3.0 * 0.01],
                                       var1.eval())

def _setupSparse(self, is_distributed, dtype):
    with self._maybeWithDevice("/job:ps" if is_distributed else None):
        var0 = variables.Variable(
            [[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], dtype=dtype)
        var1 = variables.Variable(
            [[0.0, 1.0], [0.0, 3.0], [0.0, 5.0]], dtype=dtype)
    with self._maybeWithDevice("/job:worker" if is_distributed else None):
        grads = ops.IndexedSlices(
            constant_op.constant(
                [[0.1, 0.1], [0.1, 0.1]], dtype=dtype), [0, 2], [3, 2])
        sgd = gradient_descent.GradientDescentOptimizer(3.0)
        clip_opt = variable_clipping_optimizer.VariableClippingOptimizer(
            sgd, {var0: [1], var1: [0]}, 2.0)
        update_op = clip_opt.apply_gradients(
            list(zip([grads, grads], [var0, var1])))
tensorflow.python.ops.variables.Variable
10,127
import tensorflow as tf

else:
    l2_shape = tf.identity(x_shape)

# Initialize hidden layer activities
if self.hidden_init == 'identity':
    l1_h2 = tf.identity(x)
    l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
    l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
elif self.hidden_init == 'random':
    l1_h2 = tf.random_normal(x_shape, dtype=self.dtype)
    l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype)
    l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype)
elif self.hidden_init == 'zeros':
    l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
    l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
    l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
else:
    raise RuntimeError

# While loop
elems = [i0, x, l1_h2, l2_h2, l3_h2]
returned = tf.while_loop(
tensorflow.zeros
10,128
from tensorflow.contrib.layers.python.layers import utils

_add_variable_to_collections(layer.bias, variables_collections, 'biases')

if normalizer_fn is not None:
    normalizer_params = normalizer_params or {}
    outputs = normalizer_fn(outputs, **normalizer_params)

if activation_fn is not None:
    outputs = activation_fn(outputs)

return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
tensorflow.contrib.layers.python.layers.utils.collect_named_outputs
10,129
import tensorflow as tf

        assert self.fc6.get_shape().as_list()[1:] == [4096]
        self.relu6 = tf.nn.relu(self.fc6)

        self.fc7 = self.fc_layer(self.relu6, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)

        self.fc8 = self.fc_layer(self.relu7, "fc8")

        log("finished building VGG19 in %ds" % (time.time() - start_time))
        return self.fc8

    def avg_pool(self, bottom, name):
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
tensorflow.nn.avg_pool
10,130
import tensorflow as tf

if add_flipped_images:
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        outputs_to_scales_to_logits_reversed = multi_scale_logits(
            tf.reverse_v2(images, [2]),
            model_options=model_options,
            is_training=False,
            fine_tune_batch_norm=False)

for output in sorted(outputs_to_scales_to_logits):
    scales_to_logits = outputs_to_scales_to_logits[output]
    logits = _resize_bilinear(
        scales_to_logits[MERGED_LOGITS_SCOPE],
        tf.shape(images)[1:3],
        scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
    outputs_to_predictions[output].append(
        tf.expand_dims(tf.nn.softmax(logits), 4))

    if add_flipped_images:
        scales_to_logits_reversed = (
            outputs_to_scales_to_logits_reversed[output])
        logits_reversed = _resize_bilinear(
            tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]),
            tf.shape(images)[1:3],
            scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype)
tensorflow.shape
10,131
from tensorflow.python.ops import state_ops

    max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
    total_var = contrib_variables.local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
    batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
    total_update = state_ops.assign_add(total_var, batch_total, name='update')

# Divide total by max to get mean, for both vars and the update ops.
mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
update = _safe_scalar_div(total_update, max_update, name=scope)
tensorflow.python.ops.state_ops.assign_add
10,132
import tensorflow as tf

max_x = tf.cast(0.0 + labeled_sizes[i][0] / 2.0, dtype=tf.float32)
# min_y = tf.cast(0.0 - labeled_sizes[i][1] / 2.0, dtype=tf.float32)
# max_y = tf.cast(0.0 + labeled_sizes[i][1] / 2.0, dtype=tf.float32)
min_z = tf.cast(0.0 - labeled_sizes[i][2] / 2.0, dtype=tf.float32)
max_z = tf.cast(0.0 + labeled_sizes[i][2] / 2.0, dtype=tf.float32)
translation = tf.reshape([labeled_translations[i][0],
                          labeled_translations[i][2]], [2, 1])
pt_0 = rot @ tf.reshape([min_x, min_z], [2, 1]) + translation
pt_1 = rot @ tf.reshape([min_x, max_z], [2, 1]) + translation
pt_2 = rot @ tf.reshape([max_x, min_z], [2, 1]) + translation
tensorflow.reshape
10,133
import tensorflow as tf

eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0))

q_func_results = q_func(observations_ph.get(), num_actions, scope="q_func")
q_values = q_func_results['q']
s_value = q_func_results['s']
a_values = q_func_results['a']
deterministic_actions = tf.argmax(q_values, axis=1)

batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                   maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1,
                                 dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
tensorflow.argmax
10,134
import tensorflow as tf

pred_indices = tf.argmax(predictions, axis=-1)
pred_x = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32)
pred_y = tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)

width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
pred_x = pred_x * width / tf.cast(heatmap_size, tf.float32)
pred_y = pred_y * height / tf.cast(heatmap_size, tf.float32)

if clip_at_zero:
    pred_x = pred_x * tf.cast(pred_max > 0, tf.float32)
    pred_y = pred_y * tf.cast(pred_max > 0, tf.float32)
tensorflow.cast
10,135
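A minimal sketch of tensorflow.cast (TF 1.x assumed):

import tensorflow as tf

# tf.cast converts dtypes; a common trick above is casting a boolean mask
# to float so it can be multiplied into a loss or prediction.
mask = tf.constant([True, False, True])
weights = tf.cast(mask, tf.float32)
with tf.Session() as sess:
    print(sess.run(weights))  # [1. 0. 1.]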
import tensorflow as tf

# scaffold_fn=tf.train.Scaffold(init_fn=train_scafflod)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)

output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
    train_op = optimization_multigpus.create_optimizer(
        total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
tensorflow.logging.info
10,136
from tensorflow.python.framework import ops

ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)

@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
tensorflow.python.framework.ops.RegisterShape
10,137
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell

    input_size_ = input_size if j == 0 else cell_output_size

    if decoder.cell_type.lower() == 'lstm':
        cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse))
    elif decoder.cell_type.lower() == 'plstm':
        cell = PLSTM(decoder.cell_size, reuse=reuse,
                     fact_size=decoder.lstm_fact_size,
                     proj_size=decoder.lstm_proj_size)
    elif decoder.cell_type.lower() == 'dropoutgru':
        cell = DropoutGRUCell(decoder.cell_size, reuse=reuse,
                              layer_norm=decoder.layer_norm,
                              input_size=input_size_,
                              input_keep_prob=decoder.rnn_input_keep_prob,
                              state_keep_prob=decoder.rnn_state_keep_prob)
    else:
        cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)

    if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru':
        cell = DropoutWrapper(cell,
                              input_keep_prob=decoder.rnn_input_keep_prob,
                              output_keep_prob=decoder.rnn_output_keep_prob,
                              state_keep_prob=decoder.rnn_state_keep_prob,
                              variational_recurrent=decoder.pervasive_dropout,
                              dtype=tf.float32, input_size=input_size_)
    cells.append(cell)

if len(cells) == 1:
    return cells[0]
else:
    return CellWrapper(MultiRNNCell(cells))

def look(time, state, input_, prev_weights=None, pos=None, context=None):
    prev_weights_ = [prev_weights if i == align_encoder_id else None
                     for i in range(len(encoders))]
    pos_ = None
tensorflow.contrib.rnn.DropoutWrapper
10,138
from tensorflow.python.framework import ops

    Args:
        x: a 2D tensor.  Dimensions typically: batch, in_units
        weights: a 2D tensor.  Dimensions typically: in_units, out_units
        biases: a 1D tensor.  Dimensions: out_units
        name: A name for the operation (optional).  If not specified
            "xw_plus_b" is used.

    Returns:
        A 2-D Tensor computing matmul(x, weights) + biases.
        Dimensions typically: batch, out_units.
    """
    with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
        x = ops.convert_to_tensor(x, name="x")
        weights = ops.convert_to_tensor(weights, name="weights")
        biases = ops.convert_to_tensor(biases, name="biases")
        mm = math_ops.matmul(x, weights)
        return bias_add(mm, biases, name=name)

def xw_plus_b_v1(x, weights, biases, name=None):  # pylint: disable=invalid-name
    """Computes matmul(x, weights) + biases.

    This is a deprecated version that will soon be removed.

    Args:
        x: a 2D tensor.  Dimensions typically: batch, in_units
tensorflow.python.framework.ops.convert_to_tensor
10,139
import tensorflow as tf

    ]
    P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
elif K.backend() == 'tensorflow':
    import tensorflow as tf

    # Create mask that can be used to gather elements from L_flat and put them
    # into a diagonal matrix.
    diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
    diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)

    # Add leading zero element to each element in the L_flat. We use this zero
    # element when gathering L_flat into a lower triangular matrix L.
    nb_rows = tf.shape(L_flat)[0]
    zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
    try:
        # Old TF behavior.
        L_flat = tf.concat(1, [zeros, L_flat])
    except TypeError:
        # New TF behavior.
        L_flat = tf.concat([zeros, L_flat], 1)

    # Finally, process each element of the batch.
    def fn(a, x):
        x_ = tf.gather(x, diag_mask)
tensorflow.shape
10,140
import tensorflow as tf

num_prior = tf.shape(y_true)[1]
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 10])
tensorflow.reshape
10,141
import tensorflow as tf

with tf.name_scope('data_loading'):
tensorflow.name_scope
10,142
import tensorflow as tf """Checks that `perm` is valid.""" with tf.name_scope(name, 'maybe_validate_perm', [perm]): assertions = [] if not perm.dtype.is_integer: raise TypeError('`perm` must be integer type') msg = '`perm` must be a vector.' if perm.shape.ndims is not None: if perm.shape.ndims != 1: raise ValueError( msg[:-1] + ', saw rank: {}.'.format(perm.shape.ndims)) elif validate_args: assertions += [tf.compat.v1.assert_rank(perm, 1, message=msg)] perm_ = tf.get_static_value(perm) msg = '`perm` must be a valid permutation vector.' if perm_ is not None: if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)): raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_)) elif validate_args: assertions += [ tf.compat.v1.assert_equal( tf.sort(perm), tf.range(tf.size(input=perm)), message=msg) ] return assertions
tensorflow.get_static_value
10,143
import tensorflow as tf

parser.add_argument('--data_dir', type=str, default='/tmp/mnist-data',
                    help='the directory of MNIST dataset')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--max_train_step', type=int, default=50000,
                    help='the maximum training step')
parser.add_argument('--model_path', type=str, default='',
                    help='the path of checkpoint file')
args = parser.parse_args()

def model():
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')
    with tf.variable_scope('layer1'):
        w1 = tf.get_variable('weight1', [784, 1024],
                             initializer=tf.random_normal_initializer())
        b1 = tf.get_variable('bias1', [1024],
                             initializer=tf.constant_initializer(0.0))
        h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    with tf.variable_scope('layer2'):
        w2 = tf.get_variable('weight2', [1024, 1024],
                             initializer=tf.random_normal_initializer())
        b2 = tf.get_variable('bias2', [1024],
                             initializer=tf.constant_initializer(0.0))
        h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
    with tf.variable_scope('layer3'):
        w3 = tf.get_variable('weight3', [1024, 10],
                             initializer=tf.random_normal_initializer())
        b3 = tf.get_variable('bias3', [10],
                             initializer=tf.constant_initializer(0.0))
tensorflow.variable_scope
10,144
from tensorflow.python.framework import ops as _ops

        name: A name for the operation (optional).

    Returns:
        A `Tensor` of type `resource`.
    """
    result = _op_def_lib.apply_op("StubResourceHandleOp", container=container,
                                  shared_name=shared_name, name=name)
    return result

_ops.RegisterShape("StubResourceHandleOp")(None)

_test_string_output_outputs = ["output1", "output2"]
_TestStringOutputOutput = _collections.namedtuple(
    "TestStringOutput", _test_string_output_outputs)

def test_string_output(input, name=None):
    r"""TODO: add doc.
tensorflow.python.framework.ops.RegisterShape
10,145
import tensorflow as tf

with tf.Graph().as_default():
    energy_fn, _, _ = l2hmc.get_scg_energy_fn()
    dynamics = l2hmc.Dynamics(
        x_dim=hparams.x_dim,
        minus_loglikelihood_fn=energy_fn,
        n_steps=hparams.n_steps,
        eps=hparams.eps)
    x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
    x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)

    samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        np_x_, np_v_, np_x_accept_prob, np_x_out = sess.run(
            [x_, v_, x_accept_prob, x_out], feed_dict={x: samples})

        self.assertEqual(np_x_.shape, np_v_.shape)
        self.assertEqual(samples.shape, np_x_out.shape)
        self.assertEqual(np_x_.shape, np_x_out.shape)
        self.assertEqual(np_x_accept_prob.shape, (hparams.n_samples,))

class L2hmcBenchmark(tf.test.Benchmark):
    """Eager and graph benchmarks for l2hmc."""
tensorflow.global_variables_initializer
10,146
import tensorflow as tf

self.model_W = tf.get_variable("{}_W".format(name),
                               initializer=kernel_initializer([n_in, n_out]))  # variational parameters
self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
self.model_DMW = tf.einsum('pij,jk->pik', self.DM, self.model_W)  # masked weight: p_s * i_s * o_s
self.model_tiled_b = tf.tile(tf.reshape(self.model_b, [1, n_out]), [self.p_s, 1])
tensorflow.einsum
10,147
import tensorflow as tf

embedding = tf.get_variable(
    "embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
tensorflow.nn.embedding_lookup
10,148
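A minimal sketch of tensorflow.nn.embedding_lookup (TF 1.x assumed; the variable name, vocabulary size, and ids are illustrative):

import tensorflow as tf

# embedding_lookup gathers rows of an embedding matrix by integer id.
embedding = tf.get_variable("embedding_demo", [100, 8])  # vocab 100, dim 8
ids = tf.constant([[3, 14, 15]])                          # batch of token ids
vectors = tf.nn.embedding_lookup(embedding, ids)          # shape [1, 3, 8]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(vectors).shape)  # (1, 3, 8)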
import tensorflow as tf

        updates_collections=None,
        epsilon=self.epsilon,
        scale=True,
        is_training=tftrain,
        scope=self.name)

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    shape = input_.get_shape().as_list()

    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
tensorflow.variable_scope
10,149
import tensorflow as tf

        # Training
        self.learning_rate = 0.0025
        self.lambda_loss_amount = 0.0015
        self.training_epochs = 300
        self.batch_size = 1000

        # LSTM structure
        self.n_inputs = len(X_train[0][0])  # feature count is 9: three 3D sensors' features over time
        self.n_hidden = N_HIDDEN_CONFIG     # number of neurons inside the neural network
        self.n_classes = 6                  # final output classes
        self.W = {
            'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),   # [9, 32]
            'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))   # [32, 6]
        }
        self.biases = {
            'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [32]
            'output': tf.Variable(tf.random_normal([self.n_classes]))            # [6]
        }

config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
#       np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")

# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
tensorflow.random_normal
10,150
import tensorflow as tf

if self.hparams.l2_loss > 0:
    for p in denoise_params:
        # self.weighs_propen = p
        # p = tf.Print(p, [p], message="show the weights")
        self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p))
    for p in ranking_model_params:
        self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss

denoise_gradients = tf.gradients(self.exam_loss, denoise_params)
ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params)
if self.hparams.max_gradient_norm > 0:
tensorflow.nn.l2_loss
10,151
import tensorflow as tf

labels = tf.to_float(labels)
repeat_op = tf.to_float(
    tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]),
            [1, num_ratings]))
repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
weights = repeat_op_sq / tf.to_float((num_ratings - 1) ** 2)

pred_ = predictions ** y_pow
try:
tensorflow.to_float
10,152
import tensorflow as tf

                             self.config.max_ch_len], "question_char")
self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2")

self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size)
self.c_mask = tf.cast(self.c, tf.bool)  # index 0 is padding symbol; N x self.max_p_num, max_p_len
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
self.dropout = tf.placeholder(tf.float32, name="dropout")
self.global_step = tf.Variable(0, name="global_step", trainable=False)

"""
:description: The embedding layer; question and passage share embeddings
"""
def _embed(self):
tensorflow.placeholder
10,153
import tensorflow as tf

else:
    nfeats_conv = reduce(lambda x, y: x * y,
                         [int(x) for x in cnn_output.get_shape()[-3:]])
    feats_conv = tf.reshape(cnn_output, [batch_size * rnn_nunroll, nfeats_conv])
nfeats_tot = nfeats_conv + nfeats
feats_all = tf.concat(1, [feats_conv, feats_other])

print('feats_cnn: {}'.format(feats_conv.get_shape()))
print('feats_all: {}'.format(feats_all.get_shape()))

# Project to RNN size
rnn_output = feats_all
rnn_output_size = nfeats_tot
if do_rnn:
    with tf.variable_scope('rnn_proj'):
        rnn_proj_w = tf.get_variable(
            'W', [nfeats_tot, rnn_size],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype),
            dtype=dtype)
        rnn_proj_b = tf.get_variable(
            'b', [rnn_size],
            initializer=tf.constant_initializer(0.0),
            dtype=dtype)

    rnn_inputs = tf.nn.bias_add(tf.matmul(feats_all, rnn_proj_w), rnn_proj_b)
    rnn_inputs = tf.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size])
    rnn_inputs = tf.split(rnn_inputs, rnn_nunroll, axis=1)
    rnn_inputs = [tf.squeeze(input_, [1]) for input_ in rnn_inputs]

    if rnn_cell_type == 'rnn':
        cell_fn = tf.nn.rnn_cell.BasicRNNCell
    elif rnn_cell_type == 'gru':
        cell_fn = tf.nn.rnn_cell.GRUCell
    elif rnn_cell_type == 'lstm':
tensorflow.uniform_unit_scaling_initializer
10,154
import tensorflow as tf

hidden_size = output_layer.shape[-1].value

output_weights = tf.get_variable(
    "output_weights", [num_labels, hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
tensorflow.nn.dropout
10,155
import tensorflow as tf

flat_offsets = tf.reshape(
    tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
                                  [batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor

def input_fn_builder(input_files,
                     max_seq_length,
tensorflow.gather
10,156
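A minimal sketch of tensorflow.gather (TF 1.x assumed):

import tensorflow as tf

# tf.gather selects rows (axis 0 by default) by index, which is how the
# snippet above pulls flattened positions out of a [batch*seq, width] tensor.
params = tf.constant([[10, 11], [20, 21], [30, 31]])
picked = tf.gather(params, [2, 0])
with tf.Session() as sess:
    print(sess.run(picked))  # [[30 31] [10 11]]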
from tensorflow.python.framework import ops

    - `E_ = E if E else [1]`,
    - `S_ = [tf.reduce_prod(S)]`.

    This function "reverses" `make_batch_of_event_sample_matrices`.

    Args:
        x: `Tensor` of shape `B_+E_+S_`.
        sample_shape: `Tensor` (1D, `int32`).
        name: `String`. The name to give this op.

    Returns:
        x: `Tensor`. Input transposed/reshaped to `S+B+E`.
    """
    with self._name_scope(name, values=[x, sample_shape]):
        x = ops.convert_to_tensor(x, name="x")
        sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
        x = distribution_util.rotate_transpose(x, shift=1)
        if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
            if self._batch_ndims_is_0 or self._event_ndims_is_0:
                b = ((min(-2, -1 - self._event_ndims_static),)
                     if self._batch_ndims_is_0 else ())
                e = (-1,) if self._event_ndims_is_0 else ()
                x = array_ops.squeeze(x, squeeze_dims=b + e)
            _, batch_shape, event_shape = self.get_shape(x)
        else:
            s = (x.get_shape().as_list()
                 if x.get_shape().is_fully_defined()
                 else array_ops.shape(x))
            batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,))
            # Since sample_dims=1 and is left-most, we add 1 to the number of
            # batch_ndims to get the event start dim.
tensorflow.python.framework.ops.convert_to_tensor
10,157
from tensorflow.contrib.layers.python.layers import utils

        shifted_sum_x2, shift, name="normalize_moments")
    second_moment = variance + tf.square(mean)
    return mean, variance, second_moment

def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
        tf.identity(self._moving_second_moment),
    )

mean, variance, second_moment = utils.smart_cond(
    use_batch_stats,
    build_batch_stats,
    build_moving_stats,
)

return mean, variance, second_moment

def _build_update_ops_variance(self, mean, variance, is_training):
    """Builds the moving average update ops when using moving variance.

    Args:
        mean: The mean value to update with.
        variance: The variance value to update with.
tensorflow.contrib.layers.python.layers.utils.smart_cond
10,158
import tensorflow as tf

with slim.arg_scope(
        [slim.conv2d],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        reuse=reuse):
    with tf.variable_scope(_LOGITS_SCOPE_NAME, _LOGITS_SCOPE_NAME, [features]):
tensorflow.truncated_normal_initializer
10,159
import tensorflow as tf

max_prob = tf.reduce_max(prob, axis=-1)
estimator_spec = tf.estimator.EstimatorSpec(
    mode=mode,
    predictions={
        'pred_label': pred_label,
        'max_prob': max_prob
    },
    export_outputs={
        'output': tf.estimator.export.PredictOutput(
            {
                'pred_label': pred_label,
                'max_prob': max_prob
            }
        )
    }
)
return estimator_spec
tensorflow.estimator.export.PredictOutput
10,160
import tensorflow as tf

def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g, _ in grad_and_vars:
            expanded_g = tf.expand_dims(g, 0)
            grads.append(expanded_g)
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        var = grad_and_vars[0][1]
        grad_and_var = (grad, var)
        average_grads.append(grad_and_var)
    return average_grads

def binary_mask(shape, p=0.7):
    samples = tf.random_uniform(shape, minval=0.0, maxval=1.0)
tensorflow.reduce_mean
10,161
import tensorflow as tf

# Note: This results in a H2D copy, but no computation
# Note: This avoids recomputation of the random values, but still
#       results in a H2D copy.
images = tf.contrib.framework.local_variable(images, name='images')
labels = tf.contrib.framework.local_variable(labels, name='labels')
# Change to 0-based (don't use background class like Inception does)
labels -= 1
if num_compute_devices == 1:
    images_splits = [images]
    labels_splits = [labels]
else:
    images_splits = tf.split(images, num_compute_devices, 0)
    labels_splits = tf.split(labels, num_compute_devices, 0)
return nclass, images_splits, labels_splits

def create_config_proto():
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.intra_op_parallelism_threads = FLAGS.num_intra_threads
    config.inter_op_parallelism_threads = FLAGS.num_inter_threads
    config.gpu_options.force_gpu_compatible = FLAGS.force_gpu_compatible
    return config
tensorflow.split
10,162
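A minimal sketch of tensorflow.split (TF 1.x assumed; the batch is illustrative):

import tensorflow as tf

# tf.split cuts a tensor into equal parts along one axis; the snippet above
# uses it to shard a batch across compute devices.
batch = tf.reshape(tf.range(12), [6, 2])
halves = tf.split(batch, 2, 0)  # two tensors of shape [3, 2]
with tf.Session() as sess:
    a, b = sess.run(halves)
    print(a.shape, b.shape)  # (3, 2) (3, 2)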
import tensorflow as tf

with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
    W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size],
                             initializer=initializer)
    embedded_tags = tf.nn.embedding_lookup(W_tags, self.input_tags)
    embedded_tags_expanded = tf.expand_dims(embedded_tags, -1)

with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
    W_deps = tf.get_variable("embed_W_deps", [deps_vocab_size, embedding_size],
                             initializer=initializer)
    embedded_deps = tf.nn.embedding_lookup(W_deps, self.input_deps)
    embedded_deps_expanded = tf.expand_dims(embedded_deps, -1)

with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
    W_head = tf.get_variable("embed_W_head", [num_quantized_chars, embedding_size],
                             initializer=initializer)
    embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
    embedded_head_expanded = tf.expand_dims(embedded_head, -1)

cnn_inputs = tf.concat(
    [embedded_text_expand, embedded_tags_expanded,
     embedded_deps_expanded, embedded_head_expanded], -1)

print("-" * 20)
print("Embedded Lookup:", cnn_inputs.get_shape())
print("-" * 20)
tensorflow.device
10,163
import tensorflow as tf

# with tf.variable_scope('layer_mix_eval'):
#     lin1 = tf.matmul(tf.reshape(self.q_concat, shape=[-1, 1, self.n_agents]), self.w1) + tf.reshape(self.b1, shape=[-1, 1, 32])
#     a1 = tf.nn.elu(lin1, name='a1')
#     self.Q_tot = tf.reshape(tf.matmul(a1, self.w2), shape=[-1, 1]) + self.b2
# with tf.variable_scope('layer_mix_target'):
#     lin1_ = tf.matmul(tf.reshape(self.q_concat_, shape=[-1, 1, self.n_agents]), self.w1_) + tf.reshape(self.b1_, shape=[-1, 1, 32])
#     a1_ = tf.nn.elu(lin1_, name='a1_')
#     self.Q_tot_ = tf.reshape(tf.matmul(a1_, self.w2_), shape=[-1, 1]) + self.b2_

# todo: add q_target, loss, train_op
# with tf.variable_scope('q_target'):
with tf.variable_scope('loss'):
    self.loss = tf.reduce_mean(tf.squared_difference(
        self.q_target, tf.squeeze(self.Q_tot), name='TD_error'))
    # self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.Q_tot, name='TD_error'))
with tf.variable_scope('train'):
    self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

def act(self, state):
    if np.random.uniform() > self.epsilon:  # pick the argmax action
        s = np.array(state)
        if len(s.shape) < 2:
            s = np.array(state)[np.newaxis, :]
        q_eval = self.sess.run(self.q_eval, feed_dict={self.s: s})
        action = np.argmax(q_eval, axis=-1).tolist()
    else:  # pick random action
tensorflow.squeeze
10,164
import tensorflow as tf

def testIntegratedGradientsGetMask(self):
    with tf.Graph().as_default() as graph:
        x = tf.placeholder(shape=[None, 3], dtype=tf.float32)
        y = 5 * x[:, 0] + x[:, 0] * x[:, 1] + tf.sin(x[:, 2])
        with tf.Session() as sess:
            # Calculate the value of `y` at the baseline.
            x_baseline_val = np.array([[0.5, 0.8, 1.0]], dtype=np.float)
            y_baseline_val = sess.run(y, feed_dict={x: x_baseline_val})
tensorflow.Session
10,165
import tensorflow as tf

    return weights

def bias_variable(self, name, shape):
    with tf.variable_scope(name) as scope:
        biases = tf.get_variable('biaess', shape=shape,
tensorflow.variable_scope
10,166
import tensorflow as tf

             model_prob=0.9, model_lam=3e-4, activation=None, name="hidden"):
    self.model_prob = model_prob  # probability to keep units
    self.model_lam = model_lam    # l^2 / 2*tau: l=1e-2, tau=[0.1, 0.15, 0.2]
    self.dropout_mask_ph = dropout_mask_ph  # placeholder: p_s * i_s
    self.p_s = tf.shape(self.dropout_mask_ph)[0]  # post sample size
    self.DM = tf.zeros(shape=[self.p_s, n_in, n_in])  # dropout masks: p_s * i_s * i_s
    self.DM = tf.linalg.set_diag(self.DM, self.dropout_mask_ph)

    kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
    self.model_W = tf.get_variable("{}_W".format(name),
                                   initializer=kernel_initializer([n_in, n_out]))  # variational parameters
    self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
tensorflow.zeros
10,167
import tensorflow as tf

hss = param_eta * tf.reduce_sum(tf.matmul(varphisKt, prec) * varphisKt, axis=1)
Haa = param_eta * prec + Waa
# Haa = 0.5 * (Haa + TT.transpose(Haa))
HaaInv = tf.matrix_inverse(Haa)

# The two terms 'term1' and 'term2' which come from normalizers of the
# 1. Original policy distribution
# 2. The distribution after completing the square
sigma = tf.matrix_inverse(prec)
term1 = -0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * sigma))
if self.beta == 0:
    term2 = 0.5 * param_eta * tf.log(tf.matrix_determinant(2 * np.pi * param_eta * HaaInv))
else:
    term2 = 0.5 * (param_eta + param_omega) * tf.log(
        tf.matrix_determinant(2 * np.pi * (param_eta + param_omega) * HaaInv))

dual = param_eta * self.epsilon - param_omega * beta + \
    term1 + term2 + tf.reduce_mean(
        0.5 * (tf.reduce_sum(tf.matmul(ha, HaaInv) * ha, axis=1) - hss))

# Symbolic dual gradient
dual_grad = tf.gradients(xs=[param_eta, param_omega], ys=dual)

# Eval functions.
f_dual = U.function(
    inputs=[varphis, Kt, prec, Waa, Wsa, wa] + [param_eta, param_omega, old_entropy],
    outputs=dual,
    # mode='DebugMode'  # TEST
tensorflow.matrix_determinant
10,168
import tensorflow as tf

batch1 = tf.gather(batch, tf.random.shuffle(index))
batch2 = tf.gather(batch, tf.random.shuffle(index))
pred1 = tf.slice(batch1, [0, 0], [num_sam, 1])
pred2 = tf.slice(batch2, [0, 0], [num_sam, 1])
tensorflow.slice
10,169
import tensorflow as tf

    pmm = tf.pow(1.0 - tf.pow(x, 2.0), tf.cast(m, dtype=x.dtype) / 2.0)
    ones = tf.ones_like(m)
    pmm *= tf.cast(
        tf.pow(-ones, m) * double_factorial(2 * m - 1), dtype=pmm.dtype)
    return pmm

def _evaluate_legendre_polynomial_loop_cond(x, n, l, m, pmm, pmm1):
    return tf.cast(tf.math.count_nonzero(n <= l), tf.bool)

def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1):
    n_float = tf.cast(n, dtype=x.dtype)
    m_float = tf.cast(m, dtype=x.dtype)
    pmn = (x * (2.0 * n_float - 1.0) * pmm1 -
           (n_float + m_float - 1) * pmm) / (n_float - m_float)
    pmm = tf.where(tf.less_equal(n, l), pmm1, pmm)
tensorflow.math.count_nonzero
10,170
import tensorflow as tf

      A tuple containing:

      - Input tensor of the model.
      - Prediction tensor of the model.
      - Output class tensor of the model.
    """
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    # x is shaped [batch_size, time_steps, num_inputs]
    if is_dynamic_rnn:
        lstm_input = tf.transpose(x, perm=[1, 0, 2])
        outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
            lstm_layer, lstm_input, dtype="float32")
        outputs = tf.unstack(outputs, axis=0)
    else:
        lstm_input = tf.unstack(x, self.time_steps, 1)
        outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
tensorflow.placeholder
10,171
import tensorflow as tf

        predictions for the proposals.

    Raises:
        ValueError: if num_predictions_per_location is not 1.
    """
    if num_predictions_per_location != 1:
        raise ValueError('Currently RfcnBoxPredictor only supports '
                         'predicting a single box per class per location.')

    batch_size = tf.shape(proposal_boxes)[0]
    num_boxes = tf.shape(proposal_boxes)[1]

    def get_box_indices(proposals):
        proposals_shape = proposals.get_shape().as_list()
        if any(dim is None for dim in proposals_shape):
            proposals_shape = tf.shape(proposals)
        ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
        multiplier = tf.expand_dims(
            tf.range(start=0, limit=proposals_shape[0]), 1)
        return tf.reshape(ones_mat * multiplier, [-1])

    net = image_features
    with slim.arg_scope(self._conv_hyperparams):
        net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')
        # Location predictions.
        location_feature_map_depth = (self._num_spatial_bins[0] *
                                      self._num_spatial_bins[1] *
                                      self.num_classes *
tensorflow.shape
10,172
import tensorflow as tf

def exponentiate(x, log_lambdas, inverse=tf.constant(False)):
    if not inverse:
        z = tf.math.exp(log_lambdas) * x
        ldj = tf.math.reduce_sum(log_lambdas, axis=[1, 2, 3])
    else:
        z = x * tf.math.exp(-log_lambdas)
tensorflow.math.reduce_sum
10,173
import tensorflow as tf

import BatchDatsetReader as dataset
from six.moves import xrange
import os.path as osp

FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "2", "batch size for training")
tf.flags.DEFINE_string("logs_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\logs", "path to logs directory")
tf.flags.DEFINE_string("data_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\Data_zoo\STEM", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", r"E:\work\01-Myproject\imag_division\FCN.tensorflow-master\Model_zoo", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")

MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'

MAX_ITERATION = 100   # maximum number of steps
NUM_OF_CLASSESS = 3   # number of classes
IMAGE_SIZE = 2048     # image size

def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
tensorflow.flags.DEFINE_string
10,174
import tensorflow as tf

    tf.logging.info('Starting a training cycle.')
    fashionAI.train(input_fn=lambda: input_pipeline(True, model_scope, epochs_per_eval),
                    hooks=[logging_hook],
                    max_steps=(steps_per_epoch * train_epochs))

    tf.logging.info('Starting to evaluate.')
    eval_results = fashionAI.evaluate(
        input_fn=lambda: input_pipeline(False, model_scope, 1))
    tf.logging.info(eval_results)
tf.logging.info('Finished model {}.'.format(model_scope))

def main(_):
    # Using the Winograd non-fused algorithms provides a small performance boost.
tensorflow.logging.info
10,175
import tensorflow as tf

    avoid_zero_div = 1e-8
    avoid_nan_norm = tf.maximum(avoid_zero_div,
                                reduce_sum(tf.abs(grad),
                                           reduction_indices=red_ind,
                                           keepdims=True))
    normalized_grad = old_div(grad, avoid_nan_norm)
elif ord == 2:
    red_ind = list(range(1, len(x.get_shape())))
    avoid_zero_div = 1e-8
    square = tf.maximum(avoid_zero_div,
                        reduce_sum(tf.square(grad),
                                   reduction_indices=red_ind,
                                   keepdims=True))
    normalized_grad = old_div(grad, tf.sqrt(square))
else:
    normalized_grad = tf.sign(grad)
    normalized_grad = tf.stop_gradient(normalized_grad)
scaled_grad = eps * normalized_grad
tensorflow.square
10,176
from tensorflow.python.ops import math_ops

    """
    with ops.name_scope(name, 'true_positives', (predictions_idx, labels)):
        labels, predictions_idx = _maybe_select_class_id(
            labels, predictions_idx, class_id)
        tp = set_ops.set_size(set_ops.set_intersection(predictions_idx, labels))
        tp = math_ops.to_double(tp)
        if weights is not None:
            weights = math_ops.to_double(weights)
            tp = math_ops.mul(tp, weights)
    return tp

def _streaming_sparse_true_positive_at_k(predictions_idx,
                                         labels,
                                         k=None,
                                         class_id=None,
tensorflow.python.ops.math_ops.mul
10,177
import tensorflow as tf

if time_major:
    # (T,B,D) => (B,T,D)
    facts = tf.array_ops.transpose(facts, [1, 0, 2])

# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1]   # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
tensorflow.layers.dense
10,178
import tensorflow as tf

self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)

# Placeholders for features and captions
self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])

def _get_initial_lstm(self, features):
    with tf.variable_scope('initial_lstm'):
        features_mean = tf.reduce_mean(features, 1)

        w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
        b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
        h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)

        w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
        b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
        c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
        return c, h

def _word_embedding(self, inputs, reuse=False):
    with tf.variable_scope('word_embedding', reuse=reuse):
        w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
        x = tf.nn.embedding_lookup(w, inputs, name='word_vector')  # (N, T, M) or (N, M)
        return x

def _project_features(self, features):
    with tf.variable_scope('project_features'):
        w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
        features_flat = tf.reshape(features, [-1, self.D])
tensorflow.get_variable
10,179
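
The _get_initial_lstm pattern above projects mean image features into the LSTM's initial state; in isolation it looks like this (D=4 and H=3 chosen arbitrarily, and a Xavier weight initializer assumed, since the actual initializer lives outside the excerpt):

import tensorflow as tf

features = tf.random_normal([2, 5, 4])                    # (N, L, D)
features_mean = tf.reduce_mean(features, 1)               # (N, D)
w_h = tf.get_variable('w_h', [4, 3],
                      initializer=tf.contrib.layers.xavier_initializer())
b_h = tf.get_variable('b_h', [3], initializer=tf.constant_initializer(0.0))
h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)       # initial hidden state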
import tensorflow as tf

        rgb = tf.image.random_flip_left_right(rgb)

        # cast, bit conversion, compress domain, center
        rgb = tf.cast(rgb, tf.float32)
        if n_bits < 8:
            rgb = tf.floor(rgb/(2**(8-n_bits)))
tensorflow.cast
10,180
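
The n_bits branch above quantizes an 8-bit image down to 2**n_bits gray levels; standalone (a uint8 input assumed):

import tensorflow as tf

img = tf.placeholder(tf.uint8, [None, None, 3])
n_bits = 5
x = tf.cast(img, tf.float32)
x = tf.floor(x / 2 ** (8 - n_bits))   # 256 levels -> 32 levels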
import tensorflow as tf

        variables_collections,
        outputs_collections,
        trainable,
        use_spectral_norm,
        is_training,
        scope,
        conv_dims=2)

  if not self_attention:
    return h

  with tf.variable_scope("self_attention"):
    with tf.variable_scope("f"):
      f = convolution(
          inputs,
          num_outputs // 8,
          kernel_size,
          stride,
          padding,
          data_format,
tensorflow.variable_scope
10,181
import tensorflow as tf

        bboxes_pred = labels['decode_fn'](location_pred)  # (tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4]))

        cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
        location_pred = tf.reshape(location_pred, [-1, 4])
        glabels = tf.reshape(glabels, [-1])
        gscores = tf.reshape(gscores, [-1])
        gtargets = tf.reshape(gtargets, [-1, 4])

        # raw mask: positive > 0.5, negative < 0.3
        # each positive example has one label
        positive_mask = glabels > 0  # tf.logical_and(glabels > 0, gscores > params['match_threshold'])
        fpositive_mask = tf.cast(positive_mask, tf.float32)
        n_positives = tf.reduce_sum(fpositive_mask)
        # negative examples are those whose max overlap is still lower than neg_threshold; note that some positives may also have a lower jaccard
        # anchors whose gscores is 0 were either ignored during anchor encoding or have 0 overlap with all ground truth
        #negative_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
        negative_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
        #negative_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
        #negative_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
        fnegative_mask = tf.cast(negative_mask, tf.float32)
        n_negatives = tf.reduce_sum(fnegative_mask)

        n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
tensorflow.cast
10,182
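
The positive/negative bookkeeping above is boolean-mask arithmetic: build a mask, cast it to float, and sum it to count. With toy labels:

import tensorflow as tf

glabels = tf.constant([3, 0, 1, 0, 2])
positive_mask = glabels > 0                                      # [T, F, T, F, T]
n_positives = tf.reduce_sum(tf.cast(positive_mask, tf.float32))  # -> 3.0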
import tensorflow as tf

        if label_smoothing == 0.:  # assumed guard; the excerpt begins mid-function
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        else:
            loss = tf.losses.softmax_cross_entropy(
                label, logits, label_smoothing=label_smoothing,
                reduction=tf.losses.Reduction.NONE)
            loss = tf.reduce_mean(loss, name='xentropy-loss')

        def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
            with tf.name_scope('prediction_incorrect'):
                x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
            return tf.cast(x, tf.float32, name=name)

        wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

        wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
        return loss
tensorflow.cast
10,183
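
prediction_incorrect above is the complement of tf.nn.in_top_k; a standalone sketch with toy logits:

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3], [1.5, 0.2, 0.1]])
labels = tf.constant([1, 2])                                # second example is wrong on purpose
wrong = tf.logical_not(tf.nn.in_top_k(logits, labels, 1))   # [False, True]
top1_err = tf.reduce_mean(tf.cast(wrong, tf.float32))       # -> 0.5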
import tensorflow as tf

    x = x*g+b
    return x

def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))

        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))

        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

        gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
        bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
tensorflow.constant_initializer
10,184
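
The _ln helper whose tail opens the snippet (x = x*g + b) is standard layer normalization with a learned gain g and bias b; a version consistent with those calls (a sketch, since the full definition sits outside the excerpt):

import tensorflow as tf

def _ln(x, g, b, e=1e-5, axes=[1]):
    # normalize across the feature axis, then rescale with gain/bias
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x - u) / tf.sqrt(s + e)
    return x * g + b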
import tensorflow as tf

        x = tf.image.random_contrast(x, lower=lower, upper=upper)
        x = tf.image.random_saturation(x, lower=lower, upper=upper)
        x = tf.image.random_hue(x, max_delta=0.2*s)
        x = tf.clip_by_value(x, 0, 1)
        return x
tensorflow.clip_by_value
10,185
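
Each jitter op can push channel values outside [0, 1], which is why the chain ends with a clip; in isolation:

import tensorflow as tf

x = tf.random_uniform([4, 4, 3], -0.2, 1.2)  # stand-in for a jittered image
x = tf.clip_by_value(x, 0.0, 1.0)            # restore the valid pixel range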
import tensorflow as tf

    def fake_image_resizer_fn(image, masks=None):
      resized_image = tf.image.resize_images(image, [8, 8])
      results = [resized_image]
tensorflow.image.resize_images
10,186
import tensorflow as tf

            reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
            if self.dale_ratio:
                reg += self.L1_out * tf.reduce_mean(tf.matmul(tf.abs(self.W_out) * self.output_Connectivity,
                                                              self.Dale_out))
            else:
                reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity)

            # L2 weight regularization
            reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity))
            reg += self.L2_rec * tf.reduce_mean(tf.square(tf.abs(self.W_rec) * self.rec_Connectivity))
            if self.dale_ratio:
                reg += self.L2_out * tf.reduce_mean(tf.square(
                    tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out)))
            else:
                reg += self.L2_out * tf.reduce_mean(tf.square(tf.abs(self.W_out) * self.output_Connectivity))
tensorflow.abs
10,187
import tensorflow as tf

    elif encoder.attn_norm_fun == 'max':
        weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1])
    else:
        e -= tf.reduce_max(e, axis=1, keep_dims=True)
        T = encoder.attn_temperature or 1.0
        exp = tf.exp(e / T) * mask
        weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)

    weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)
tensorflow.exp
10,188
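
The else branch above is a numerically stable masked softmax with temperature: subtract the row max, exponentiate, zero out padding, and renormalize. Compactly, with toy scores:

import tensorflow as tf

e = tf.constant([[2.0, 1.0, 5.0]])
mask = tf.constant([[1.0, 1.0, 0.0]])               # last position is padding
e = e - tf.reduce_max(e, axis=1, keep_dims=True)
exp = tf.exp(e / 1.0) * mask                        # temperature T = 1.0
weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)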
import tensorflow as tf

        decoder_inputs = encoder_inputs[0][:, :-1]
        batch_size = tf.shape(decoder_inputs)[0]
        pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
        decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)

        outputs, _, states, attns, _, _, _ = attention_decoder(
            attention_states=attention_states, initial_state=encoder_state,
            decoder_inputs=decoder_inputs,
tensorflow.concat
10,189
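
Prepending BOS and dropping the final token is the usual way to turn target sequences into decoder inputs; standalone (a BOS id of 1 assumed in place of utils.BOS_ID):

import tensorflow as tf

targets = tf.constant([[5, 9, 2], [7, 4, 2]])                  # toy token ids
batch = tf.shape(targets)[0]
bos = tf.ones(shape=tf.stack([batch, 1]), dtype=tf.int32) * 1  # BOS id = 1 (assumed)
decoder_inputs = tf.concat([bos, targets[:, :-1]], axis=1)     # [[1,5,9],[1,7,4]]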
import tensorflow as tf

            logger.warn(Color.yellow('No ckpt file for restore vars, ckpt file is None'))
            return False
        reader = tf.train.NewCheckpointReader(self.restore_ckpt_file)
        saved_shapes = reader.get_variable_to_shape_map()
        if self._var_list is None:
            restore_key2vars = {var.name.split(':')[0]: var for var in tf.global_variables()}
        elif isinstance(self._var_list, list):
            restore_key2vars = {var.name.split(':')[0]: var for var in self._var_list}
        elif isinstance(self._var_list, dict):
            restore_key2vars = self._var_list
tensorflow.global_variables
10,190
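
Matching checkpoint variable names against the live graph, as above, enables selective restoring; the core pattern (checkpoint path hypothetical):

import tensorflow as tf

reader = tf.train.NewCheckpointReader('/path/to/model.ckpt')   # hypothetical path
saved_shapes = reader.get_variable_to_shape_map()
restorable = [v for v in tf.global_variables()
              if v.name.split(':')[0] in saved_shapes]
saver = tf.train.Saver(restorable)   # restores only variables present in the ckpt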
import tensorflow as tf

                bottom = out  # l (not l + 1) because from previous layer
                W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, FLAGS.feats_per_layer, FLAGS.feats_per_layer])
                b = bias_variable([FLAGS.feats_per_layer])
                Wx_b = tf.nn.conv3d(bottom, W, strides=[1,1,1,1,1], padding='VALID') + b
                out = tf.nn.relu(Wx_b)
                shape = out.get_shape()
                print('conv{}'.format(l+1))
                print('\t{} --> {}'.format(bottom.name, out.name))
                print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape()))
tensorflow.nn.relu
10,191
import tensorflow as tf

            True,
            bias_initializer=self._bias_initializer,
            kernel_initializer=self._kernel_initializer)
        c = self._activation(self._candidate_linear([inputs, r_state]))
        u = (1.0 - att_score) * u
        new_h = u * state + (1 - u) * c
        return new_h, new_h

def prelu(_x, scope=''):
    """parametric ReLU activation"""
    with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
        _alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
                                 dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
        return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)

def calc_auc(raw_arr):
    """Compute AUC from (score, label) pairs.

    Args:
        raw_arr: list of [score, label] pairs.

    Returns:
        Area under the ROC curve.
    """

    arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)
    pos, neg = 0., 0.
    for record in arr:
tensorflow.maximum
10,192
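
The prelu above splits its input into a positive part and an alpha-scaled negative part; functionally (with alpha fixed instead of learned):

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 3.0])
alpha = 0.1                                           # stands in for the learned variable
y = tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)   # -> [-0.2, 0.0, 3.0]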
import tensorflow as tf

'class no', 'class yh', 'class fb']

init = tf.initialize_all_variables()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

def train(train_num=64, test_num=32, lr=1e-4, loop_count=10000, report_step=100, save_step=1000, restore=False):
    with tf.Session(config=config) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if restore:
            tf.train.Saver().restore(sess, path)
        feed_dict = {
            testnum: test_num,
            trainnum: train_num,
            learnrate: lr
        }
        for i in range(loop_count):
tensorflow.train.Coordinator
10,193
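
The Coordinator/start_queue_runners pair drives TF 1.x queue-based input pipelines; the minimal lifecycle, including the shutdown the excerpt omits:

import tensorflow as tf

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # ... training steps that dequeue from the input pipeline ...
    coord.request_stop()   # ask reader threads to finish
    coord.join(threads)    # wait for a clean shutdown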
import tensorflow as tf

      *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which
        are sampled.
    """
    negative_idx = tf.logical_not(labels)
    positive_idx = tf.logical_and(labels, indicator)
    negative_idx = tf.logical_and(negative_idx, indicator)

    # Sample positive and negative samples separately
    if sample_size is None:
tensorflow.logical_and
10,194
import tensorflow as tf

        #print("actual ", tf.argmax(ys_i, 1).eval(session=sess))

        print('Validation accuracy for epoch {}: '.format(epoch_i+1), this_accuracy / its)
        print("---------------------------------------")

    print("***************")
    print("Training done!!")

    save_path = saver.save(sess, ckpt_name)
    print("Model saved in file: %s" % save_path)

print("creating protobuf...")
g_1 = tf.get_default_graph()
with tf.Session(graph=g_1) as sess:
    saver = tf.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True)
    saver.restore(sess, ckpt_name)
    graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes)
    tf.train.write_graph(tf.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
tensorflow.Session
10,195
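
convert_variables_to_constants folds checkpointed weights into the GraphDef so the model ships as a single protobuf; a self-contained miniature of the freezing step above:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 2], name='x')
w = tf.Variable(tf.ones([2, 1]), name='w')
y = tf.matmul(x, w, name='logits')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    frozen = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ['logits'])          # fold w into constants
    tf.train.write_graph(frozen, '/tmp', 'frozen.pb', as_text=False)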
import tensorflow as tf

    self._is_training = is_training
    self._input = input_
    self._rnn_params = None
    self._cell = None
    self.batch_size = input_.batch_size
    self.num_steps = input_.num_steps
    size = config.hidden_size
    vocab_size = config.vocab_size

    with tf.device("/cpu:0"):
      embedding = tf.get_variable(
          "embedding", [vocab_size, size], dtype=data_type())
      inputs = tf.nn.embedding_lookup(embedding, input_.input_data)

    if is_training and config.keep_prob < 1:
      inputs = tf.nn.dropout(inputs, config.keep_prob)

    output, state = self._build_rnn_graph(inputs, config, is_training)
tensorflow.device
10,196
from tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
tensorflow.contrib.layers.python.layers.layers._add_variable_to_collections
10,197
import tensorflow as tf

        self.predictions = tf.argmax(self.fc3, 1, name="predictions")

        losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.fc3, labels=self.input_y)
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.loss = tf.reduce_mean(losses) + sum(regularization_losses)

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tensorflow.name_scope
10,198
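
The accuracy block above compares argmax predictions against one-hot labels; in isolation with toy values:

import tensorflow as tf

logits = tf.constant([[0.2, 0.8], [0.9, 0.1]])
labels = tf.constant([[0.0, 1.0], [0.0, 1.0]])           # one-hot
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))  # -> 0.5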
import tensorflow as tf

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
            self.w1 = tf.get_variable('w1', [4096, 2048], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w2 = tf.get_variable('w2', [2048, 3072], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w3 = tf.get_variable('w3', [3072, 512], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.w4 = tf.get_variable('w4', [512, classnum], initializer=tf.contrib.layers.xavier_initializer_conv2d())
            self.b1 = tf.get_variable('b1', [2048], initializer=tf.constant_initializer(0.0))
            self.b2 = tf.get_variable('b2', [3072], initializer=tf.constant_initializer(0.0))
            self.b3 = tf.get_variable('b3', [512], initializer=tf.constant_initializer(0.0))
            self.b4 = tf.get_variable('b4', [classnum], initializer=tf.constant_initializer(0.0))

    def inference(self, images):
tensorflow.contrib.layers.xavier_initializer_conv2d
10,199