Columns:
  seed      (string, lengths 25 to 2.89k)
  seed_api  (string, lengths 14 to 102)
  index     (int64, 0 to 14.8k)
import tensorflow as tf

# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
tensorflow.matmul
8,900
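A minimal, runnable sketch of the tf.matmul pattern in this row (all shapes and values below are hypothetical stand-ins for the original model's variables):

import tensorflow as tf

x_col_sums = tf.ones([10])                     # hypothetical bag-of-words counts
A = tf.ones([10, 1])                           # hypothetical weight matrix
b = tf.zeros([1])                              # hypothetical bias
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)  # [10] -> [1, 10]: matmul needs rank-2 inputs
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)  # [1, 1] logit
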
import tensorflow as tf

{spacer}Positive count: {train_pos_ct}
{spacer}Batch size: {train_batch_size} {multiplier}
{spacer}Batch count per epoch: {train_batch_ct}
Eval:
{spacer}Positive count: {eval_pos_ct}
{spacer}Batch size: {eval_batch_size} {multiplier}
{spacer}Batch count per epoch: {eval_batch_ct}"""

_TRAIN_FEATURE_MAP = {
    movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
    movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
    rconst.MASK_START_INDEX: tf.FixedLenFeature([1], dtype=tf.string),
    "labels": tf.FixedLenFeature([], dtype=tf.string),
}

_EVAL_FEATURE_MAP = {
    movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
    movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
    rconst.DUPLICATE_MASK: tf.FixedLenFeature([], dtype=tf.string)
}

class DatasetManager(object):
    """Helper class for handling TensorFlow specific data tasks.
tensorflow.FixedLenFeature
8,901
import tensorflow as tf

    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
tensorflow.flags.DEFINE_string
8,902
import tensorflow as tf

        self.out = output_h4
        self.out2 = truthoutput_h4

        print(self.out.get_shape())

        self.recon1 = tf.nn.l2_loss(tgtimg - self.out)
        self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)
        self.loss = self.recon1 + self.recon2 + self.simloss
tensorflow.nn.l2_loss
8,903
import tensorflow as tf

        states = self.states
        dxt_list = tf.gradients(self.error, states)
        #dxt_list[0] = tf.Print(dxt_list[0], [dxt_list[0]], "dxt 0: ")
        test = tf.gradients(states[0], states[-1])
        dxt = tf.stack(dxt_list)
        xt = tf.stack(states)
tensorflow.gradients
8,904
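A reduced sketch of how tf.gradients behaves here, assuming TF 1.x graph mode and toy stand-ins for the snippet's error and state list:

import tensorflow as tf  # assumes TF 1.x graph mode; tf.gradients is unavailable in eager execution

states = [tf.Variable(tf.ones([3])) for _ in range(4)]  # toy per-timestep states
error = tf.reduce_sum(tf.add_n(states) ** 2)            # toy scalar error
dxt_list = tf.gradients(error, states)  # one gradient tensor per state
dxt = tf.stack(dxt_list)                # [num_steps, 3]
xt = tf.stack(states)                   # [num_steps, 3]
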
import tensorflow as tf

def conv1d_banks(inputs, K=16, is_training=True, scope="conv1d_banks"):
    with tf.variable_scope(scope):
        outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding="SAME")
        for k in range(2, K + 1):
            with tf.variable_scope("num_{}".format(k)):
                output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME")
                outputs = tf.concat((outputs, output), -1)
        outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training))
    return outputs
tensorflow.layers.conv1d
8,905
import tensorflow as tf

        normed: batch-normalized maps
    """
    with tf.variable_scope(scope) as sc:
        num_channels = inputs.get_shape()[-1].value
        beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
                            name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
        decay = bn_decay if bn_decay is not None else 0.9
        ema = tf.train.ExponentialMovingAverage(decay=decay)
        # Operator that maintains moving averages of variables.
tensorflow.constant
8,906
from tensorflow.contrib.learn.python.learn.estimators import test_data

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        model_dir=tempfile.mkdtemp(),
        linear_feature_columns=(bucketized_feature,),
        dnn_feature_columns=(cont_feature,),
        dnn_hidden_units=(3, 3))

    input_fn = test_data.iris_input_logistic_fn
    metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
        input_fn=input_fn, steps=100)
    self._assertSingleClassMetrics(metrics)

  def benchmarkTensorData(self):

    def _input_fn():
      iris = test_data.prepare_iris_data_for_logistic_regression()
      features = {}
      for i in range(4):
        # The following shows how to provide the Tensor data for
        # RealValuedColumns.
        features.update({
            str(i):
                array_ops.reshape(
                    constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
                    (-1, 1))
        })
      # The following shows how to provide the SparseTensor data for
      # a SparseColumn.
      features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
          values=('en', 'fr', 'zh'),
tensorflow.contrib.learn.python.learn.estimators.test_data.prepare_iris_data_for_logistic_regression
8,907
import tensorflow as tf

          # Else, just wait for max length
          lambda: not_overflow)
      return not_overflow

    result, logits, loss = tf.while_loop(
        while_exit_cond,
        infer_step, [result, logits, loss],
        shape_invariants=[
            tf.TensorShape([None, None, None, None]),
            tf.TensorShape([None, None, None, None, None]),
            tf.TensorShape([]),
        ],
        back_prop=False,
        parallel_iterations=1)

    if inputs_old is not None:  # Restore to not confuse Estimator.
      features["inputs"] = inputs_old
    # Reassign targets back to the previous value.
    if targets_old is not None:
tensorflow.TensorShape
8,908
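A toy illustration of the shape_invariants argument used above: tf.TensorShape entries with None mark dimensions that may change between tf.while_loop iterations (everything here is hypothetical):

import tensorflow as tf

def body(i, acc):
    # Concatenation grows axis 1 each iteration, so its invariant must be None.
    return i + 1, tf.concat([acc, tf.zeros([1, 1, 1, 1])], axis=1)

i0 = tf.constant(0)
acc0 = tf.zeros([1, 1, 1, 1])
_, final = tf.while_loop(
    lambda i, acc: i < 4, body, [i0, acc0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([1, None, 1, 1])])
# final has shape [1, 5, 1, 1]
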
import tensorflow as tf

    # For printing layers shape
    self.training_end_points = self.end_points_D
    self.training_end_points.update(self.end_points_G)

    tf.summary.histogram("d", self.end_points_D['D_on_data'])
    tf.summary.histogram("d_", self.end_points_D['D_on_G'])
    tf.summary.image("G", G)

    d_label_smooth = self.cnf['d_label_smooth']  # 0.25
    self.d_loss_real = self._sigmoid_kl_with_logits(
        self.end_points_D['D_on_data_logits'], 1. - d_label_smooth)
tensorflow.summary.histogram
8,909
import tensorflow as tf

        x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])
    return x_sliced

  def nearest_neighbor(self, x, means):
    """Find the nearest element in means to elements in x.

    Args:
      x: Batch of encoder continuous latent states sliced/projected into shape
        [-1, num_blocks, block_dim].
      means: Embedding means of shape.

    Returns:
      Tensor with nearest element in mean encoded in one-hot notation.
    """
    x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
    means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
    scalar_prod = tf.matmul(
        tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
    scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
    dist = x_norm_sq + tf.transpose(
        means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
    if self.hparams.soft_em:
      nearest_idx = tf.stack(
          [
              tf.multinomial(
                  -dist[:, i, :], num_samples=self.hparams.num_samples)
              for i in range(self.hparams.num_blocks)
tensorflow.square
8,910
import tensorflow as tf

sentence_embeddings = tf.divide(
tensorflow.divide
8,911
import tensorflow as tf

    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
tensorflow.split
8,912
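The tf.split call above carves one fused projection into the four LSTM gates; a standalone sketch with hypothetical sizes:

import tensorflow as tf

z = tf.ones([2, 4 * 8])  # hypothetical fused pre-activations: batch 2, four gates of width 8
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)  # four [2, 8] tensors
i, f, o = tf.nn.sigmoid(i), tf.nn.sigmoid(f), tf.nn.sigmoid(o)
u = tf.tanh(u)  # candidate cell update
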
import tensorflow as tf

            pred_label = tf.argmax(distillation_loss["st_logits"], axis=-1, output_type=tf.int32)
            correct = tf.equal(
                tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
                tf.cast(pred_label, tf.int32)
            )
            st_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

            pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1, output_type=tf.int32)
            correct = tf.equal(
                tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
                tf.cast(pred_label, tf.int32)
            )
            te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        except:
            te_accuracy = tf.constant(0.0)
            st_accuracy = tf.constant(0.0)

        try:
            st_accuracy = tf.reduce_mean(distillation_loss["src_f1_prob"])
            te_accuracy = tf.reduce_mean(distillation_loss["tgt_f1_prob"])
tensorflow.cast
8,913
import tensorflow as tf

    k = util.shape(top_span_emb, 0)
    top_antecedent_offsets = tf.tile(tf.expand_dims(tf.range(c) + 1, 0), [k, 1])  # [k, c]
    raw_top_antecedents = tf.expand_dims(tf.range(k), 1) - top_antecedent_offsets  # [k, c]
    top_antecedents_mask = raw_top_antecedents >= 0  # [k, c]
    top_antecedents = tf.maximum(raw_top_antecedents, 0)  # [k, c]

    top_fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.gather(top_span_mention_scores, top_antecedents)  # [k, c]
    top_fast_antecedent_scores += tf.log(tf.to_float(top_antecedents_mask))  # [k, c]
    return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets

  def get_predictions_and_loss(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):
    self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
tensorflow.expand_dims
8,914
import tensorflow as tf

        scope.reuse_variables()
        for layer, value in params.items():
            op = tf.get_variable('%s' % layer).assign(value)
            initializer.append(op)
    return initializer

def save_model(name, scope, sess):
    variables = tf.get_collection(tf.GraphKeys.WEIGHTS, scope=scope)
    d = [(v.name.split(':')[0], sess.run(v)) for v in variables]
    cPickle.dump(d, open(name, 'wb'))
tensorflow.get_collection
8,915
import tensorflow as tf
import math

class Simulator():
    def __init__(self, type) -> None:
        if type == 'D':  # deuteranope
            self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0.494207, 0, 1.24827], [0, 0, 1]])
        elif type == 'P':  # protanope
            self.color_matrix = tf.convert_to_tensor([[0, 2.02344, -2.52581], [0, 1, 0], [0, 0, 1]])
        elif type == 'T':  # tritanope
tensorflow.convert_to_tensor
8,916
import tensorflow as tf

  batch_size = tf.size(labels)
  labels = tf.expand_dims(labels, 1)
  indices = tf.expand_dims(tf.range(0, batch_size), 1)
  concated = tf.concat(1, [indices, labels])
  onehot_labels = tf.sparse_to_dense(
      concated, tf.pack([batch_size, 10]), 1.0, 0.0)
  logits = tf.get_collection("logits")[0]
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
      logits, onehot_labels, name="xentropy")
  loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
  tf.scalar_summary(loss.op.name, loss)
  # Creates the gradient descent optimizer with the given learning rate.
tensorflow.nn.softmax_cross_entropy_with_logits
8,917
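This row uses pre-1.0 APIs (tf.pack, tf.sparse_to_dense, count-first tf.concat, tf.scalar_summary); in later TensorFlow the same one-hot construction collapses to tf.one_hot, as in this sketch with hypothetical labels:

import tensorflow as tf

labels = tf.constant([3, 1, 4])               # hypothetical integer class ids
onehot_labels = tf.one_hot(labels, depth=10)  # replaces the expand_dims/concat/sparse_to_dense dance
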
import tensorflow as tf

            kernel = self.variable('weights',
                                   [kernel_size, kernel_size, input_channels, output_channels],
                                   initializer,
                                   regularizer=tf.contrib.layers.l2_regularizer(0.0005))
            conv = tf.nn.conv2d(bottom, kernel, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
            conv_layer = tf.nn.bias_add(conv, biases)
tensorflow.constant_initializer
8,918
import tensorflow as tf

        batch_size = tf.shape(X)[0]
        noise_shape = (batch_size, 1, 1, 1)
        random_tensor = keep_prob + tf.random_uniform(noise_shape, dtype=tf.float32)
        binary_tensor = tf.floor(random_tensor)
        X = (X / keep_prob) * binary_tensor
        return X

    def _do_conv(self, X, w, h, in_ch, out_ch, filter_size=1, no_relu=False, no_reg=False, is_train=False):
        W = self._make_var('W', (filter_size, filter_size, in_ch, out_ch), no_reg=no_reg)
        if not no_relu:
            X = tf.nn.relu(X)
        X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME')
        X = self._add_batch_norm(X, out_ch, is_train=is_train)
        X = tf.reshape(X, (-1, w, h, out_ch))  # Sanity shape check
        return X

    def _do_separable_conv(self, X, w, h, ch, filter_size=3, stride=1, ch_mul=1,
                           no_batch_norm=False, W_d=None, W_p=None, is_train=False):
        if W_d is None:
            W_d = self._make_var('W_d', (filter_size, filter_size, ch, ch_mul))
tensorflow.nn.relu
8,919
import tensorflow.contrib.layers as layers

    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
tensorflow.contrib.layers.convolution2d
8,920
import tensorflow as tf

    tf.summary.scalar("Learning Rate", m.lr)

  with tf.name_scope("Valid"):
    valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
    with tf.variable_scope("Model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
    tf.summary.scalar("Validation Loss", mvalid.cost)

  with tf.name_scope("Test"):
    test_input = PTBInput(
        config=eval_config, data=test_data, name="TestInput")
    with tf.variable_scope("Model", reuse=True, initializer=initializer):
      mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)

  models = {"Train": m, "Valid": mvalid, "Test": mtest}
  for name, model in models.items():
    model.export_ops(name)
  metagraph = tf.train.export_meta_graph()
  if tf.__version__ < "1.1.0" and FLAGS.num_gpus > 1:
    raise ValueError("num_gpus > 1 is not supported for TensorFlow versions "
                     "below 1.1.0")
tensorflow.variable_scope
8,921
import tensorflow as tf

        else:
            shortside_len = cfgs.IMG_SHORT_SIDE_LEN

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
            self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
                                   batch_size=cfgs.BATCH_SIZE * num_gpu,
                                   shortside_len=shortside_len,
                                   is_training=True)

        # data processing
        inputs_list = []
        for i in range(num_gpu):
            img = tf.expand_dims(img_batch[i], axis=0)
            pretrain_zoo = PretrainModelZoo()
            if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
                img = img / tf.constant([cfgs.PIXEL_STD])

            gtboxes_and_label_r = tf.py_func(backward_convert,
                                             inp=[gtboxes_and_label_batch[i]],
                                             Tout=tf.float32)
            gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])

            gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])
            gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
tensorflow.expand_dims
8,922
from tensorflow.python.ops import math_ops

        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")

    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        for v in var_list:
            self._zeros_slot(v, "g", self._name)

    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        g_t = grad
        g_t_1 = self.get_slot(var, "g")
        g_t = g_t_1.assign(g_t)
        var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1)  # Adam would be lr_t * g_t
tensorflow.python.ops.math_ops.cast
8,923
import tensorflow as tf

        o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4')
        o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5')

        # Decoder definition
        o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1')
        o_me1 = tf.concat([o_d1, o_c4], 3)  # Skip connection
        o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2')
        o_me2 = tf.concat([o_d2, o_c3], 3)  # Skip connection
        o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3')
        o_me3 = tf.concat([o_d3, o_c2], 3)  # Skip connection
        o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4')
        o_me4 = tf.concat([o_d4, o_c1], 3)  # Skip connection

        logits = tf.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation = None)
        prediction = tf.nn.softmax(logits, name = name + '_softmax')

        return logits, prediction

    def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"):
        with tf.variable_scope(name):
            conv = tf.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None)

            if do_norm:
                conv = tf.layers.batch_normalization(conv, momentum=0.9)

            if activation_function == "relu":
tensorflow.layers.conv2d
8,924
import tensorflow as tf

    fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]
    fwd_ops = [op for op in fwd_ops if not '/read' in op.name]
    ts_all = ge.filter_ts(fwd_ops, True)  # get the tensors
    ts_all = [t for t in ts_all if '/read' not in t.name]
    ts_all = set(ts_all) - set(xs) - set(ys)

    # construct list of tensors to checkpoint during forward pass, if not
    # given as input
    if type(checkpoints) is not list:
        if checkpoints == 'collection':
            checkpoints = tf.get_collection('checkpoints')
        elif checkpoints == 'speed':
            # checkpoint all expensive ops to maximize running speed
            checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
        elif checkpoints == 'memory':
            # remove very small tensors and some weird ops
            def fixdims(t):  # tf.Dimension values are not compatible with int, convert manually
tensorflow.get_collection
8,925
import tensorflow as tf

            "cluster": {"worker": worker},
            "task": {"type": "worker", "index": task_index},
        }
    )

    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    run_config = tf.estimator.RunConfig(
        save_summary_steps=1,
        train_distribute=strategy,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_ckpt_steps,
tensorflow.estimator.RunConfig
8,926
import tensorflow as tf

# Optimizer
with tf.name_scope('optimizer'):
tensorflow.name_scope
8,927
import tensorflow as tf

    Args:
      x: The input tensor.
      prediction: The prediction class tensor.
      output_class: The output tensor.
      sess: The graph session.
    """
    # input label placeholder
    y = tf.placeholder("float", [None, self.n_classes])
    # Loss function
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Optimization
    opt = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(loss)
    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)
    for _ in range(TRAIN_STEPS):
      batch_x, batch_y = self.mnist.train.next_batch(
          batch_size=self.batch_size, shuffle=False)
tensorflow.nn.softmax_cross_entropy_with_logits
8,928
import tensorflow as tf

        adjcents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent
          matrix between hops.
    """
    nodes = tf.reshape(nodes, [-1])
    nodes_list = [nodes]
    adj_list = []
    for hop_edge_types in edge_types:
        neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
        next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
        next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
        next_values = weight.values
        next_shape = [tf.size(nodes), tf.size(next_nodes)]
        next_adj = tf.sparse.SparseTensor(next_indices, next_values, next_shape)
        next_adj = tf.sparse.reorder(next_adj)
        nodes_list.append(next_nodes)
        adj_list.append(next_adj)
        nodes = next_nodes
    return nodes_list, adj_list
tensorflow.size
8,929
import tensorflow as tf

          fields.InputDataFields.multiclass_scores]
    tensor_dict.pop(fields.InputDataFields.multiclass_scores, None)

    if fields.InputDataFields.groundtruth_confidences in tensor_dict:
      groundtruth_confidences = tensor_dict[
          fields.InputDataFields.groundtruth_confidences]
      # Map the confidences to the one-hot encoding of classes
      tensor_dict[fields.InputDataFields.groundtruth_confidences] = (
          tf.reshape(groundtruth_confidences, [-1, 1]) *
          tensor_dict[fields.InputDataFields.groundtruth_classes])
    else:
      groundtruth_confidences = tf.ones_like(
          zero_indexed_groundtruth_classes, dtype=tf.float32)
      tensor_dict[fields.InputDataFields.groundtruth_confidences] = (
          tensor_dict[fields.InputDataFields.groundtruth_classes])
tensorflow.reshape
8,930
import tensorflow as tf

        p = int(math.floor(((oh - 1) * stride + kernel_size - h)//2))
        inputs = tf.pad(inputs, [[0, 0], [p, p], [p, p], [0, 0]], 'CONSTANT')
tensorflow.pad
8,931
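A self-contained sketch of the tf.pad call above on an NHWC tensor (sizes hypothetical); each [low, high] pair pads one axis, here only the two spatial dimensions:

import tensorflow as tf

inputs = tf.ones([1, 5, 5, 3])  # hypothetical NHWC feature map
p = 1                           # pad one pixel on each spatial side
inputs = tf.pad(inputs, [[0, 0], [p, p], [p, p], [0, 0]], 'CONSTANT')  # -> [1, 7, 7, 3]
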
import tensorflow as tf

            self.s = tf.layers.dense(inputs=self.z,
                                     units=self.g_dim,
                                     activation=tf.nn.elu)

            # Calculate manager output g
            x = tf.expand_dims(self.s, [0])
            self.manager_lstm = SingleStepLSTM(x,
                                               self.g_dim,
                                               step_size=tf.shape(self.obs)[:1])
            g_hat = self.manager_lstm.output
tensorflow.expand_dims
8,932
import tensorflow as tf

                    return val

                with tf.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter):
                    self.polyak_model = polyak_model = self.policy(
                        self.sess, self.observation_space, self.action_space, self.n_envs,
                        self.n_steps + 1, self.n_envs * (self.n_steps + 1), reuse=True,
                        **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    self.done_ph = tf.placeholder(tf.float32, [self.n_batch])  # dones
                    self.reward_ph = tf.placeholder(tf.float32, [self.n_batch])  # rewards, not returns
                    self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act])  # mu's
                    self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])
                    self.learning_rate_ph = tf.placeholder(tf.float32, [])
                    eps = 1e-6
tensorflow.variable_scope
8,933
import tensorflow as tf

  def _word_embedding(self, inputs, reuse=False):
    with tf.variable_scope('word_embedding', reuse=reuse):
      w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
      x = tf.nn.embedding_lookup(w, inputs, name='word_vector')  # (N, T, M) or (N, M)
      return x

  def _project_features(self, features):
    with tf.variable_scope('project_features'):
      w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
      features_flat = tf.reshape(features, [-1, self.D])
      features_proj = tf.matmul(features_flat, w)
      features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
      return features_proj

  def _attention_layer(self, features, features_proj, h, reuse=False):
tensorflow.variable_scope
8,934
from tensorflow.python.training import moving_averages

        trainable=False)
    return moving_averages.assign_moving_average(
tensorflow.python.training.moving_averages.assign_moving_average
8,935
import tensorflow as tf

            if status2:
              a = 2
            values = interpolated
            inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
            inter = tf.transpose(tf.reduce_max(inter, axis=a))
            im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy())
            plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0])
            print(mtype, fig_obj_count, 0)

            values = tf.math.sign(tf.nn.relu(interpolated + self.tol))
            inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
            inter = tf.transpose(tf.reduce_max(inter, axis=a))
            im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())
            plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])
            print(mtype, fig_obj_count, 1)

            if mtype == 1:
              values = sdf_values
              inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution])
              inter = tf.transpose(tf.reduce_max(inter, axis=a))
              im = axs[fig_obj_count, 4].matshow(inter.numpy())
              plt.colorbar(im, ax=axs[fig_obj_count, 4])
              print(mtype, fig_obj_count, 2)
              fig_obj_count += 1
tensorflow.reduce_max
8,936
import tensorflow as tf

                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
tensorflow.flags.DEFINE_string
8,937
import tensorflow as tf

    images, labels = input_name.build_input(
        FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)
    # FLAGS.mode='attack', batch_size=200

    Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False)
    Res.build_graph()
    saver = tf.train.Saver()

    # Open session and restore checkpoint
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tensorflow.train.Saver
8,938
from tensorflow.contrib import layers

          max_partitions=num_ps_replicas))

  for layer_id, num_hidden_units in enumerate(hidden_units):
    with variable_scope.variable_scope(
        parent_scope + "/hiddenlayer_%d" % layer_id,
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=activation_fn,
          variables_collections=[parent_scope],
          scope=scope)
      if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
        net = layers.dropout(net, keep_prob=(1.0 - dropout))
    _add_hidden_layer_summary(net, scope.name)

  with variable_scope.variable_scope(
      parent_scope + "/logits",
      values=[net],
      partitioner=hidden_layer_partitioner) as scope:
    logits = layers.fully_connected(
        net,
        head.logits_dimension,
        activation_fn=None,
        variables_collections=[parent_scope],
        scope=scope)
tensorflow.contrib.layers.dropout
8,939
import tensorflow as tf

    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tensorflow.nn.bias_add
8,940
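A minimal sketch of the logits head used in this row, with hypothetical dimensions (hidden size 8, 5 labels):

import tensorflow as tf

output_layer = tf.ones([2, 8])    # hypothetical pooled features, batch 2
output_weights = tf.ones([5, 8])  # hypothetical [num_labels, hidden_size]
output_bias = tf.zeros([5])
logits = tf.matmul(output_layer, output_weights, transpose_b=True)  # [2, 5]
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
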
import tensorflow as tf

    tmp = tf.tanh((tmp1 + tmp2) + b)

    # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
    v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp')  # (B,T) shape
    key_masks = mask  # [B, 1, T]
    # key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
    v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings)  # [B, 1, T]
    alphas = tf.nn.softmax(v_dot_tmp, name='alphas')  # (B,T) shape

    # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
    #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
tensorflow.ones_like
8,941
import tensorflow as tf

    file_pattern = os.path.join(dataset_dir, file_pattern % split_name)

    # Allowing None in the signature so that dataset_factory can use the default.
    if reader is None:
        reader = tf.TFRecordReader

    # Features in Pascal VOC TFRecords.
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/channels': tf.FixedLenFeature([1], tf.int64),
        'image/shape': tf.FixedLenFeature([3], tf.int64),
        'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
tensorflow.FixedLenFeature
8,942
from tensorflow import keras

# Create inference model using Keras
# The model here is a dnn regressor
def make_keras_estimator(output_dir):
    from tensorflow import keras
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(1))
    model.compile(loss = 'mean_squared_error',
                  optimizer = 'adam',
                  metrics = ['mae', 'mape'])  # mean absolute [percentage] error
    return keras.estimator.model_to_estimator(model, model_dir=output_dir)

# Create the inference model
def simple_rnn(features, labels, mode):
    # 0. Reformat input shape to become a sequence
    x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)

    # 1. Configure the RNN
    lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
    outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
tensorflow.keras.estimator.model_to_estimator
8,943
import tensorflow as tf

    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    if clip:
      log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))
    else:
      log_probs = tf.nn.log_softmax(logits, axis=-1)

    labels = tf.reshape(labels, [-1])

    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
tensorflow.nn.softmax
8,944
import tensorflow as tf

        low = pos - encoder.attn_window_size
        high = pos + encoder.attn_window_size

        mlow = tf.to_float(idx < low)
        mhigh = tf.to_float(idx > high)
        m = mlow + mhigh
        m += tf.to_float(idx >= encoder_input_length)
        mask = tf.to_float(tf.equal(m, 0.0))

    e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)
    weights = softmax(e, mask=mask)

    if encoder.attn_window_size > 0:
        sigma = encoder.attn_window_size / 2
        numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
        div = tf.truediv(numerator, 2 * sigma ** 2)
        weights *= tf.exp(div)  # result of the truncated normal distribution
        # normalize to keep a probability distribution
        # weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)

    weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
    return weighted_average, weights

def attention(encoder, scope=None, **kwargs):
    attention_functions = {
        'global': global_attention,
tensorflow.convert_to_tensor
8,945
import tensorflow as tf

        return tf.contrib.layers.batch_norm(input_tensor, scope=scope, is_training=training, decay=0.99)

    def deconv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer, stride=1, bn=False, training=False, relu=True):
        input_shape = bottom.get_shape().as_list()
        input_channels = input_shape[-1]
        output_shape = [input_shape[0], input_shape[1]*stride, input_shape[2]*stride, output_channels]
        with tf.variable_scope(name) as scope:
            kernel = self.variable('weights', [kernel_size, kernel_size, output_channels, input_channels],
                                   initializer, regularizer=tf.contrib.layers.l2_regularizer(0.0005))
            deconv = tf.nn.conv2d_transpose(bottom, kernel, output_shape, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
            deconv_layer = tf.nn.bias_add(deconv, biases)

            if bn:
                deconv_layer = self.batch_norm_layer('batch_norm_layer', deconv_layer, training)
            if relu:
                deconv_layer = tf.nn.relu(deconv_layer, name=scope.name)

        print('Deconv layer {0} -> {1}'.format(bottom.get_shape().as_list(), deconv_layer.get_shape().as_list()))
        return deconv_layer

    def variable(self, name, shape, initializer, regularizer=None):
        with tf.device('/cpu:0'):
            return tf.get_variable(name, shape, initializer=initializer, regularizer=regularizer, trainable=True)

    def fc_layer(self, bottom, in_size, out_size, name):
        with tf.variable_scope(name):
            weights, biases = self.get_fc_var(in_size, out_size, name)
tensorflow.nn.relu
8,946
import tensorflow as tf

      curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits(
          logits=logits, labels=tf.nn.softmax(logits)))
      entropy += curr_ent

      inputs = tf.nn.embedding_lookup(self.w_emb, op_id)

      next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)

      anchors = anchors.write(layer_id, next_h[-1])
      anchors_w_1 = anchors_w_1.write(layer_id, tf.matmul(next_h[-1], self.w_attn_1))
      inputs = self.g_emb

      return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1,
              arc_seq, entropy, log_prob)

    loop_vars = [
        tf.constant(2, dtype=tf.int32, name="layer_id"),
        inputs,
        prev_c,
        prev_h,
        anchors,
        anchors_w_1,
        arc_seq,
        tf.constant([0.0], dtype=tf.float32, name="entropy"),
        tf.constant([0.0], dtype=tf.float32, name="log_prob"),
    ]

    loop_outputs = tf.while_loop(_condition, _body, loop_vars,
                                 parallel_iterations=1)
tensorflow.constant
8,947
import tensorflow as tf

        cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)
        outputs = tf.concat(outputs, 2)
        self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)

        self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))
        self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))
        self.loss = self.loss1 + self.loss2
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)
tensorflow.abs
8,948
from tensorflow.python.framework import ops

    x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same size and type as `x` with absolute values.
  """
  with ops.op_scope([x], name, "Abs") as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype == types.complex64:
      return gen_math_ops.complex_abs(x, name=name)
    return gen_math_ops._abs(x, name=name)
tensorflow.python.framework.ops.op_scope
8,949
import tensorflow as tf

tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph())
tensorflow.get_default_graph
8,950
import tensorflow as tf

# This file was modified by Vincent ADAM
# ------------------------------------------
import tensorflow as tf
from settings import float_type
from quadrature import hermgauss
import numpy as np

def eye(N):
    """
    An identity matrix
    """
    return tf.diag(tf.ones(tf.stack([N, ]), dtype=float_type))

def variational_expectations(Fmu, Fvar, phi, num_gauss_hermite_points=20):
    """
    Compute the expected value of a function phi, given a Gaussian
    distribution for the input values.

    if
        q(f) = N(Fmu, Fvar)
    then this method computes
        \int phi(f) q(f) df.

    Here, we implement a default Gauss-Hermite quadrature routine
    """
tensorflow.stack
8,951
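The eye helper above builds an identity matrix via tf.diag over a ones vector; a sketch under TF 1.x (where tf.diag lives; TF 2 renamed it tf.linalg.diag), alongside the equivalent built-in:

import tensorflow as tf  # assumes TF 1.x

N = 4
manual = tf.diag(tf.ones(tf.stack([N]), dtype=tf.float32))  # the helper's construction
direct = tf.eye(N, dtype=tf.float32)                        # same matrix, built-in
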
import tensorflow as tf

    else:
        logdir = FLAGS.log_dir

    writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g)
    writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g)
    saver = tf.train.Saver(tf.global_variables())
    sv = tf.train.Supervisor(
        is_chief=True,
        logdir=logdir,
        init_op=init_op,
tensorflow.global_variables
8,952
import tensorflow as tf

tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ')
tensorflow.app.flags.DEFINE_integer
8,953
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

        # Block 1
        conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(self.inputs)
        conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1a)
        conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1b)
        pool1 = MaxPool2D(pool_size=[2, 2])(conv1c)

        # Block 2
        conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool1)
        conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2a)
        conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2b)
        pool2 = MaxPool2D(pool_size=[2, 2])(conv2c)

        # Block 3
        conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool2)
        conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3a)
        conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3b)
        pool3 = MaxPool2D(pool_size=[2, 2])(conv3c)
tensorflow.keras.layers.Conv2D
8,954
import tensorflow as tf

                self.compute_shape(l2_shape[2], self.ff_pool_strides[1][1]),
                self.compute_shape(l2_shape[3], self.ff_pool_strides[1][2]),
                final_dim]
        else:
            l2_shape = tf.identity(x_shape)

        # Initialize hidden layer activities
        if self.hidden_init == 'identity':
            l1_h2 = tf.identity(x)
            l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
            l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
        elif self.hidden_init == 'random':
            l1_h2 = tf.random_normal(x_shape, dtype=self.dtype)
            l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype)
            l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype)
        elif self.hidden_init == 'zeros':
            l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
            l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
            l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
        else:
            raise RuntimeError

        # While loop
        elems = [
            i0,
            x,
tensorflow.random_normal
8,955
import tensorflow as tf

            reduction=tf.losses.Reduction.MEAN))  # SUM, SUM_OVER_BATCH_SIZE, default mean by all elements

    mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss')
    tf.summary.scalar('mse', mse_loss)
tensorflow.add_n
8,956
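A reduced sketch of the tf.add_n/tf.multiply combination above, with hypothetical per-head losses and weight:

import tensorflow as tf

mse_loss_list = [tf.constant(0.5), tf.constant(0.25)]  # hypothetical per-head losses
mse_weight = 2.0                                       # hypothetical params['mse_weight']
mse_loss = tf.multiply(mse_weight, tf.add_n(mse_loss_list), name='mse_loss')  # 2.0 * 0.75
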
import tensorflow as tf

      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())

    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session(graph=tf.Graph()) as sess:
      v0_2 = tf.Variable(1000.0, name="v0")
      v1_2 = tf.Variable(2000.0, name="v1")
      save2 = tf.train.Saver([v0_2, v1_2])
      tf.initialize_all_variables().run()

      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())

  def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
    with self.test_session() as sess:
      var = tf.Variable(var_value, name=var_name)
tensorflow.train.Saver
8,957
import tensorflow as tf

        else:
            self.embedding_W = tf.Variable(tf.random_uniform([num_quantized_chars, embedding_size], -1.0, 1.0), name="embedding_W")

        self.embedded_characters = tf.nn.embedding_lookup(self.embedding_W, self.input_x)
        embedded_text_expand = tf.expand_dims(self.embedded_characters, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_tags"):
            W_tags = tf.get_variable("embed_W_tags", [tags_vocab_size, embedding_size], initializer=initializer)
            embedded_tags = tf.nn.embedding_lookup(W_tags, self.input_tags)
            embedded_tags_expanded = tf.expand_dims(embedded_tags, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_deps"):
            W_deps = tf.get_variable("embed_W_deps", [deps_vocab_size, embedding_size], initializer=initializer)
            embedded_deps = tf.nn.embedding_lookup(W_deps, self.input_deps)
            embedded_deps_expanded = tf.expand_dims(embedded_deps, -1)

        with tf.device('/cpu:0'), tf.name_scope("embedding_head"):
            W_head = tf.get_variable("embed_W_head", [num_quantized_chars, embedding_size], initializer=initializer)
            embedded_head = tf.nn.embedding_lookup(W_head, self.input_head)
            embedded_head_expanded = tf.expand_dims(embedded_head, -1)

        cnn_inputs = tf.concat(
            [embedded_text_expand, embedded_tags_expanded, embedded_deps_expanded, embedded_head_expanded], -1)

        print("-" * 20)
        print("Embedded Lookup:", cnn_inputs.get_shape())
        print("-" * 20)

        self.layers = []

        # Temp(First) Conv Layer
tensorflow.name_scope
8,958
import tensorflow as tf

        - responsible_next_loc is NOW policy
        '''
        self.value, self.next_loc_mean, self.loc_std, self.next_loc, self.state_out, self.state_in, self.state_init = self._build_net(self.inputs, self.prev_loc, RNN_SIZE, TRAINING, a_size)  # self.goal_pos

        if TRAINING:
            self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
            self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
            self.sampled_next_locs = tf.placeholder(tf.float32, [None, 2])  # sampled action is stored here
            self.policy = gaussian_pdf(self.next_loc_mean, self.loc_std, self.sampled_next_locs)  # Distribution == Policy
tensorflow.placeholder
8,959
import tensorflow as tf

        decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps),
                           tf.minimum(n, n * ((2*n) ** ((params.s - n * step) / (params.e - params.s)))))
        return tf.maximum(learning_rate * decay, 5e-6)
    elif params.learning_rate_decay == "piecewise_constant":
        return tf.train.piecewise_constant(
            tf.to_int32(global_step),
            params.learning_rate_boundaries,
            params.learning_rate_values)
    elif params.learning_rate_decay == "none":
tensorflow.to_int32
8,960
import tensorflow.contrib as contrib

    # he initialization
    weights_initializer=contrib.layers.variance_scaling_initializer(),
    # l2 regularization
    weights_regularizer=contrib.layers.l2_regularizer(reg_lambda),
    # BN
    normalizer_fn=contrib.layers.batch_norm,
tensorflow.contrib.layers.l2_regularizer
8,961
from tensorflow.python.framework import ops

    check_shape_op = logging_ops.Assert(
        math_ops.less_equal(array_ops.rank(target), 2),
        ["target's shape should be either [batch_size, 1] or [batch_size]"])
    with ops.control_dependencies([check_shape_op]):
      target = array_ops.reshape(
          target, shape=[array_ops.shape(target)[0], 1])
tensorflow.python.framework.ops.control_dependencies
8,962
from tensorflow.contrib.slim.python.slim.data import test_utils

def _create_tfrecord_dataset(tmpdir):
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)

  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)

  keys_to_features = {
      'image/encoded':
tensorflow.contrib.slim.python.slim.data.test_utils.create_tfrecord_files
8,963
from tensorflow.contrib.layers.python.layers import initializers

            activation_fn=tf.nn.relu,
            weights_initializer=gauss_initializer,
            trainable=False)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
    return out

def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        out = layers.flatten(out)
        # stddev = 1/n, where n = number of inputs
        gauss_initializer = initializers.xavier_initializer(uniform=False)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(
                out,
                num_outputs=num_actions,
                activation_fn=tf.nn.relu,
                biases_initializer=None,
                weights_initializer=gauss_initializer,
                weights_regularizer=None)
    return out

def atari_learn(env,
                session,
tensorflow.contrib.layers.python.layers.initializers.xavier_initializer
8,964
import tensorflow as tf

    instance_labels = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1],
                                   [1, 1, 2, 2, 2, 2, 2, 2],
                                   [1, 2, 2, 2, 2, 2, 2, 2],
                                   [0, 0, 0, 0, 2, 2, 2, 2],
                                   [0, 0, 0, 0, 2, 2, 2, 2]],
                                  dtype=tf.int32)
    instance_labels = tf.reshape(instance_labels, [-1])
    (indices, masks_t) = isu.randomly_select_one_point_per_segment(instance_labels)
    masks = tf.transpose(masks_t)
    masks = tf.reshape(masks, [3, 5, 8])
    expected_masks = self.get_instance_masks()
    selected_instances = tf.gather(instance_labels, indices)
    expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32)
    self.assertAllEqual(selected_instances.numpy(),
                        expected_selected_instances.numpy())
    self.assertAllClose(masks.numpy(), expected_masks.numpy())

  def test_inputs_Distances_to_centers(self):
    inputs = tf.random.uniform(
        [100, 8], minval=-10, maxval=10.0, dtype=tf.float32)
    centers = tf.random.uniform(
        [5, 8], minval=-10, maxval=10.0, dtype=tf.float32)
    distances1 = isu.inputs_distances_to_centers(inputs, centers)
    num_centers = tf.shape(centers)[0]
    inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),
tensorflow.constant
8,965
import tensorflow as tf

    if bidirectional:
        lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
        lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
        outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
        outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
tensorflow.contrib.rnn.TimeReversedFusedRNN
8,966
import tensorflow as tf

                                     [0, 0, 1]], dtype=tf.int32)
    self.assertAllEqual(labels_0_n.numpy(), expected_labels_0_n.numpy())

  def test_randomly_select_one_point_per_segment(self):
    instance_labels = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1],
                                   [1, 1, 2, 2, 2, 2, 2, 2],
                                   [1, 2, 2, 2, 2, 2, 2, 2],
                                   [0, 0, 0, 0, 2, 2, 2, 2],
                                   [0, 0, 0, 0, 2, 2, 2, 2]],
                                  dtype=tf.int32)
    instance_labels = tf.reshape(instance_labels, [-1])
    (indices, masks_t) = isu.randomly_select_one_point_per_segment(instance_labels)
    masks = tf.transpose(masks_t)
    masks = tf.reshape(masks, [3, 5, 8])
    expected_masks = self.get_instance_masks()
    selected_instances = tf.gather(instance_labels, indices)
    expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32)
    self.assertAllEqual(selected_instances.numpy(),
                        expected_selected_instances.numpy())
    self.assertAllClose(masks.numpy(), expected_masks.numpy())

  def test_inputs_Distances_to_centers(self):
    inputs = tf.random.uniform(
        [100, 8], minval=-10, maxval=10.0, dtype=tf.float32)
    centers = tf.random.uniform(
tensorflow.transpose
8,967
import tensorflow as tf

def featurize_state(state):
    scaled = scaler.transform([state])
    featurized = featurizer.transform(scaled)
    return featurized[0]

def build_policy_net_MountainCarContinuous(input_tf):
    mu = tf.layers.dense(input_tf, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu')  # estimated action value
    sigma = tf.layers.dense(input_tf, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma')  # estimated variance
    return mu, sigma

class PolicyEstimator_MountainCarContinuous():
    def __init__(self, entropy_beta=0.1, learning_rate=0.001, par_idx=0, scope="policy_estimator"):
        w_init = tf.random_normal_initializer(0., .1)
tensorflow.layers.dense
8,968
import tensorflow as tf

    :return [Tensor] [N, H', W', C]. Convolution results.
    """
    blk_indices_ = tf.reshape(blk_indices, [-1, 3])
    blk_shape = tf.shape(blk_indices)
    ksize = tf.shape(w)
tensorflow.shape
8,969
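A standalone sketch of the dynamic-shape pattern in this row (hypothetical block-index tensor): tf.shape returns the runtime shape as an int32 tensor, while tf.reshape with -1 flattens all leading axes:

import tensorflow as tf

blk_indices = tf.zeros([2, 3, 4, 3], dtype=tf.int32)  # hypothetical block indices
blk_indices_ = tf.reshape(blk_indices, [-1, 3])       # [24, 3]: keep only the coordinate axis
blk_shape = tf.shape(blk_indices)                     # [2, 3, 4, 3] as an int32 tensor
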
import tensorflow as tf

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    label_list = processor.get_labels()

    tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
tensorflow.contrib.cluster_resolver.TPUClusterResolver
8,970
import tensorflow as tf

    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
        num_written_lines = 0
        tf.logging.info("***** Predict results *****")
        for (i, prediction) in enumerate(result):
            probabilities = prediction["probabilities"]
            if i >= num_actual_predict_examples:
                break
            output_line = "\t".join(
                str(class_probability)
tensorflow.gfile.GFile
8,971
import tensorflow as tf

    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps)

    print('Starting a training cycle.')
    xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook])

if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
tensorflow.logging.set_verbosity
8,972
import tensorflow as tf

    progress = networks.compute_progress(
        current_image_id_ph,
        stable_stage_num_images,
        transition_stage_num_images,
        num_blocks=3)
    x = tf.random_normal([2, 16, 16, 3])
    logits, _ = networks.discriminator(
        x, progress, _num_filters_stub,
        networks.ResolutionSchedule(
            start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
    fake_loss = tf.reduce_sum(tf.square(logits))
    grad_norms = [
        _get_grad_norm(
            fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')),
        _get_grad_norm(
            fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')),
        _get_grad_norm(
            fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*'))
    ]

    grad_norms_output = None
    with self.test_session(use_gpu=True) as sess:
      sess.run(tf.global_variables_initializer())
      grad_norms_output = np.array([
          sess.run(grad_norms, feed_dict={current_image_id_ph: i})
          for i in range(15)  # total num of images
      ])

    # The gradient of block_1 is always on.
    self.assertEqual(
tensorflow.trainable_variables
8,973
from tensorflow.python.framework import ops

  return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]

ops.RegisterShape("L2Loss")(common_shapes.scalar_shape)
ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4))

@ops.RegisterShape("LRNGrad")
def _LRNGradShape(op):
  """Shape function for LRNGrad op."""
  in_grads_shape = op.inputs[0].get_shape().with_rank(4)
  in_image_shape = op.inputs[1].get_shape().with_rank(4)
  out_image_shape = op.inputs[2].get_shape().with_rank(4)
  return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]
tensorflow.python.framework.ops.RegisterShape
8,974
import tensorflow as tf

            # calc l2 losses
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            # do logit = W*X+b
            logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
            predictions = tf.nn.softmax(logit, name="predictions")

        # calculate loss and optimizer
tensorflow.nn.xw_plus_b
8,975
import tensorflow as tf

        # 50*50
        # Step-wise contrastive loss
        even = [2 * i for i in range(25)]
        odd = [2 * i + 1 for i in range(25)]
        pred1 = tf.gather(pred, even)
        pred2 = tf.gather(pred, odd)
        tgt1 = tf.gather(tgt, even)
        tgt2 = tf.gather(tgt, odd)
        geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
        tgt_larg = tf.where(geq, tgt1, tgt2)
        tgt_small = tf.where(geq, tgt2, tgt1)
tensorflow.gather
8,976
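The even/odd gathers above split a batch of 50 predictions into 25 contrastive pairs; a reduced sketch with a hypothetical score tensor:

import tensorflow as tf

pred = tf.range(50, dtype=tf.float32)  # hypothetical per-example scores
even = [2 * i for i in range(25)]
odd = [2 * i + 1 for i in range(25)]
pred1 = tf.gather(pred, even)          # first element of each pair
pred2 = tf.gather(pred, odd)           # second element of each pair
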
import tensorflow as tf

    loss, _, _ = l2hmc.compute_loss(dynamics, x)

    optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
    train_op, loss, _ = graph_step(dynamics, optimizer, x)

    # Single thread; fairer comparison against eager
    session_conf = tf.ConfigProto(inter_op_parallelism_threads=1)

    with tf.Session(config=session_conf) as sess:
      sess.run(tf.global_variables_initializer())

      # Warmup to reduce initialization effect when timing
      for _ in range(hparams.n_warmup_iters):
        _, _ = sess.run([train_op, loss])

      # Training
tensorflow.Session
8,977
import tensorflow as tf

    def _pred_graph(self, data):
        with tf.name_scope('pred'):
            with tf.device('/gpu:0'):
                pred_out = self._model(data, Mode.PRED, **self.config)
        self.pred_out = {n: tf.identity(p, name=n) for n, p in pred_out.items()}

    def _build_graph(self):
        # Training and evaluation network, if tf datasets provided
tensorflow.identity
8,978
import tensorflow as tf

        n_out: integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
        scope: string, variable scope
        affn: whether to affn-transform outputs
    Return:
        normed: batch-normalized maps
    Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow/33950177
    """
    name = 'batch_norm'
    with tf.variable_scope(name):
        phase_train = tf.convert_to_tensor(phase_train, dtype=tf.bool)
        n_out = int(x.get_shape()[3])
        beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=x.dtype),
                           name=name+'/beta', trainable=True, dtype=x.dtype)
        gamma = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=x.dtype),
                            name=name+'/gamma', trainable=True, dtype=x.dtype)

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = control_flow_ops.cond(phase_train,
                                          mean_var_with_update,
                                          lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
tensorflow.constant
8,979
import tensorflow as tf

    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
tensorflow.nn.dropout
8,980
import tensorflow as tf

                weights[CLUSTERING_IMPL].get_pulling_indices(weights[ORIGINAL_WEIGHTS]),
                weights[PULLING_INDICES].dtype))

        output = weights[CLUSTERING_IMPL].get_clustered_weight(
            weights[PULLING_INDICES], weights[ORIGINAL_WEIGHTS])
        inputs.assign(output)
      else:
        if self.preserve_sparsity:
          inputs = tf.multiply(inputs, weights[SPARSITY_MASK])
        output = inputs
    else:
      output = inputs

    return quant_ops.LastValueQuantize(
        output,
        weights['min_var'],
        weights['max_var'],
        is_training=training,
tensorflow.multiply
8,981
import tensorflow as tf

def benchmark_one_step(sess, fetches, step, batch_size, step_train_times,
                       trace_filename, summary_op=None):
  """Advance one step of benchmarking."""
  if trace_filename is not None and step == -1:
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
  else:
    run_options = None
    run_metadata = None
tensorflow.RunOptions
8,982
import tensorflow as tf

X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
M = tf.placeholder(tf.float32, [None, 2, n_ctx])

Y_train = tf.placeholder(tf.int32, [n_batch_train])
Y = tf.placeholder(tf.int32, [None])
tensorflow.placeholder
8,983
import tensorflow as tf

def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat(0, [shape[:1] // parts, shape[1:]])
        stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
tensorflow.device
8,984
import tensorflow as tf

                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
tensorflow.flags.DEFINE_string
8,985
import tensorflow as tf

    # X: [n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2, n_ctx, 2]
    X = tf.reshape(X, [-1, n_ctx, 2])
    M = tf.reshape(M, [-1, n_ctx])

    h = embed(X, we)  # h = [-1, n_ctx, emb]
    for layer in range(n_layer):
        h = block(h, 'h%d' % layer, train=train, scale=True)
    # h = [-1, n_ctx, emb]; lm_h = [-1, emb]
    lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
    lm_logits = tf.matmul(lm_h, we, transpose_b=True)
    lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
    lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1] - 1])
    lm_losses = tf.reduce_sum(lm_losses * M[:, 1:], 1) / tf.reduce_sum(M[:, 1:], 1)

    clf_h = tf.reshape(h, [-1, n_embd])
    pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
    clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32) * n_ctx + pool_idx)

    clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
    if train and clf_pdrop > 0:
        shape = shape_list(clf_h)
        shape[1] = 1
        clf_h = tf.nn.dropout(clf_h, 1 - clf_pdrop, shape)
tensorflow.reshape
8,986
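The leading reshape in this row merges the batch and choice axes so every context is processed as one row; a sketch with hypothetical sizes:

import tensorflow as tf

n_ctx = 77                                      # hypothetical context length
X = tf.zeros([4, 2, n_ctx, 2], dtype=tf.int32)  # [batch, 2 choices, n_ctx, 2]
X = tf.reshape(X, [-1, n_ctx, 2])               # -> [8, n_ctx, 2]
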
import tensorflow as tf

        loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
        loss = loss_pg + loss_vf + loss_entropy
        opt = tf.train.AdamOptimizer(self.LR)
        self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)

        self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
        self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]

        self.sess.run(tf.global_variables_initializer())

        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
tensorflow.global_variables_initializer
8,987
import tensorflow as tf
import time
from tensorflow.python.ops import nn_ops  # used by the dense reference path below

tf.reset_default_graph()
ts = 0
with tf.device("/gpu:0"):
    approx_scskconv = sc_module.direct_sparse_conv_kd(
        pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping,
        pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping,
        bias, strides, padding, out_entry_count, dim, max_density, filter_type)
with tf.Session(config=config) as sess:
    t6 = time.time()
    sv3 = sess.run(approx_scskconv)  # warm-up run, excluded from the timing
    t5 = time.time()
    for i in range(0, num_trials):
        sess.run(approx_scskconv)
    t6 = time.time()
    ts = abs(t6 - t5) / max(num_trials, 1)  # mean wall time per timed run
    print("time approx sparse: ", ts)
tf.reset_default_graph()
time.sleep(1)

if dense:
    td = 0
    with tf.device("/gpu:0"):
        conv = nn_ops.conv3d(d1, d2, strides, padding)
    with tf.Session(config=config) as sess:
        t22 = time.time()
        expected = sess.run(conv)
        t11 = time.time()
        for i in range(0, num_trials):
            sess.run(conv)
tensorflow.reset_default_graph
8,988
import tensorflow as tf jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06 L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter) V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples] V = tf.random_normal(V_shape, dtype=L.dtype) samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V) return tf.transpose(samples) #samples = [] #for i in range(self.num_latent_functions):
tensorflow.transpose
8,989
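The tf.batch_cholesky and tf.batch_matmul calls above are pre-1.0 APIs; since TF 1.0 the batching is built into tf.cholesky and tf.matmul. A sketch of the same reparameterized Gaussian sampling with the modern names (the mean, covariance, and sample count are invented):

import tensorflow as tf

mu = tf.zeros([3])                                    # mean vector
cov = tf.constant([[1.0, 0.5, 0.0],
                   [0.5, 1.0, 0.3],
                   [0.0, 0.3, 1.0]]) + 1e-6 * tf.eye(3)  # jittered covariance
L = tf.cholesky(cov)                                  # stand-in for tf.batch_cholesky
eps = tf.random_normal([3, 5])                        # 5 standard-normal draws
samples = tf.expand_dims(mu, -1) + tf.matmul(L, eps)  # stand-in for tf.batch_matmul
samples = tf.transpose(samples)                       # [num_samples, dim]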
import tensorflow as tf return inst_weights inst_weights = gather_init_weights() bs = FLAGS.train_batch_size hw = FLAGS.src_hw inst_weights, indices = tf.nn.top_k( inst_weights, k=bs, sorted=True, ) src_features = tf.reshape(src_features, [ bs * FLAGS.source_train_batch_multiplier, hw, hw, 1, ]) src_features = tf.gather(src_features, indices, axis=0) src_features = tf.stop_gradient(src_features) src_labels = tf.gather(src_labels, indices)
tensorflow.reshape
8,990
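A compact sketch of the top_k-then-gather reweighting pattern used above, with toy weights and features in place of the real batch:

import tensorflow as tf

weights = tf.constant([0.1, 0.9, 0.4, 0.7])
features = tf.constant([[1.], [2.], [3.], [4.]])
top_w, indices = tf.nn.top_k(weights, k=2, sorted=True)
picked = tf.gather(features, indices, axis=0)
with tf.Session() as sess:
    print(sess.run(picked))  # rows for weights 0.9 and 0.7 -> [[2.] [4.]]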
import tensorflow as tf with tf.Session() as sess: sess.run(tf.global_variables_initializer())
tensorflow.global_variables_initializer
8,991
import tensorflow as tf class Saliency(GradientBasedMethod): def get_symbolic_attribution(self): return [tf.abs(g) for g in tf.gradients(ys=self.T, xs=self.X)] """ Gradient * Input https://arxiv.org/pdf/1704.02685.pdf - https://arxiv.org/abs/1611.07270 """ class GradientXInput(GradientBasedMethod): def get_symbolic_attribution(self): return [g * x for g, x in zip( tf.gradients(ys=self.T, xs=self.X), self.X if self.has_multiple_inputs else [self.X])] """ Layer-wise Relevance Propagation with epsilon rule http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0130140 """ class EpsilonLRP(GradientBasedMethod): eps = None def __init__(self, T, X, session, keras_learning_phase, epsilon=1e-4, Y_shape=None): assert epsilon > 0.0, 'LRP epsilon must be greater than zero'
tensorflow.gradients
8,992
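A standalone saliency computation in the spirit of the Saliency class above; the model here is a throwaway linear layer, not anything from the source.

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 3])
T = tf.reduce_sum(X * tf.constant([1.0, -2.0, 0.5]), axis=1)
# Absolute input gradients of the target w.r.t. the input, as in Saliency.
saliency = [tf.abs(g) for g in tf.gradients(ys=T, xs=X)]
with tf.Session() as sess:
    print(sess.run(saliency, feed_dict={X: [[1., 1., 1.]]}))
    # [array([[1. , 2. , 0.5]], dtype=float32)]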
import tensorflow as tf
import timeit

import numpy as np
from tensorflow.python.ops import random_ops  # imports required by the body below

# Benchmarking code
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
  np.random.seed(1618)  # Make it reproducible.

  # No CSE/CF.
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
  config = tf.ConfigProto(
      graph_options=tf.GraphOptions(optimizer_options=optimizer_options))

  with tf.Session(config=config) as sess:
    with tf.device("/cpu:0" if not use_gpu else None):
      param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
      naive_op = tf.group(random_ops.truncated_normal(shape))

      # Burn-in to avoid session setup costs in the timing.
      sess.run(param_op)
      sess.run(param_op)
      param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
      sess.run(naive_op)
      sess.run(naive_op)
      naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
  return param_dt, naive_dt
tensorflow.device
8,993
import tensorflow as tf final_loss = tf.reduce_mean(loss) return final_loss def contra_traj_lossV1(pred, tgt, temp=10.0): # Trajectory-wise contrastive loss traj_pred = tf.reduce_mean(pred, axis=1) traj_tgt = tf.reduce_mean(tgt, axis=1) p1, p2 = tf.split(traj_pred, 2, axis=0) t1, t2 = tf.split(traj_tgt, 2, axis=0) soft_sign = tf.tanh((t1 - t2) * temp) loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2))) loss = tf.reduce_mean(loss) return loss def horizon_sumV1(input, horizon=12):
tensorflow.split
8,994
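The split-into-halves pairing in contra_traj_lossV1 works like this on a toy batch (it assumes an even batch size, so the two halves line up):

import tensorflow as tf

traj = tf.constant([1.0, 4.0, 2.0, 3.0])   # per-trajectory scalars, batch of 4
t1, t2 = tf.split(traj, 2, axis=0)         # pairs: (1, 2) and (4, 3)
soft_sign = tf.tanh((t1 - t2) * 10.0)
with tf.Session() as sess:
    print(sess.run(soft_sign))  # ~[-1.  1.]: soft sign of each pairwise gap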
import tensorflow as tf add = tf.assign_add(p, y) p.initializer.run() new_value = add.eval() return p.eval(), new_value def _initAssignSubFetch(self, x, y, use_gpu=False): """Initialize a param to init, and compute param -= y.""" with self.test_session(use_gpu=use_gpu): p = tf.Variable(x) sub = tf.assign_sub(p, y) p.initializer.run() new_value = sub.eval() return p.eval(), new_value def _testTypes(self, vals): for dtype in [np.float32, np.float64, np.int32, np.int64]:
tensorflow.Variable
8,995
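Outside the test harness, the same assign ops look like this; the values are arbitrary.

import tensorflow as tf

p = tf.Variable(10.0)
add = tf.assign_add(p, 2.5)
sub = tf.assign_sub(p, 1.0)
with tf.Session() as sess:
    sess.run(p.initializer)
    print(sess.run(add))  # 12.5
    print(sess.run(sub))  # 11.5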
import tensorflow as tf gscores = labels['targets'][2 * num_feature_layers : 3 * num_feature_layers][0] with tf.variable_scope(params['model_scope'], default_name = None, values = [features], reuse=tf.AUTO_REUSE): backbone = xdet_body_v3.xdet_resnet_v3(params['resnet_size'], params['data_format']) body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN)) cls_pred, location_pred = xdet_body_v3.xdet_head(body_cls_output, body_regress_output, params['num_classes'], num_anchors_list[0], (mode == tf.estimator.ModeKeys.TRAIN), data_format=params['data_format']) if params['data_format'] == 'channels_first': cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1]) location_pred = tf.transpose(location_pred, [0, 2, 3, 1]) bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4])) cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']]) location_pred = tf.reshape(location_pred, [-1, 4]) glabels = tf.reshape(glabels, [-1]) gscores = tf.reshape(gscores, [-1]) gtargets = tf.reshape(gtargets, [-1, 4]) # raw mask for positive > 0.5, and for negetive < 0.3 # each positive examples has one label positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold']) fpositive_mask = tf.cast(positive_mask, tf.float32) n_positives = tf.reduce_sum(fpositive_mask) # negtive examples are those max_overlap is still lower than neg_threshold, note that some positive may also has lower jaccard # note those gscores is 0 is either be ignored during anchors encode or anchors have 0 overlap with all ground truth #negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.) negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.) #negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
tensorflow.reshape
8,996
import tensorflow as tf top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c] top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c] same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c] non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
tensorflow.expand_dims
8,997
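The expand_dims/equal combination above is a pairwise-comparison broadcast: a [k, 1] column against a [k]-shaped row yields a [k, k] same-cluster matrix. A tiny reproduction with invented cluster ids:

import tensorflow as tf

cluster_ids = tf.constant([1, 2, 1])
same = tf.equal(tf.expand_dims(cluster_ids, 1), cluster_ids)
with tf.Session() as sess:
    print(sess.run(same))
    # [[ True False  True]
    #  [False  True False]
    #  [ True False  True]]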
import tensorflow as tf

def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name):
    with tf.variable_scope(name):
        filt, conv_biases = self.get_conv_var(kernal_size, in_channels, out_channels, name)
        conv = tf.nn.conv2d(bottom, filt, [1, stride, stride, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, conv_biases)
        tf.summary.histogram('weight', filt)
        tf.summary.histogram('bias', conv_biases)
        return bias

def conv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer,
                 stride=1, bn=False, training=False, relu=True):
tensorflow.summary.histogram
8,998
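To actually see histograms like those above, the summaries have to be merged and written out; a minimal sketch with an invented variable and log directory:

import tensorflow as tf

w = tf.Variable(tf.truncated_normal([3, 3]))
tf.summary.histogram('weight', w)
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()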
import tensorflow as tf tf.app.flags.DEFINE_string('ws_save_path', './models_ws/model.ckpt', 'WS: model\'s save path')
tensorflow.app.flags.DEFINE_string
8,999