seed: string (lengths 25–2.89k)
seed_api: string (lengths 14–102)
index: int64 (0–14.8k)
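The rows that follow are (seed, seed_api, index) triples: a code snippet, the fully qualified TensorFlow API it exercises, and a numeric row id. A minimal sketch of how one such row could be represented in Python; the Row class and parse_row helper below are illustrative assumptions, not part of the dataset:

from dataclasses import dataclass

@dataclass
class Row:
    seed: str       # code snippet that uses the API
    seed_api: str   # fully qualified API name, e.g. "tensorflow.reduce_mean"
    index: int      # row id, roughly 0 through 14.8k

def parse_row(fields):
    # fields arrive as three strings, one per column, in the order shown above;
    # the index column uses a thousands separator ("10,200"), so strip it first.
    seed, seed_api, index = fields
    return Row(seed=seed, seed_api=seed_api, index=int(index.replace(",", "")))

# Example (values taken from the first row below):
# parse_row(("import tensorflow as tf ...", "tensorflow.contrib.distributions.Normal", "10,200"))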
import tensorflow as tf self.mu = self.mu * action_bound[1]; self.sigma = self.sigma + 1e-5 self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma) self.action = tf.squeeze(self.normal_dist.sample(1),axis=0);
tensorflow.contrib.distributions.Normal
10,200
import tensorflow as tf step=global_step) tf.contrib.summary.scalar( 'rpn_box_loss', tf.reduce_mean(rpn_box_loss), step=global_step) tf.contrib.summary.scalar( 'total_fast_rcnn_loss', tf.reduce_mean(total_fast_rcnn_loss), step=global_step) tf.contrib.summary.scalar( 'fast_rcnn_class_loss', tf.reduce_mean(fast_rcnn_class_loss),
tensorflow.reduce_mean
10,201
import tensorflow as tf self.epsilon = epsilon self.axis = axis self.center=center self.scale=scale with tf.variable_scope(name) as scope: with tf.variable_scope('bn') : self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0)) self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0)) self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False) self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False) self.scope = scope
tensorflow.constant_initializer
10,202
import tensorflow as tf normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return # Test case 3. x = tf.placeholder_with_default(input=1, shape=None) is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x)) self.assertTrue(self.evaluate(is_scalar)) x = tf.placeholder_with_default(input=[1], shape=None) is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x)) self.assertFalse(self.evaluate(is_scalar)) def _GetFakeDistribution(self): class FakeDistribution(tfd.Distribution):
tensorflow.shape
10,203
import tensorflow as tf coord.join() def test_get_inputs_cancelled(self): with tf.Graph().as_default(): @dynamic_batching.batch_fn def f(a): return a f(tf.constant([1])) # Intentionally using tf.Session() instead of self.test_session() to have # control over closing the session. test_session() is a cached session. with tf.Session(): coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) # Sleep to make sure the queue runner has started the first run call. time.sleep(_SLEEP_TIME) # Session closed. coord.request_stop() coord.join() def test_batcher_closed(self): with tf.Graph().as_default(): @dynamic_batching.batch_fn
tensorflow.Session
10,204
import tensorflow as tf def get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None): with tf.device('/cpu:0'): var = tf.get_variable(name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES])
tensorflow.get_variable
10,205
import tensorflow as tf # (box_limits_x[1][0], box_limits_y[1], box_limits_z[1][0]), # [self.resolution, self.resolution, self.resolution]) # samples_world = grid.generate( # (-5.0, -5.0, -5.0), # (5.0, 5.0, 5.0), # [self.resolution, self.resolution, self.resolution]) samples_world = tf.reshape(samples_world, [-1, 3]) ious = [] status = False if status: _, axs = plt.subplots(labeled_translations.shape[0], 5)
tensorflow.reshape
10,206
import tensorflow as tf tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode())) tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev())) tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf)) self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
tensorflow.variable_scope
10,207
import tensorflow as tf # Image and Mask must be same dimension by now. Both have dimensions (x, y, 3) h, w, _ = image.shape grid = 8 image = image[:h // grid * grid, :w // grid * grid, :] mask = mask[:h // grid * grid, :w // grid * grid, :] print('Shape of image: {}'.format(image.shape)) image = np.expand_dims(image, 0) mask = np.expand_dims(mask, 0) print(image.shape) print(mask.shape) input_image = np.concatenate([image, mask], axis=2) print(input_image.shape) sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: input_image = tf.constant(input_image, dtype=tf.float32) output = MODEL.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5 output = tf.reverse(output, [-1]) output = tf.saturate_cast(output, tf.uint8) # load pretrained model vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name)
tensorflow.ConfigProto
10,208
import tensorflow as tf resized = tf.nn.conv3d_transpose( value=x, filter=kernel, output_shape=y_size, strides=[1] + strides + [1], padding=self.padding, name='resize_x_to_y') resized = tf.nn.bias_add( resized, bias) resized = self.ff_nl(resized) return resized elif mode == 'replicate_n_transpose': resized = tf.image.resize_images( x, y_size[:-1], kernel, align_corners=False) resized = tf.nn.conv3d_transpose( value=resized, filter=kernel, output_shape=y_size, strides=[1, 1, 1, 1, 1], padding='SAME', name='resize_x_to_y') resized = tf.nn.bias_add(
tensorflow.image.resize_images
10,209
import tensorflow as tf # pred1 = tf.slice(batch1, [0, 0], [num_sam, 1]) # pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) # tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1]) # tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) # loss = compute_contra_loss(pred1, pred2, tgt1, tgt2) # print(loss) # return loss i = tf.constant(0) loss = tf.constant(0.) final_loss = tf.while_loop(lambda l, i: i < resample, sample_compute, [loss, i])[0] # final_loss = tf.scan(sample_compute, tf.range(resample), loss)[-1] # final_loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems= tf.range(resample), dtype=tf.float32, parallel_iterations=1) # print('final', final_loss) # final_loss = loss avg_loss = tf.reduce_mean(final_loss) / divider # p = tf.print('cur_loss', [final_loss, avg_loss]) # with tf.control_dependencies([p]): # avg_loss = tf.identity(avg_loss) # print(final_loss, avg_loss) # p = tf.print('debug loss ', [final_loss, avg_loss]) # with tf.control_dependencies([p]): # avg_loss = 1. * avg_loss # print(avg_loss) # exit() return avg_loss def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
tensorflow.reduce_mean
10,210
import tensorflow as tf tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed
tensorflow.data.TFRecordDataset
10,211
import tensorflow as tf if activation_function == "elu": conv = tf.nn.elu(conv, name = 'elu') return conv def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"): with tf.variable_scope(name): deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None) if do_norm: deconv = tf.layers.batch_normalization(deconv, momentum = 0.9) if activation_function == "relu": deconv = tf.nn.relu(deconv, name = 'relu') if activation_function == "leakyrelu": deconv = tf.nn.leaky_relu(deconv, alpha=relu_factor) if activation_function == "elu": deconv = tf.nn.elu(deconv, name = 'elu') return deconv
tensorflow.nn.relu
10,212
import tensorflow as tf hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if task_name != "sts-b": probabilities = tf.nn.softmax(logits, axis=-1) predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
tensorflow.variable_scope
10,213
import tensorflow as tf pool += shortcut pool = fixed_padding(inputs=pool) return tf.layers.conv1d(inputs=pool, filters=pool.get_shape()[2]*2, kernel_size=1, strides=1, padding='valid', use_bias=False) def fixed_padding(inputs, kernel_size=3): pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [0, 0]]) return padded_inputs class VDCNN(): def __init__(self, num_classes, sequence_max_length=20, num_quantized_chars=50000, tags_vocab_size=44, deps_vocab_size=47, embedding_size=300, depth=9, downsampling_type='maxpool', use_he_uniform=True, optional_shortcut=False): # Depth to No. Layers
tensorflow.pad
10,214
import tensorflow as tf y = analysis_transform(x_coori,x) return tf.squeeze(y,axis=0) element = [x,x_coori] ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Analysis Transform") def loop_hyper_encoder(y): y = tf.expand_dims(y, 0) z = hyper_encoder(y) return tf.squeeze(z,axis=0) zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Hyper Encoder") z_hats, _ = entropy_bottleneck(zs, False) print("Quantize hyperprior") def loop_hyper_decoder(z): z = tf.expand_dims(z, 0) loc, scale = hyper_decoder(z) return tf.squeeze(loc, [0]), tf.squeeze(scale, [0]) locs, scales = tf.map_fn(loop_hyper_decoder, z_hats, dtype=(tf.float32, tf.float32),
tensorflow.map_fn
10,215
import tensorflow as tf from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.utils import config_util from object_detection.utils import test_case FLAGS = tf.flags.FLAGS def _get_configs_for_model(model_name): """Returns configurations for model.""" fname = os.path.join(tf.resource_loader.get_data_files_path(), 'samples/configs/' + model_name + '.config') label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), 'data/pet_label_map.pbtxt') data_path = os.path.join(tf.resource_loader.get_data_files_path(), 'test_data/pets_examples.record') configs = config_util.get_configs_from_pipeline_file(fname) override_dict = { 'train_input_path': data_path, 'eval_input_path': data_path,
tensorflow.resource_loader.get_data_files_path
10,216
import tensorflow as tf init_sd = 1.0 / np.sqrt(self.embedding_size) # Embedding matrices for entities and relationship types head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd) rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)
tensorflow.truncated_normal
10,217
import tensorflow as tf def decode_binary(data_bytes): # tf.decode_raw does not support bool as a decode type. As a result it is # necessary to decode to int8 (7 of the bits will be ignored) and then # cast to bool. return tf.reshape(tf.cast(tf.decode_raw(data_bytes, tf.int8), tf.bool), (batch_size,)) if self._is_training: mask_start_index = tf.decode_raw(
tensorflow.decode_raw
10,218
import tensorflow as tf config=tf.ConfigProto() config.gpu_options.allow_growth=True #init=tf.initialize_all_variables() def train(train_num=64,test_num=32,lr=1e-4,loop_count=10000,report_step=100,save_step=1000,restore=False): with tf.Session(config=config) as sess: sess.run(init) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) if restore: tf.train.Saver().restore(sess,path) feed_dict={ testnum: test_num, trainnum: train_num, learnrate:lr } for i in range(loop_count): loss_np, _, label_np, image_np, inf_np = sess.run( [loss, opti, batch_label, batch_image, inf],feed_dict=feed_dict) if i > 0 and i % report_step == 0:
tensorflow.train.Saver
10,219
import tensorflow as tf scores = d_layer_3_all if mask is not None: mask = tf.equal(mask, tf.ones_like(mask)) key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T]
tensorflow.where
10,220
import tensorflow as tf def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
tensorflow.python_io.TFRecordWriter
10,221
import tensorflow as tf else: probabilities = logits logits = tf.squeeze(logits, [-1]) predictions = logits per_example_loss = tf.square(logits - labels) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, probabilities, logits, predictions)
tensorflow.square
10,222
import tensorflow as tf tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length, ) ) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we won't cover
tensorflow.data.TFRecordDataset
10,223
import tensorflow as tf tf.random.categorical( logits[None], num_samples=self._sample_batch_size)) mask = tf.cast(mask, tf.float32)[:, None] relabelled_tasks = mask * orig_tasks + (1 - mask) * relabelled_tasks
tensorflow.cast
10,224
import tensorflow as tf tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False return tf.nn.relu(x) - alpha * tf.nn.relu(-x) def instance_norm(x,name='instance_norm'):
tensorflow.nn.relu
10,225
import tensorflow as tf def add_train_stats(model, hparams): with tf.variable_scope("stats") as scope: for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i]) tf.summary.scalar("before_loss", model.before_loss) tf.summary.scalar("after_loss", model.after_loss) if hparams.predict_linear: tf.summary.scalar("linear_loss", model.linear_loss) for i in range(hparams.tacotron_num_gpus): tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i]) tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) tf.summary.scalar("regularization_loss", model.regularization_loss) tf.summary.scalar("stop_token_loss", model.stop_token_loss) tf.summary.scalar("loss", model.loss) tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [tf.norm(grad) for grad in model.gradients]
tensorflow.summary.histogram
10,226
import tensorflow as tf inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs += shortcut inputs = tf.nn.relu(inputs) return inputs def _bottleneck_block_v2(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v2, with a bottleneck. Similar to _building_block_v2(), except using the "bottleneck" blocks described in: Convolution then batch normalization then ReLU as described by: Deep Residual Learning for Image Recognition https://arxiv.org/pdf/1512.03385.pdf
tensorflow.nn.relu
10,227
import tensorflow as tf in_shape = inputdata.get_shape().as_list() channel_axis = 3 if data_format == 'channels_last' else 1 in_channel = in_shape[channel_axis] assert in_channel is not None, "[Deconv2D] Input cannot have unknown channel!" padding = padding.upper() if w_init is None: w_init = tf.contrib.layers.variance_scaling_initializer() if b_init is None: b_init = tf.constant_initializer() ret = tf.layers.conv2d_transpose(inputs=inputdata, filters=out_channel, kernel_size=kernel_size, strides=stride, padding=padding, data_format=data_format,
tensorflow.contrib.layers.variance_scaling_initializer
10,228
import tensorflow as tf @layer def relu_layer(tensor): out = tf.nn.relu(tensor) return out @layer def tanh_layer(tensor): out = tf.nn.tanh(tensor) return out @layer def softmax_layer(tensor, softmax_func=None, **opts): if softmax_func is None: softmax_func = tf.nn.softmax out = softmax_func(tensor)
tensorflow.nn.tanh
10,229
import tensorflow as tf scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
tensorflow.matmul
10,230
import tensorflow as tf tf.flags.DEFINE_boolean( 'use_nccl', True, 'Whether to use nccl all-reduce primitives where possible') # Distributed training flags. tf.flags.DEFINE_string('job_name', '', 'One of "ps", "worker", "". Empty for local training') tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job') tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers') tf.flags.DEFINE_boolean('cross_replica_sync', True, '') # Summary and Save & load checkpoints. tf.flags.DEFINE_integer('summary_verbosity', 0,
tensorflow.flags.DEFINE_string
10,231
import tensorflow as tf # Placeholders self.sess = tf.Session(config=config) self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0] self.a_bound = (env.action_space.high - env.action_space.low) / 2 self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action') self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state') self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage') self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r') # Dataset with experience replay self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
tensorflow.placeholder
10,232
import tensorflow as tf tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None) mdl.BProp() self.assertEqual(decoder_theta, mdl.theta.decoder) def testFProp(self): with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() tf.global_variables_initializer().run() test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) actual_var_names = [_.name for _ in tf.all_variables()] print('all vars \n', '\n'.join(actual_var_names)) expected_var_names = [ 'global_step:0', 'test_mdl/enc/conv_L0/w/var:0', 'test_mdl/enc/conv_L0/beta/var:0', 'test_mdl/enc/conv_L0/gamma/var:0', 'test_mdl/enc/conv_L0/moving_mean/var:0', 'test_mdl/enc/conv_L0/moving_variance/var:0', 'test_mdl/enc/conv_L1/w/var:0', 'test_mdl/enc/conv_L1/beta/var:0', 'test_mdl/enc/conv_L1/gamma/var:0', 'test_mdl/enc/conv_L1/moving_mean/var:0', 'test_mdl/enc/conv_L1/moving_variance/var:0',
tensorflow.global_variables_initializer
10,233
from tensorflow.python.ops import math_ops def _cdf(self, x): x = self._assert_valid_sample(x, check_integer=False) return math_ops.igammac(math_ops.floor(x + 1), self.rate)
tensorflow.python.ops.math_ops.floor
10,234
from tensorflow.python.ops import common_shapes """Shape function for LRNGrad op.""" in_grads_shape = op.inputs[0].get_shape().with_rank(4) in_image_shape = op.inputs[1].get_shape().with_rank(4) out_image_shape = op.inputs[2].get_shape().with_rank(4) return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)] ops.RegisterShape("Softmax")( common_shapes.unchanged_shape_with_rank(2)) @ops.RegisterShape("InTopK") def _InTopKShape(op): """Shape function for InTopK op.""" predictions_shape = op.inputs[0].get_shape().with_rank(2) targets_shape = op.inputs[1].get_shape().with_rank(1)
tensorflow.python.ops.common_shapes.unchanged_shape_with_rank
10,235
import tensorflow as tf double_obs_ph = target_policy.obs_ph if double_q: with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")): double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, layers=layers) double_q_values = double_policy.q_values double_obs_ph = double_policy.obs_ph with tf.variable_scope("loss", reuse=reuse): # set up placeholders act_t_ph = tf.placeholder(tf.int32, [None], name="action") rew_t_ph = tf.placeholder(tf.float32, [None], name="reward") done_mask_ph = tf.placeholder(tf.float32, [None], name="done") importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight") # q scores for actions which we know were selected in the given state. q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1) q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1) else: q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
tensorflow.placeholder
10,236
import tensorflow as tf tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev())) tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf)) self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False, batch_size=64): reg = None with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_ain = tf.expand_dims(layer_a2, axis=1) out_a, state_final_a = tf.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a) cell_out_a = tf.reshape(out_a, [-1, 256]) mu = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) sigma = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params, state_init_a, state_final_a def build_cnet(self, state_in, name, reuse=False, batch_size=64): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256)
tensorflow.reshape
10,237
import tensorflow as tf def unsqueeze_2x2(input_): """Unsqueezing operation: reshape to convert channels into space.""" if isinstance(input_, (float, int)): return input_ shape = input_.get_shape().as_list() batch_size = shape[0] height = shape[1] width = shape[2] channels = shape[3] if channels % 4 != 0: raise ValueError("Number of channels not divisible by 4.") res = tf.reshape(input_, [batch_size, height, width, channels // 4, 2, 2]) res = tf.transpose(res, [0, 1, 4, 2, 5, 3]) res = tf.reshape(res, [batch_size, 2 * height, 2 * width, channels // 4]) return res # batch norm def batch_norm(input_, dim, name, scale=True, train=True, epsilon=1e-8, decay=.1,
tensorflow.reshape
10,238
import tensorflow as tf tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold()
tensorflow.train.Scaffold
10,239
import tensorflow as tf #print(np.shape(up1)) up2 = common_deconv2d(up1,self.gf*4,name='up2') # 16x16 -> 32x32 up3 = common_deconv2d(up2,self.gf*2,name='up3') # 32x32 -> 64x64 up4 = common_deconv2d(up3,self.gf,name='up4') # 64x64 -> 128x128 out_img = tf.contrib.layers.conv2d_transpose(up4,self.channels,kernel_size=4,stride=2,padding='SAME',activation_fn=tf.nn.tanh) # 128x128 -> 256x256 #print('out_img',(np.shape(out_img))) return out_img def build_discriminator(self,image,reuse=False,name='discriminator'): with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False def lrelu(x, alpha,name='lrelu'): with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False
tensorflow.variable_scope
10,240
import tensorflow as tf offset_width: a scalar tensor indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.']) cropped_shape = control_flow_ops.with_dependencies( [rank_assertion], tf.pack([crop_height, crop_width, original_shape[2]])) size_assertion = tf.Assert( tf.logical_and(
tensorflow.shape
10,241
import tensorflow as tf 'image_size': dataset.image_size, 'K': dataset.classes, 'norm_mean': norm_mean, 'norm_std': norm_std } return (images, classes, train_params) def _build_model(self, **knobs): use_dynamic_arch = knobs['downscale'] # Create graph graph = tf.Graph() with graph.as_default(): # Define input placeholders to graph ph = self._make_placeholders() # Use fixed archs if specified, otherwise use placeholders' (normal_arch, reduction_arch) = self._get_fixed_cell_archs(**knobs) normal_arch = normal_arch if not use_dynamic_arch else ph.normal_arch reduction_arch = reduction_arch if not use_dynamic_arch else ph.reduction_arch # Initialize steps variable
tensorflow.Graph
10,242
import tensorflow as tf k_h=3,k_w=3,stddev=0.02) : assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size' with tf.variable_scope(name) : self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim], initializer=tf.random_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
tensorflow.random_normal_initializer
10,243
import tensorflow as tf @registry.register_model class FeedForwardCnnSmallCategoricalPolicy(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs_raw"] # Axis 0 - Batch. # Axis 1 - Input Frames, 4 frames. # Axis 2, 3 - Height & Width. # Axis 4 - Channels RGB, 3 colours. x = tf.transpose(observations, [0, 2, 3, 1, 4]) x_shape = common_layers.shape_list(x) x = tf.reshape(x, x_shape[:-2] + [-1]) dropout = getattr(self.hparams, "dropout_ppo", 0.0) with tf.variable_scope("feed_forward_cnn_small"): x = tf.cast(x, tf.float32) / 255.0 x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2), activation=tf.nn.relu, padding="same") x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2), activation=tf.nn.relu, padding="same") flat_x = tf.layers.flatten(x) if self.use_epochs: epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32) # Randomly set epoch to 0 in some cases as that's the inference value. rand = tf.random.uniform([x_shape[0]])
tensorflow.reshape
10,244
import tensorflow as tf h5f.close() if len(timestamps_batch) > 0: yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch) def cnn_bi_lstm_model(x, amp_factor, bil_lstm_win_size, num_classes): logits = cnn_model(x, amp_factor=amp_factor) logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256*amp_factor]) forward_cell = tf.nn.rnn_cell.LSTMCell(128) backward_cell = tf.nn.rnn_cell.LSTMCell(128) encoder_outputs,_ = tf.nn.bidirectional_dynamic_rnn( forward_cell, backward_cell, logits, dtype=tf.float32 ) encoder_outputs = tf.concat(encoder_outputs, axis=2) logits = tf.reshape(tf.layers.dense(encoder_outputs, units=num_classes), [-1, bil_lstm_win_size, num_classes]) return logits
tensorflow.nn.rnn_cell.LSTMCell
10,245
import tensorflow as tf return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def wmt_concat_preprocess(dataset, training, max_length=-1, max_eval_length=-1): """Preprocessing for WMT: filter exceeding maximum length and concatenate.""" dataset = wmt_preprocess(dataset, training, max_length, max_eval_length) def concat_and_add_mask(features, targets): inp = features['inputs'] pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0) concat = tf.concat([inp, pad, targets], axis=0) mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0) features['inputs'] = concat features['mask'] = mask return features, concat dataset = dataset.map(concat_and_add_mask) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training'])
tensorflow.concat
10,246
import tensorflow as tf for k, v in six.iteritems(features): v = tf.convert_to_tensor(v) v_shape = common_layers.shape_list(v) if not v_shape: v = tf.expand_dims(v, axis=-1) v_shape = [1] if v_shape == [1]: v = tf.tile(v, [self._num_datashards]) sharded_features[k] = self._data_parallelism(tf.identity, tf.split( v, self._num_datashards, 0)) return sharded_features
tensorflow.tile
10,247
from tensorflow.contrib.eager.python.examples.spinn import data inference_sentences=("( foo ( bar . ) )", None)) with self.assertRaises(ValueError): spinn.train_or_infer_spinn(embed, word2index, None, None, None, config) def testTrainSpinn(self): """Test with fake toy SNLI data and GloVe vectors.""" # 1. Create and load a fake SNLI data file and a fake GloVe embedding file. snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0") fake_train_file = self._create_test_data(snli_1_0_dir) vocab = data.load_vocabulary(self._temp_data_dir) word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab) train_data = data.SnliData(fake_train_file, word2index) dev_data = data.SnliData(fake_train_file, word2index) test_data = data.SnliData(fake_train_file, word2index) # 2. Create a fake config. config = _test_spinn_config( data.WORD_VECTOR_LEN, 4, logdir=os.path.join(self._temp_data_dir, "logdir")) # 3. Test training of a SPINN model. trainer = spinn.train_or_infer_spinn( embed, word2index, train_data, dev_data, test_data, config) # 4. Load train loss values from the summary files and verify that they # decrease with training. summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
tensorflow.contrib.eager.python.examples.spinn.data.SnliData
10,248
import tensorflow as tf # this creates an operation to add to all trainable variables a white noise of param # std = tf.sqrt(variance)/10 def create_random_update_op(self): vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) update_opts = [] for var in vars: _, variance = tf.nn.moments(tf.reshape(var,[-1]),axes=[0]) normal = tf.distributions.Normal(loc=0.0, scale=tf.sqrt(variance)/10) white_noise = normal.sample(var.get_shape()) update_opts.append(var.assign(var + white_noise)) self.random_update_op = tf.group(update_opts) #apply clipping
tensorflow.reshape
10,249
import tensorflow as tf stn = (LinearWrap(image) .AvgPooling('downsample', 2) .Conv2D('conv0', 20, 5, padding='VALID') .MaxPooling('pool0', 2) .Conv2D('conv1', 20, 5, padding='VALID') .FullyConnected('fc1', out_dim=32) .FullyConnected('fct', out_dim=6, nl=tf.identity, W_init=tf.constant_initializer(), b_init=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))()) # output 6 parameters for affine transformation stn = tf.reshape(stn, [-1, 2, 3], name='affine') # bx2x3 stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2) coor = tf.reshape(tf.matmul(xys, stn), [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2]) coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2 sampled = ImageSample('warp', [image, coor], borderMode='constant') return sampled with argscope([Conv2D, FullyConnected], nl=tf.nn.relu): with tf.variable_scope('STN1'): sampled1 = get_stn(image) with tf.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard with tf.name_scope('visualization'): padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
tensorflow.transpose
10,250
import tensorflow as tf eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size iterations_per_loop = (eval_steps if FLAGS.mode == 'eval' else FLAGS.iterations_per_loop) save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=FLAGS.model_dir, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=iterations_per_loop, num_shards=FLAGS.num_shards, per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 )) return run_config def build_image_serving_input_receiver_fn(shape, dtype=tf.float32):
tensorflow.contrib.tpu.TPUConfig
10,251
import tensorflow as tf label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(label_ids.shape[0], dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold
tensorflow.trainable_variables
10,252
import tensorflow as tf y_net = net(x) if use_pretrained or pretrained_model_file_path: from .tensorflowcv.model_provider import init_variables_from_state_dict with tf.Session() as sess: from .tensorflowcv.model_provider import load_state_dict if pretrained_model_file_path: init_variables_from_state_dict(
tensorflow.Session
10,253
import tensorflow as tf eKuf = tf.transpose(expectation(pXnew, (kern, feat))) # M x N (psi1) if Luu is None: Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level) # M x M Luu = tf.cholesky(Kuu) # M x M if not white: q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True) Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1]) # remove line once issue 216 is fixed q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True) Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True) eKff = expectation(pXnew, kern) # N (psi0) eKuffu = expectation(pXnew, (kern, feat), (kern, feat)) # N x M x M (psi2) Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1]) # remove this line, once issue 216 is fixed Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True) Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True) # N x M x M cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True) # D x M x M if mean_function is None or isinstance(mean_function, mean_functions.Zero): e_related_to_mean = tf.zeros((num_data, num_func, num_func), dtype=settings.float_type) else:
tensorflow.matmul
10,254
import tensorflow as tf # [batch, 1, 1] y = tf.matmul(hidden, w_final_reshaped) + b_final_reshaped
tensorflow.matmul
10,255
import tensorflow as tf def conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0, dilation=1): assert padding in ["SAME", "VALID", "REFLECT", "PARTIAL"], 'valid paddings: "SAME", "VALID", "REFLECT", "PARTIAL"' if type(size) == int: size = [size, size] if init_stddev <= 0.0: init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32) else: init = tf.truncated_normal_initializer(stddev=init_stddev) if padding == "PARTIAL": with tf.variable_scope('mask'): _, h, w, _ = input.get_shape().as_list() slide_window = size[0] * size[1] mask = tf.ones(shape=[1, h, w, 1]) update_mask = tf.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id, kernel_size=size, kernel_initializer=tf.constant_initializer(1.0), strides=stride, padding="SAME", use_bias=False, trainable=False) mask_ratio = slide_window / (update_mask + 1e-8) update_mask = tf.clip_by_value(update_mask, 0.0, 1.0) mask_ratio = mask_ratio * update_mask
tensorflow.variable_scope
10,256
import tensorflow as tf input_image = np.concatenate([image, mask], axis=2) sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: input_image = tf.constant(input_image, dtype=tf.float32) output = model.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5
tensorflow.Session
10,257
import tensorflow as tf def get_predictions_and_loss(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids): self.dropout = self.get_dropout(self.config["dropout_rate"], is_training) self.lexical_dropout = self.get_dropout(self.config["lexical_dropout_rate"], is_training) self.lstm_dropout = self.get_dropout(self.config["lstm_dropout_rate"], is_training) num_sentences = tf.shape(context_word_emb)[0] max_sentence_length = tf.shape(context_word_emb)[1] context_emb_list = [context_word_emb] head_emb_list = [head_word_emb] if self.config["char_embedding_size"] > 0: char_emb = tf.gather(tf.get_variable("char_embeddings", [len(self.char_dict), self.config["char_embedding_size"]]), char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
tensorflow.shape
10,258
import tensorflow as tf # to all be the same size along the batch dimension. for elem_shape in elem_shapes: if (not elem_shape or not elem_shape[0] or elem_shape[0] != elem_shapes[0][0]): return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop) arg_tuples = zip(*[tf.unstack(elem) for elem in elems]) outputs = [fn(arg_tuple) for arg_tuple in arg_tuples] else: if not isinstance(elems, tf.Tensor): raise ValueError('`elems` must be a Tensor or list of Tensors.')
tensorflow.unstack
10,259
import tensorflow as tf inputs.append(inputs_) encoder_inputs_ = tf.concat(inputs, axis=2) # if encoder.convolution_activation.lower() == 'relu': encoder_inputs_ = tf.nn.relu(encoder_inputs_) if encoder.maxout_stride: if encoder.binary: raise NotImplementedError stride = encoder.maxout_stride k = tf.to_int32(tf.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler pad = tf.zeros([batch_size, k, tf.shape(encoder_inputs_)[2]]) encoder_inputs_ = tf.concat([encoder_inputs_, pad], axis=1) encoder_inputs_ = tf.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX', padding='VALID', strides=[stride]) encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride)) if encoder.highway_layers: x = encoder_inputs_ for j in range(encoder.highway_layers): size = x.shape[2].value with tf.variable_scope('highway_{}'.format(j + 1)):
tensorflow.shape
10,260
import tensorflow as tf top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb) # [k, c] top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1] top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb] attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb] with tf.variable_scope("f"): f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb] top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb] top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1] top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c] top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c] same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c] non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1] pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c] dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1] top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1] loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k] loss = tf.reduce_sum(loss) # [] return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends): span_emb_list = [] span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb] span_emb_list.append(span_start_emb)
tensorflow.logical_and
10,261
import tensorflow as tf # ul_u_eval_train = random_sphere(ul_images_eval_train.shape) # ul_u_eval_test = random_sphere(images_eval_test.shape) ul_u = placeholder_like(ul_images, "ul_u") ul_u_eval_train = placeholder_like(ul_images_eval_train, "ul_u_eval_train") ul_u_eval_test = placeholder_like(images_eval_test, "ul_u_eval_test") with tf.device(FLAGS.device): lr = tf.placeholder(tf.float32, shape=[], name="learning_rate") mom = tf.placeholder(tf.float32, shape=[], name="momentum") with tf.variable_scope("CNN") as scope: # Build training graph loss, train_op, global_step, ul_u_updated = build_training_graph( images, labels, ul_images, ul_u, lr, mom) scope.reuse_variables()
tensorflow.placeholder
10,262
import tensorflow as tf data_items_to_decoders = { "inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"), "targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"), }
tensorflow.contrib.slim.tfexample_decoder.Tensor
10,263
import tensorflow as tf else: bottom = out # l (not l + 1) because from previous layer W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) b = bias_variable([FLAGS.feats_per_layer]) Wx_b = tf.nn.conv3d(bottom, W, strides=[1,1,1,1,1], padding='VALID') + b out = tf.nn.relu(Wx_b) shape = out.get_shape() print('conv{}'.format(l+1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('pool'): bottom = out if l == num_layers - 1 and FLAGS.total_pool: kernel_size = bottom.get_shape()[2] out = tf.nn.max_pool3d(bottom, ksize=[1,1, kernel_size, kernel_size,1], strides=[1,1,1,1,1], padding='VALID') else: out = tf.nn.max_pool3d(bottom, ksize=[1,1, FLAGS.pool_kernel, FLAGS.pool_kernel,1], strides=[1,1,FLAGS.pool_stride,FLAGS.pool_stride,1], padding='VALID') shape = out.get_shape() print('pool{}'.format(l + 1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('scale'): bottom = out if FLAGS.pm[l + 1] == FLAGS.pm[l]: kernel_size = 1 # useless 1x1 pooling elif int(FLAGS.pm[l + 1]) < int(FLAGS.pm[l]): num_scales_prev = int(FLAGS.pm[l]) num_scales_current = int(FLAGS.pm[l + 1]) kernel_size = (num_scales_prev - num_scales_current) + 1
tensorflow.nn.max_pool3d
10,264
import tensorflow as tf out = tf.reshape(out, [self.out_modes[i], -1]) out = tf.transpose(out, [1, 0]) out = tf.reshape(out, [-1, np.prod(self.out_modes)]) # self.image_max_size = max(self.image_max_size, np.prod([val.value for val in out.get_shape()[1:]])) if self.use_bias: out = tf.add(out, self.bias, name='out') if self.activation is not None: out = self.activation(out) return out
tensorflow.add
10,265
import tensorflow as tf if not no_moving_average: moving_mean = self._make_var('moving_mean', (in_ch,), trainable=False, init_constant=0) moving_variance = self._make_var('moving_variance', (in_ch,), trainable=False, init_constant=1) if is_train: # For training, do batch norm with batch mean & variance # Update moving averages if training (X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True) update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay) update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay) with tf.control_dependencies([update_mean, update_variance]): X = tf.identity(X) else: # For prediction, do batch norm with computed moving mean & variance from training # Don't update moving averages if predicting (X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, mean=moving_mean, variance=moving_variance, epsilon=epsilon, is_training=False) else: (X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True) return X
tensorflow.identity
10,266
import tensorflow as tf self.updates = tf.group(denoise_updates, ranker_updates) def DenoisingNet(self, list_size, forward_only=False, scope=None): with tf.variable_scope(scope or "denoising_model"): # If we are in testing, do not compute propensity if forward_only: return tf.ones_like(self.output)#, tf.ones_like(self.output) input_vec_size = list_size*4 def propensity_network(input_data, index): reuse = None if index < 1 else True propensity_initializer = tf.constant_initializer(0.001) if self.hparams.constant_propensity_initialization else None with tf.variable_scope("propensity_network", initializer=propensity_initializer, reuse=reuse): output_data = input_data current_size = input_vec_size output_sizes = [ int((list_size+1)/2) + 1, int((list_size+1)/4) + 1, 1 ] for i in range(len(output_sizes)): expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]]) expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]]) output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
tensorflow.variable_scope
10,267
import tensorflow as tf def init_bias(shape, name=None): return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32))
tensorflow.zeros
10,268
from tensorflow.python.ops import math_ops with variable_scope.variable_scope(name, 'mean', [values, weights]): values = math_ops.to_float(values) total = _create_local('total', shape=[]) count = _create_local('count', shape=[]) if weights is not None: weights = math_ops.to_float(weights) values = math_ops.mul(values, weights) num_values = math_ops.reduce_sum(_broadcast_weights(weights, values)) else: num_values = math_ops.to_float(array_ops.size(values)) total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values)) count_compute_op = state_ops.assign_add(count, num_values) mean = _safe_div(total, count, 'value') with ops.control_dependencies([total_compute_op, count_compute_op]): update_op = _safe_div(total, count, 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, mean) if updates_collections: ops.add_to_collections(updates_collections, update_op)
tensorflow.python.ops.math_ops.reduce_sum
10,269
import tensorflow as tf " dtype=float64>") # There's no notion of partially known shapes in eager mode, so exit # early. if tf.executing_eagerly(): return mvn_dynamic = tfd.MultivariateNormalDiag(
tensorflow.executing_eagerly
10,270
from tensorflow.python.ops import array_ops return math_ops.argmax(logits, 1) # TODO(zakaria): use contrib losses. def _mean_squared_loss(logits, target): # To prevent broadcasting inside "-". if len(target.get_shape()) == 1: target = array_ops.expand_dims(target, dim=[1]) logits.get_shape().assert_is_compatible_with(target.get_shape()) return math_ops.square(logits - math_ops.to_float(target)) def _log_loss_with_two_classes(logits, target):
tensorflow.python.ops.array_ops.expand_dims
10,271
import tensorflow as tf def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name): with tf.variable_scope(name): rois, rpn_scores = tf.py_func(proposal_top_layer, [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._feat_stride, self._anchors, self._num_anchors], [tf.float32, tf.float32]) rois.set_shape([cfg.FLAGS.rpn_top_n, 5]) rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1]) return rois, rpn_scores def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name): with tf.variable_scope(name): # the returned rois have an extra column of zeros prepended as the first column rois, rpn_scores = tf.py_func(proposal_layer, [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode, self._feat_stride, self._anchors, self._num_anchors], [tf.float32, tf.float32]) rois.set_shape([None, 5]) rpn_scores.set_shape([None, 1]) return rois, rpn_scores def _crop_pool_layer(self, bottom, rois, name): with tf.variable_scope(name): # tf.squeeze() returns a tensor with all size-1 dimensions of the original input removed batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1]) # Get the normalized coordinates of bboxes
tensorflow.py_func
10,272
import tensorflow as tf per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tensorflow.logging.info
10,273
import tensorflow as tf else: assert tf.get_variable_scope().reuse is False """U-Net Generator""" def lrelu(x, alpha,name='lrelu'): with tf.variable_scope(name): return tf.nn.relu(x) - alpha * tf.nn.relu(-x) def instance_norm(x,name='instance_norm'): with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False epsilon = 1e-5 mean, var = tf.nn.moments(x, [1, 2], keep_dims=True) scale = tf.get_variable('scale',[x.get_shape()[-1]],
tensorflow.variable_scope
10,274
import tensorflow as tf self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1]) print(mtype, fig_obj_count, 1) if mtype == 1: values = sdf_values inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 4].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 4]) print(mtype, fig_obj_count, 2) fig_obj_count += 1 intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1))) union = tf.reduce_sum(tf.math.sign(sdf_values)) iou = intersection / union if not tf.math.is_nan(iou): ious.append(iou) status3 = False
tensorflow.reduce_max
10,275
import tensorflow as tf int(maximum_lower_multiple(x // num_replicas_in_sync, 8) * num_replicas_in_sync) for x in bucket_batch_sizes] else: bucket_batch_sizes = int(maximum_lower_multiple( bucket_batch_sizes // num_replicas_in_sync, 8) * num_replicas_in_sync) return dataset_utils.batch_examples_by_token( dataset, bucket_boundaries=bucket_boundaries, bucket_batch_sizes=bucket_batch_sizes, padding_values=padding_values, example_length_func=lambda x: {"feature": tf.size(x["feature"]), "label": tf.size(x["label"])}, extra_padded_shapes={"src_lang": [], "trg_lang": []} ) def build_metric_layer(self): return [SequenceTokenMetricLayer("src"), SequenceTokenMetricLayer("trg"), BatchCountMetricLayer("src")] def get_eval_metric(self, args, name="metric", ds=None): """ Returns a neurst.metrics.metric.Metric object for evaluation.""" if ds is None or not hasattr(ds, "trg_lang") or ds.trg_lang is None:
tensorflow.size
10,276
import tensorflow as tf beta_list.append(beta) with tf.variable_scope('lstm', reuse=(t!=0)): _, (c, h) = lstm_cell(inputs=tf.concat(axis=1, values=[x, context]), state=[c, h]) logits = self._decode_lstm(x, h, context, reuse=(t!=0)) sampled_word = tf.argmax(logits, 1) sampled_word_list.append(sampled_word) alphas = tf.transpose(tf.stack(alpha_list), (1, 0, 2)) # (N, T, L) betas = tf.transpose(tf.squeeze(beta_list), (1, 0)) # (N, T) sampled_captions = tf.transpose(tf.stack(sampled_word_list), (1, 0)) # (N, max_len) return alphas, betas, sampled_captions
tensorflow.squeeze
10,277
import tensorflow as tf with self.test_session(): # Creates a graph. v0 = tf.Variable(10.0, name="v0") var = tf.Variable(tf.constant(0, dtype=tf.int64)) count_up_to = var.count_up_to(3) input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue") qr = tf.train.QueueRunner(input_queue, [count_up_to]) tf.initialize_all_variables() # Creates a saver. save = tf.train.Saver({"v0": v0}) # Adds a set of collections. tf.add_to_collection("int_collection", 3) tf.add_to_collection("float_collection", 3.5) tf.add_to_collection("string_collection", "hello")
tensorflow.initialize_all_variables
10,278
import tensorflow as tf top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k] top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k] top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb] top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k] top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k] top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices) # [k] top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
tensorflow.gather
10,279
import tensorflow as tf with tf.Session() as sess: with tf.device("/cpu:0"): indices = tf.Variable(indices) values = tf.Variable(values) shape = tf.Variable(shape) st = tf.SparseTensor(indices, values, shape) st_handles = add_many_sparse_to_tensors_map(st)
tensorflow.Variable
10,280
import tensorflow as tf from tensorflow.python.framework import ops DEFAULT_BN_LAG = .0 def stable_var(input_, mean=None, axes=[0]): """Numerically more stable variance computation.""" if mean is None: mean = tf.reduce_mean(input_, axes) res = tf.square(input_ - mean) max_sqr = tf.reduce_max(res, axes) res /= max_sqr res = tf.reduce_mean(res, axes) res *= max_sqr return res def variable_on_cpu(name, shape, initializer, trainable=True): """Helper to create a Variable stored on CPU memory.
tensorflow.reduce_max
10,281
import tensorflow as tf
        tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms))  # visualize
        # gradients (in case of explosion)
        return tf.summary.merge_all()


def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss,
                   stop_token_loss, loss):
    values = [
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss",
                         simple_value=before_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss",
                         simple_value=after_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss",
                         simple_value=stop_token_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss),
    ]
    if linear_loss is not None:
        values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss",
                                       simple_value=linear_loss))
    test_summary = tf.Summary(value=values)
tensorflow.Summary.Value
10,282
import tensorflow as tf
    average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
tensorflow.nn.seq2seq.sequence_loss_by_example
10,283
import tensorflow as tf
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                      init_string)
tensorflow.logging.info
10,284
import tensorflow as tf
                    passage_word_idx, passage_mask, v, w_c, word_vocab)

            vocab_scores = tf.log(output_t)
            greedy_prediction = tf.reshape(tf.argmax(output_t, 1),[-1]) # calcualte greedy
            multinomial_prediction = tf.reshape(tf.multinomial(vocab_scores, 1),[-1]) # calculate multinomial
tensorflow.argmax
10,285
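Aside: a small tf.argmax example with invented logits, mirroring the greedy-prediction pattern in the seed; printed results assume TF 2 eager execution.

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
print(tf.argmax(logits, 1))  # [1 0] -- index of the largest logit in each row (greedy prediction)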
import tensorflow as tf
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.compat.v1.Session() as sess:
    # Run the initializer
    sess.run(init)

    # Add the fault injection code here to instrument the graph
    # We start injecting the fault right away here unlike earlier
    fi = ti.TensorFI(sess, name="NearestNeighbor", logLevel=50)

    # loop over test data
tensorflow.global_variables_initializer
10,286
import tensorflow as tf
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        # print("c", w.get_shape())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
tensorflow.truncated_normal_initializer
10,287
import tensorflow as tf
  all_layer_outputs = []
  if input_width != hidden_size:
    prev_output = dense_layer_2d(
        input_tensor, hidden_size, create_initializer(initializer_range),
        None, name="embedding_hidden_mapping_in")
  else:
    prev_output = input_tensor
  with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
    for layer_idx in range(num_hidden_layers):
      group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
      with tf.variable_scope("group_%d" % group_idx):
        with tf.name_scope("layer_%d" % layer_idx):
          layer_output = prev_output
          for inner_group_idx in range(inner_group_num):
            with tf.variable_scope("inner_group_%d" % inner_group_idx):
              layer_output = attention_ffn_block(
                  layer_output, hidden_size, attention_mask,
                  num_attention_heads, attention_head_size,
                  attention_probs_dropout_prob, intermediate_size,
                  intermediate_act_fn, initializer_range, hidden_dropout_prob)
              prev_output = layer_output
              all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    return all_layer_outputs
  else:
    return all_layer_outputs[-1]
tensorflow.variable_scope
10,288
import tensorflow as tf
            average_across_timesteps=False,
            average_across_batch=True)
        self._cost = tf.reduce_sum(loss)
        self._final_state = state

        if not is_training:
            return

        self._lr = tf.Variable(0., trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step())

        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name='new_learning_rate')
tensorflow.trainable_variables
10,289
import tensorflow as tf
import imageio
import argparse


def transform(image, resize_height=36, resize_width=64):
    cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    return np.array(cropped_image)/127.5 - 1.


def inverse_transform(images):
    return (images+1.)/2.


def lrelu(x, leak=0.2, name="lrelu"):
    return tf.maximum(x, leak*x)


def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        # print("c", w.get_shape())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
tensorflow.maximum
10,290
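Aside: a standalone tf.maximum example (values invented) showing the element-wise maximum behind the lrelu helper in the seed; printed result assumes TF 2 eager execution.

import tensorflow as tf

x = tf.constant([-3.0, 0.5, 2.0])
print(tf.maximum(x, 0.2 * x))  # [-0.6, 0.5, 2.0] -- element-wise max, i.e. a leaky ReLU with leak=0.2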
import tensorflow as tf
        """
        Build the custom CNN for the CIFAR-10 dataset.
        """
        # The input data holders (cf. shapes after prepa)
        self.X = tf.compat.v1.placeholder(tf.float32, shape = (None,
                                                               self.config.data["image_size"],
                                                               self.config.data["image_size"],
                                                               self.config.data["num_channels"]), name="X")  # ex. (50000, 32, 32, 3)
        self.y = tf.compat.v1.placeholder(tf.int32, shape = (None, self.config.data["num_categories"]), name="y")  # ex. (50000, 10)
        self.train = tf.compat.v1.placeholder(tf.bool)

        # The CNN architecture = conv/poo layers + flatten layer + connected layers
        with tf.name_scope("cnn"):
            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = tf.layers.conv2d(self.X, self.config.cifar10_cnn["num_filters"],
tensorflow.compat.v1.placeholder
10,291
import tensorflow as tf
        output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME")
        outputs = tf.concat((outputs, output), -1)
    outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training))
    return outputs


class Model:
    def __init__(self, num_layers, size_layers, learning_rate=1e-3, dropout=1.0):
        self.X = tf.placeholder(tf.int32, (None, None))
        self.training = tf.placeholder(tf.bool, None)
        lookup_table = tf.get_variable(
            "lookup_table",
            dtype=tf.float32,
            shape=[len(vocab), size_layers],
            initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
        )
        lookup_table = tf.concat((tf.zeros(shape=[1, size_layers]), lookup_table[1:, :]), 0)
        forward = tf.nn.embedding_lookup(lookup_table, self.X)
        self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled))
        self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1)
        self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:]
        self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1))
        batch_size = tf.shape(self.X)[0]
        seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1

        def cells(reuse=False):
            return tf.contrib.rnn.DropoutWrapper(
tensorflow.truncated_normal_initializer
10,292
import tensorflow as tf
    # Classification accuracy of encoder
    correct_pred = tf.equal(tf.argmax(encoder_output_label_, 1), tf.argmax(y_input, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Autoencoder loss
    autoencoder_loss = tf.reduce_mean(tf.square(x_target - decoder_output))

    # Gaussian Discriminator Loss
    dc_g_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_real), logits=d_g_real))
    dc_g_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_g_fake), logits=d_g_fake))
    dc_g_loss = dc_g_loss_fake + dc_g_loss_real

    # Categorical Discrimminator Loss
    dc_c_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real), logits=d_c_real))
    dc_c_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake), logits=d_c_fake))
tensorflow.ones_like
10,293
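Aside: a minimal sketch of tf.ones_like / tf.zeros_like with invented logits, as used in the seed to build "real" and "fake" discriminator targets; printed results assume TF 2 eager execution.

import tensorflow as tf

logits = tf.constant([[0.3], [1.2]])
print(tf.ones_like(logits))   # [[1.0], [1.0]] -- "real" targets, same shape and dtype as logits
print(tf.zeros_like(logits))  # [[0.0], [0.0]] -- "fake" targets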
from tensorflow.python.ops import array_ops
  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
tensorflow.python.ops.array_ops.size
10,294
import tensorflow as tf
    with tf.device(device):
      total_loss = tf.reduce_mean(losses)
      avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state)

      gradient_clip = FLAGS.gradient_clip
      learning_rate = self.model_conf.get_learning_rate()
      if self.dataset and FLAGS.num_epochs_per_decay > 0:
        num_batches_per_epoch = (
            self.dataset.num_examples_per_epoch() / self.batch_size)
        decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)

        # Decay the learning rate exponentially based on the number of steps.
        learning_rate = tf.train.exponential_decay(
            FLAGS.learning_rate,
            global_step,
            decay_steps,
            FLAGS.learning_rate_decay_factor,
            staircase=True)

      if gradient_clip is not None:
        clipped_grads = [
            (tf.clip_by_value(grad, -gradient_clip, +gradient_clip), var)
            for grad, var in avg_grads
        ]
      else:
        clipped_grads = avg_grads
tensorflow.train.exponential_decay
10,295
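Aside: a minimal TF 1.x-style sketch of tf.train.exponential_decay, matching the graph-mode API used in the seed; all hyperparameter values are invented. In TF 2 the equivalent schedule is tf.keras.optimizers.schedules.ExponentialDecay.

import tensorflow as tf  # TF 1.x graph-mode API

global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
    0.1,                # initial learning rate (illustrative value)
    global_step,
    decay_steps=1000,
    decay_rate=0.95,
    staircase=True)     # lr = 0.1 * 0.95 ** floor(step / 1000)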
import tensorflow as tf
        print(f"Obs_phs space: {obs_phs}")
        #assert 5 == 1
        #######################
        for var in tf.all_variables():
            print(var)

        batch_size = tf.shape(policy.obs_ph)[0]
        n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions,
                                           dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1,
                                         dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions,
                                 lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph,
                                             lambda: eps))
        _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
                                outputs=output_actions,
tensorflow.stack
10,296
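Aside: a small tf.stack example with invented tensors, showing how it packs same-shaped tensors along a new axis (the seed uses it to build a 1-D shape tensor); printed results assume TF 2 eager execution.

import tensorflow as tf

a = tf.constant([1, 2])
b = tf.constant([3, 4])
print(tf.stack([a, b]))          # [[1, 2], [3, 4]], shape [2, 2]
print(tf.stack([a, b], axis=1))  # [[1, 3], [2, 4]]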
import tensorflow as tf
tensorflow.train.ExponentialMovingAverage
10,297
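Aside: the seed for this row contains only the import, so here is a minimal TF 1.x-style sketch of the highlighted API, tf.train.ExponentialMovingAverage; the variable name and decay value are invented.

import tensorflow as tf  # TF 1.x graph-mode API

w = tf.Variable(1.0, name="w")
ema = tf.train.ExponentialMovingAverage(decay=0.999)
maintain_op = ema.apply([w])   # run this op after each training step
shadow_w = ema.average(w)      # the smoothed copy of w used at eval/export time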
from tensorflow.python.framework import ops
      return math_ops.select(
          math_ops.greater(true_positives + false_positives, 0),
          math_ops.div(true_positives, true_positives + false_positives),
          0,
          name)
    precision = compute_precision('value')
    with ops.control_dependencies([true_positives_update_op,
                                   false_positives_update_op]):
      update_op = compute_precision('update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, precision)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return precision, update_op


@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_recall(predictions, labels, ignore_mask=None, weights=None,
                     metrics_collections=None, updates_collections=None,
                     name=None):
tensorflow.python.framework.ops.add_to_collections
10,298
import tensorflow as tf
        biases = tf.Variable(tf.zeros([32]), name="biases")
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
      # Linear
      with tf.name_scope("softmax_linear"):
        weights = tf.Variable(
            tf.truncated_normal([32, 10], stddev=1.0 / math.sqrt(float(32))),
            name="weights")
        biases = tf.Variable(tf.zeros([10]), name="biases")
        logits = tf.matmul(hidden2, weights) + biases
        tf.add_to_collection("logits", logits)

      # Runs to logit.
      tf.initialize_all_variables().run()
      sess.run(logits)
      # Creates a saver.
      saver0 = tf.train.Saver()
      saver0.save(sess, saver0_ckpt)
      # Generates MetaGraphDef.
      saver0.export_meta_graph(filename)
tensorflow.matmul
10,299
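Aside: a minimal tf.matmul example with invented values, the matrix product behind the dense layers in the seed; printed result assumes TF 2 eager execution.

import tensorflow as tf

x = tf.constant([[1.0, 2.0]])    # shape [1, 2]
w = tf.constant([[3.0], [4.0]])  # shape [2, 1]
print(tf.matmul(x, w))           # [[11.0]] -- the x @ w product of a dense layer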