        )
        # Add random noise to the labels - important trick!
        labels += 0.05 * tf.random.uniform(tf.shape(labels))

        # Train the discriminator
        with tf.GradientTape() as tape:
            predictions = self.discriminator(combined_images)
            d_loss = self.loss_fn(labels, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(
            zip(grads, self.discriminator.trainable_weights)
        )

        # Sample random points in the latent space
        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))

        # Assemble labels that say "all real images"
        misleading_labels = tf.zeros((batch_size, 1))

        # Train the generator (note that we should *not* update the weights
        # of the discriminator)!
        with tf.GradientTape() as tape:
            predictions = self.discriminator(self.generator(random_latent_vectors))
            g_loss = self.loss_fn(misleading_labels, predictions)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
        return {"d_loss": d_loss, "g_loss": g_loss}
Let's test-drive it:
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)

gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)

# To limit the execution time, we only train on 100 batches. You can train on
# the entire dataset. You will need about 20 epochs to get nice results.
gan.fit(dataset.take(100), epochs=1)

100/100 [==============================] - 60s 591ms/step - d_loss: 0.4534 - g_loss: 0.9839
<tensorflow.python.keras.callbacks.History at 0x151e64290>
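As an optional follow-up (a minimal sketch, not part of the original example), you can sample a few images from the trained generator; this assumes the gan model and the latent_dim value defined earlier:

random_latent_vectors = tf.random.normal(shape=(10, latent_dim))
generated_images = gan.generator(random_latent_vectors)
# Assuming the generator ends in a sigmoid (outputs in [0, 1]), rescale to 0-255.
generated_images = (generated_images.numpy() * 255).astype("uint8")
print(generated_images.shape)  # expected: (10, 28, 28, 1) for MNIST-sized outputs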
The ideas behind deep learning are simple, so why should their implementation be painful?

Understanding masking & padding
Authors: Scott Zhu, Francois Chollet
Date created: 2019/07/16
Last modified: 2020/04/14
Description: Complete guide to using mask-aware sequence layers in Keras.
Setup
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
Introduction
Masking is a way to tell sequence-processing layers that certain timesteps in an input are missing, and thus should be skipped when processing the data.
Padding is a special form of masking where the masked steps are at the start or the end of a sequence. Padding comes from the need to encode sequence data into contiguous batches: in order to make all sequences in a batch fit a given standard length, it is necessary to pad or truncate some sequences.
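As a quick illustration (a minimal sketch, not from this guide, reusing the imports from the Setup section above), a layer such as Embedding called with mask_zero=True attaches a boolean mask to its output that flags the zero-padded timesteps:

sample = np.array([[71, 1331, 4231, 0, 0, 0]])  # a hypothetical zero-padded sample
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
embedded = embedding(sample)
print(embedded._keras_mask)  # True for real timesteps, False for the padded ones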
Let's take a close look.
Padding sequence data
When processing sequence data, it is very common for individual samples to have different lengths. Consider the following example (text tokenized as words):
[
  ["Hello", "world", "!"],
  ["How", "are", "you", "doing", "today"],
  ["The", "weather", "will", "be", "nice", "tomorrow"],
]
After vocabulary lookup, the data might be vectorized as integers, e.g.:
[
  [71, 1331, 4231],
  [73, 8, 3215, 55, 927],
  [83, 91, 1, 645, 1253, 927],
]
The data is a nested list where individual samples have length 3, 5, and 6, respectively. Since the input data for a deep learning model must be a single tensor (of shape e.g. (batch_size, 6, vocab_size) in this case), samples that are shorter than the longest item need to be padded with some placeholder value (alternatively, one might also truncate long samples before padding short samples).
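For instance, padding the integer data above to the common length of 6 with zeros at the end ("post" padding) would give:

[
  [71, 1331, 4231, 0, 0, 0],
  [73, 8, 3215, 55, 927, 0],
  [83, 91, 1, 645, 1253, 927],
]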
Keras provides a utility function to truncate and pad Python lists to a common length: tf.keras.preprocessing.sequence.pad_sequences.
raw_inputs = [
    [711, 632, 71],
    [73, 8, 3215, 55, 927],
    [83, 91, 1, 645, 1253, 927],
]

# By default, this will pad using 0s; it is configurable via the
# "value" parameter.
# Note that you could use "pre" padding (at the beginning) or
# "post" padding (at the end).
padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(
    raw_inputs, padding="post"
)
print(padded_inputs)