import numpy as np
import tensorflow as tf
from tensorflow import keras


class CustomModel(keras.Model):
    def test_step(self, data):
        # Unpack the data
        x, y = data
        # Compute predictions
        y_pred = self(x, training=False)
        # Update the metrics tracking the loss
        self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Update the metrics.
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])

# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)

32/32 [==============================] - 0s 578us/step - loss: 0.7436 - mae: 0.7455
[0.744135320186615, 0.7466798424720764]
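The same test_step also runs when you evaluate from a tf.data.Dataset instead of NumPy arrays; here is a minimal sketch reusing the x and y arrays above (the batch size of 64 is an arbitrary choice, not from the original guide):

# Batch the same arrays into a tf.data.Dataset; evaluate() feeds
# each batch through the custom test_step defined above.
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(64)
model.evaluate(dataset)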
Wrapping up: an end-to-end GAN example

Let's walk through an end-to-end example that leverages everything you just learned.

Let's consider:

- A generator network meant to generate 28x28x1 images.
- A discriminator network meant to classify 28x28x1 images into two classes ("fake" and "real").
- One optimizer for each.
- A loss function to train the discriminator.
from tensorflow.keras import layers

# Create the discriminator
discriminator = keras.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.GlobalMaxPooling2D(),
        layers.Dense(1),
    ],
    name="discriminator",
)

# Create the generator
latent_dim = 128
generator = keras.Sequential(
    [
        keras.Input(shape=(latent_dim,)),
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        layers.Dense(7 * 7 * 128),
        layers.LeakyReLU(alpha=0.2),
        layers.Reshape((7, 7, 128)),
        layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
    ],
    name="generator",
)
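Before wiring these two models into a GAN, it can help to sanity-check that their shapes line up; this is a quick check of my own, assuming the generator and discriminator defined above:

# One random latent vector in -> one 28x28x1 image and one real/fake logit out.
z = tf.random.normal(shape=(1, latent_dim))
fake_image = generator(z)
score = discriminator(fake_image)
print(fake_image.shape, score.shape)  # (1, 28, 28, 1) (1, 1)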
Here's a feature-complete GAN class, overriding compile() to use its own signature, and implementing the entire GAN algorithm in 17 lines in train_step:
class GAN(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super(GAN, self).__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super(GAN, self).compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, real_images):
        if isinstance(real_images, tuple):
            real_images = real_images[0]
        # Sample random points in the latent space
        batch_size = tf.shape(real_images)[0]
        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))

        # Decode them to fake images
        generated_images = self.generator(random_latent_vectors)

        # Combine them with real images
        combined_images = tf.concat([generated_images, real_images], axis=0)

        # Assemble labels discriminating real from fake images
        labels = tf.concat(
            [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
        )
        # Add random noise to the labels - important trick!
        labels += 0.05 * tf.random.uniform(tf.shape(labels))

        # Train the discriminator
        with tf.GradientTape() as tape:
            predictions = self.discriminator(combined_images)
            d_loss = self.loss_fn(labels, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))

        # Sample new random points and label them all "real"
        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
        misleading_labels = tf.zeros((batch_size, 1))

        # Train the generator (note that we should *not* update
        # the weights of the discriminator)!
        with tf.GradientTape() as tape:
            predictions = self.discriminator(self.generator(random_latent_vectors))
            g_loss = self.loss_fn(misleading_labels, predictions)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
        return {"d_loss": d_loss, "g_loss": g_loss}
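Here is a usage sketch in the spirit of the guide, training the GAN on MNIST digits (the Adam learning rate of 0.0003 and the 100-batch cap are illustrative choices to keep the run short):

# Prepare the dataset: MNIST digits scaled to [0, 1] and shaped (28, 28, 1)
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)

gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
    # from_logits=True because the discriminator's final Dense(1) has no activation
    loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)

# Train on only 100 batches to keep the example fast; training on the full
# dataset for many epochs gives much nicer samples.
gan.fit(dataset.take(100), epochs=1)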