    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
        # `reset_states()` yourself at the time of your choosing.
        return [loss_tracker, mae_metric]
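If you skip the property, the equivalent housekeeping is to reset each tracked Metric object yourself at the time of your choosing; a minimal sketch, using the loss_tracker and mae_metric objects defined earlier:

# Manual alternative (sketch): without the `metrics` property, reset
# the stateful `Metric` objects yourself, e.g. before re-running
# training or evaluation.
loss_tracker.reset_states()
mae_metric.reset_states()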
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)

# We don't pass a loss or metrics here.
model.compile(optimizer="adam")

# Just use `fit` as usual -- you can use callbacks, etc.
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=5)
Epoch 1/5
32/32 [==============================] - 0s 645us/step - loss: 0.2661 - mae: 0.4126
Epoch 2/5
32/32 [==============================] - 0s 515us/step - loss: 0.2401 - mae: 0.3932
Epoch 3/5
32/32 [==============================] - 0s 605us/step - loss: 0.2283 - mae: 0.3833
Epoch 4/5
32/32 [==============================] - 0s 508us/step - loss: 0.2176 - mae: 0.3742
Epoch 5/5
32/32 [==============================] - 0s 448us/step - loss: 0.2070 - mae: 0.3654

<tensorflow.python.keras.callbacks.History at 0x151c8ee50>
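Because fit() is still driving the loop, the built-in conveniences keep working with a custom train_step; a minimal sketch of attaching a standard callback (EarlyStopping monitoring the tracked "loss" is just one illustrative choice):

# Built-in callbacks work unchanged; "loss" is the name of the
# `Mean` tracker exposed through the `metrics` property above.
early_stop = keras.callbacks.EarlyStopping(monitor="loss", patience=2)
model.fit(x, y, epochs=20, callbacks=[early_stop])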
Supporting sample_weight & class_weight

You may have noticed that our first basic example didn't make any mention of sample weighting. If you want to support the fit() arguments sample_weight and class_weight, you'd simply do the following:

- Unpack sample_weight from the data argument
- Pass it to compiled_loss & compiled_metrics (of course, you could also just apply it manually if you don't rely on compile() for losses & metrics -- see the sketch after the code below)

That's it. That's the list.
class CustomModel(keras.Model):
    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        if len(data) == 3:
            x, y, sample_weight = data
        else:
            sample_weight = None
            x, y = data

        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            # Compute the loss value.
            # The loss function is configured in `compile()`.
            loss = self.compiled_loss(
                y,
                y_pred,
                sample_weight=sample_weight,
                regularization_losses=self.losses,
            )

        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)

        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))

        # Update the metrics.
        # Metrics are configured in `compile()`.
        self.compiled_metrics.update_state(y, y_pred, sample_weight=sample_weight)

        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}
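If you don't rely on compile() for losses and metrics, the manual route mentioned in the list above is to fold the weights into the loss yourself. A minimal sketch of the loss computation inside train_step, assuming a plain mean-squared error (the reduction details here are illustrative):

# Manual weighting (sketch): compute per-sample losses and apply
# `sample_weight` yourself instead of passing it to `compiled_loss`.
per_sample_loss = tf.reduce_mean(tf.square(y - y_pred), axis=-1)
if sample_weight is not None:
    per_sample_loss *= tf.squeeze(sample_weight, axis=-1)
loss = tf.reduce_mean(per_sample_loss)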
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])

# You can now use the `sample_weight` argument
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
sw = np.random.random((1000, 1))
model.fit(x, y, sample_weight=sw, epochs=3)
Epoch 1/3
32/32 [==============================] - 0s 709us/step - loss: 0.6128 - mae: 1.0027
Epoch 2/3
32/32 [==============================] - 0s 681us/step - loss: 0.2476 - mae: 0.6092
Epoch 3/3
32/32 [==============================] - 0s 669us/step - loss: 0.1248 - mae: 0.4186

<tensorflow.python.keras.callbacks.History at 0x151d5a590>
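class_weight needs no extra handling in train_step: fit() converts the class-weight dict into per-sample weights before the data reaches your step, so the same unpacking covers it. A minimal sketch, assuming integer class labels (the labels and weight dict are illustrative):

# `class_weight` arrives in `train_step` as `sample_weight`, already
# mapped from the dict by `fit()`.
y_class = np.random.randint(0, 2, size=(1000, 1))
model.fit(x, y_class, class_weight={0: 1.0, 1: 2.0}, epochs=3)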
Providing your own evaluation step

What if you want to do the same for calls to model.evaluate()? Then you would override test_step in exactly the same way. Here's what it looks like:

class CustomModel(keras.Model):
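    def test_step(self, data):
        # Unpack the data
        x, y = data
        # Compute predictions (inference mode, so `training=False`)
        y_pred = self(x, training=False)
        # Update the metric tracking the loss
        self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Update the metrics configured in `compile()`.
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}

To try it out, construct and compile a fresh instance and call evaluate(); a minimal usage sketch mirroring the training examples above:

# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])

# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)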