Note that the weights w and b are automatically tracked by the layer upon being set as layer attributes:
assert linear_layer.weights == [linear_layer.w, linear_layer.b]

Note that you also have access to a quicker shortcut for adding a weight to a layer: the add_weight() method:
class Linear(keras.layers.Layer):
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        self.w = self.add_weight(
            shape=(input_dim, units), initializer="random_normal", trainable=True
        )
        self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b


x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)

tf.Tensor(
[[-0.01331179 -0.00605625 -0.01042787  0.17160884]
 [-0.01331179 -0.00605625 -0.01042787  0.17160884]], shape=(2, 4), dtype=float32)
Layers can have non-trainable weights
Besides trainable weights, you can add non-trainable weights to a layer as well. Such weights are meant not to be taken into account during backpropagation, when you are training the layer.

Here's how to add and use a non-trainable weight:
class ComputeSum(keras.layers.Layer):
    def __init__(self, input_dim):
        super(ComputeSum, self).__init__()
        self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)

    def call(self, inputs):
        self.total.assign_add(tf.reduce_sum(inputs, axis=0))
        return self.total


x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())

[2. 2.]
[4. 4.]
It's part of layer.weights, but it gets categorized as a non-trainable weight:
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))

# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)

weights: 1
non-trainable weights: 1
trainable_weights: []
Best practice: deferring weight creation until the shape of the inputs is known
Our Linear layer above took an input_dim argument that was used to compute the shape of the weights w and b in __init__():
class Linear(keras.layers.Layer):
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        self.w = self.add_weight(
            shape=(input_dim, units), initializer="random_normal", trainable=True
        )
        self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
In many cases, you may not know in advance the size of your inputs, and you would like to lazily create weights when that value becomes known, some time after instantiating the layer.
In the Keras API, we recommend creating layer weights in the build(self, input_shape) method of your layer. Like this:
class Linear(keras.layers.Layer):
    def __init__(self, units=32):
        super(Linear, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
The __call__() method of your layer will automatically run build the first time it is called. You now have a layer that's lazy and thus easier to use:
# At instantiation, we don't know on what inputs this is going to get called
linear_layer = Linear(32)

# The layer's weights are created dynamically the first time the layer is called
y = linear_layer(x)
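As a quick sanity check (a minimal sketch, not part of the original example; the lazy_layer name and input shape are illustrative, and the imports from earlier in the guide are assumed), you can verify that the weights only come into existence on the first call:

lazy_layer = Linear(32)          # fresh instance, build() has not run yet
assert lazy_layer.weights == []  # no weights exist before the first call

_ = lazy_layer(tf.ones((2, 2)))  # __call__() triggers build(), creating w and b

assert len(lazy_layer.weights) == 2  # the kernel and bias are now tracked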
Layers are recursively composable
If you assign a Layer instance as an attribute of another Layer, the outer layer will start tracking the weights of the inner layer.
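For example, here is a minimal sketch (the MLPBlock name and layer sizes are illustrative, assuming the lazily-built Linear layer defined above) that composes several Linear layers and lets the outer layer collect their weights:

class MLPBlock(keras.layers.Layer):
    def __init__(self):
        super(MLPBlock, self).__init__()
        # Inner layers assigned as attributes are tracked by the outer layer
        self.linear_1 = Linear(32)
        self.linear_2 = Linear(32)
        self.linear_3 = Linear(1)

    def call(self, inputs):
        x = self.linear_1(inputs)
        x = tf.nn.relu(x)
        x = self.linear_2(x)
        x = tf.nn.relu(x)
        return self.linear_3(x)


mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64)))  # the first call creates the weights of all inner layers
print("weights:", len(mlp.weights))                    # 6: one kernel and one bias per Linear
print("trainable weights:", len(mlp.trainable_weights))

Because the inner Linear layers are built lazily, their weights are also only created when the outer layer first sees an input.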