Spaces: Runtime error
Update test.py
test.py CHANGED
@@ -1,4 +1,3 @@
-# Auto-encoder to extract features from DNA Methylation and CNV data
 import tensorflow as tf
 from tensorflow.keras.models import Model
 from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization
@@ -10,46 +9,60 @@ with tf.device('/gpu:0'):
     # this is the size of our encoded representations
     encoding_dim1 = 500
     encoding_dim2 = 200
-
+
     lambda_act = 0.0001
     lambda_weight = 0.001
+
     # this is our input placeholder
     input_data = Input(shape=(num_in_neurons,))
+
     # first encoded representation of the input
-    encoded = Dense(encoding_dim1, activation='relu',
+    encoded = Dense(encoding_dim1, activation='relu',
+                    activity_regularizer=regularizers.l1(lambda_act),
+                    kernel_regularizer=regularizers.l2(lambda_weight))(input_data)
     encoded = BatchNormalization()(encoded)
     encoded = Dropout(0.5)(encoded)
-
+
     # second encoded representation of the input
-
-
+    encoded = Dense(encoding_dim2, activation='relu',
+                    activity_regularizer=regularizers.l1(lambda_act),
+                    kernel_regularizer=regularizers.l2(lambda_weight))(encoded)
     encoded = BatchNormalization()(encoded)
     encoded = Dropout(0.5)(encoded)
-
+
     # first lossy reconstruction of the input
-
-    decoded = Dense(encoding_dim1, activation='relu', name='decoder1')(encoded)
+    decoded = Dense(encoding_dim1, activation='relu')(encoded)
     decoded = BatchNormalization()(decoded)
-
+
     # the final lossy reconstruction of the input
-    decoded = Dense(num_in_neurons, activation='sigmoid'
-
+    decoded = Dense(num_in_neurons, activation='sigmoid')(decoded)
+
     # this model maps an input to its reconstruction
     autoencoder = Model(inputs=input_data, outputs=decoded)
-
-    myencoder = Model(inputs=input_data, outputs=encoded)
     autoencoder.compile(optimizer=Adam(), loss='mse')
-
+
+    # setup callbacks
     callbacks = [
         EarlyStopping(monitor='val_loss', patience=5, verbose=1),
         ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True, verbose=1)
     ]
+
     # training
-    print('
+    print('Training the autoencoder')
     autoencoder.fit(x_train_noisy, x_train,
-                    epochs=
-                    batch_size=
+                    epochs=50,
+                    batch_size=16,
                     shuffle=True,
                     validation_data=(x_test_noisy, x_test),
                     callbacks=callbacks)
-
+
+    # Load best model
+    autoencoder.load_weights('best_model.h5')
+
+    # Freeze the weights
+    autoencoder.trainable = False
+
+    # Use the trained encoder for predictions
+    myencoder = Model(inputs=input_data, outputs=encoded)
+    ae_train = myencoder.predict(x_train)
+    ae_test = myencoder.predict(x_test)
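
The hunk header shows that this block runs inside with tf.device('/gpu:0'):, and the updated code also depends on names defined in unchanged lines of test.py that the diff does not display: regularizers, Adam, EarlyStopping, ModelCheckpoint, num_in_neurons, and the x_train/x_test arrays with their noisy copies. The sketch below shows one way that surrounding setup could look; the tf.keras import paths are standard, but the array shapes, the noise level, and the random placeholder data are assumptions for illustration, not the Space's actual preprocessing.

# Hypothetical setup sketch: only the tf.keras import locations are standard;
# the data shapes, noise level, and random placeholder arrays are assumptions.
import numpy as np
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# Placeholder feature matrices (samples x methylation/CNV features), scaled to
# [0, 1] so they match the sigmoid reconstruction layer.
x_train = np.random.rand(800, 2000).astype('float32')   # assumed shape
x_test = np.random.rand(200, 2000).astype('float32')    # assumed shape
num_in_neurons = x_train.shape[1]

# The fit() call trains on noisy inputs against clean targets (a denoising
# autoencoder), so corrupted copies of the data are needed as well.
noise_factor = 0.1                                       # assumed value
x_train_noisy = np.clip(x_train + noise_factor * np.random.normal(size=x_train.shape), 0.0, 1.0)
x_test_noisy = np.clip(x_test + noise_factor * np.random.normal(size=x_test.shape), 0.0, 1.0)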