kevinwang676 commited on
Commit
9017242
·
verified ·
1 Parent(s): 9454301

Create test.py

Browse files
Files changed (1) hide show
  1. test.py +55 -0
test.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Auto-encoder to extract features from DNA Methylation and CNV data.
#
# Architecture: a two-layer stacked denoising autoencoder
#   input -> Dense(500) -> BN -> Dropout -> Dense(200) -> BN -> Dropout
#         -> Dense(500) -> BN -> Dense(num_in_neurons, sigmoid)
# trained to reconstruct clean inputs from noisy ones (MSE loss).
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# NOTE(review): this script references `num_in_neurons`, `x_train_noisy`,
# `x_train`, `x_test_noisy`, and `x_test`, none of which are defined in
# this file. As written it raises NameError unless these are injected by
# a preceding step (e.g. exec'd after a data-loading script or pasted
# into a notebook that defines them). TODO: confirm where they come from.

with tf.device('/gpu:0'):
    # Sizes of the two stacked encoded representations (the 200-unit
    # layer is the feature bottleneck extracted by `myencoder`).
    encoding_dim1 = 500
    encoding_dim2 = 200

    # Regularization strengths: L1 on activations encourages sparse
    # codes; L2 on weights discourages large weights.
    lambda_act = 0.0001
    lambda_weight = 0.001

    # Input placeholder: one sample of `num_in_neurons` features.
    input_data = Input(shape=(num_in_neurons,))

    # First encoded representation of the input.
    encoded = Dense(encoding_dim1, activation='relu',
                    activity_regularizer=regularizers.l1(lambda_act),
                    kernel_regularizer=regularizers.l2(lambda_weight),
                    name='encoder1')(input_data)
    encoded = BatchNormalization()(encoded)
    encoded = Dropout(0.5)(encoded)

    # Second (bottleneck) encoded representation of the input.
    encoded = Dense(encoding_dim2, activation='relu',
                    activity_regularizer=regularizers.l1(lambda_act),
                    kernel_regularizer=regularizers.l2(lambda_weight),
                    name='encoder2')(encoded)
    encoded = BatchNormalization()(encoded)
    encoded = Dropout(0.5)(encoded)

    # First lossy reconstruction of the input.
    decoded = Dense(encoding_dim1, activation='relu', name='decoder1')(encoded)
    decoded = BatchNormalization()(decoded)

    # Final lossy reconstruction. Sigmoid output presumes inputs are
    # scaled to [0, 1] — TODO confirm upstream preprocessing.
    decoded = Dense(num_in_neurons, activation='sigmoid', name='decoder2')(decoded)

    # Full model: input -> reconstruction.
    autoencoder = Model(inputs=input_data, outputs=decoded)

    # Encoder-only model: input -> bottleneck features. Shares layers
    # (and therefore weights) with `autoencoder`.
    myencoder = Model(inputs=input_data, outputs=encoded)
    autoencoder.compile(optimizer=Adam(), loss='mse')

    # Callbacks: stop once validation loss plateaus for 5 epochs, and
    # keep only the best weights seen so far on disk.
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=5, verbose=1),
        ModelCheckpoint('best_model.h5', monitor='val_loss',
                        save_best_only=True, verbose=1),
    ]

    # Denoising training: noisy inputs, clean reconstruction targets.
    print('training the autoencoder')
    autoencoder.fit(x_train_noisy, x_train,
                    epochs=25,
                    batch_size=8,
                    shuffle=True,
                    validation_data=(x_test_noisy, x_test),
                    callbacks=callbacks)
    autoencoder.trainable = False  # freeze autoencoder weights