nanduriprudhvi committed
Commit c5de1d5 · verified · 1 Parent(s): 9137f09

Update gru_model.py

Files changed (1)
  1. gru_model.py +244 -244
gru_model.py CHANGED
@@ -1,245 +1,245 @@
import tensorflow as tf
from tensorflow.keras import layers, models, Input  # type: ignore
import numpy as np
import h5py

# Custom ConvGRU layer: a Conv2D-based GRU cell unrolled over the time axis
class ConvGRU2DLayer(layers.Layer):
    def __init__(self, filters, kernel_size, return_sequences=True, **kwargs):
        super().__init__(**kwargs)
        self.filters = filters
        self.kernel_size = kernel_size
        self.return_sequences = return_sequences

    def build(self, input_shape):
        # 1x1 projection so the input matches the hidden state's channel count
        self.input_projection = layers.Conv2D(self.filters, (1, 1), padding="same")
        self.conv_z = layers.Conv2D(self.filters, self.kernel_size, padding="same", activation="sigmoid")  # update gate
        self.conv_r = layers.Conv2D(self.filters, self.kernel_size, padding="same", activation="sigmoid")  # reset gate
        self.conv_h = layers.Conv2D(self.filters, self.kernel_size, padding="same", activation="tanh")     # candidate state
        super().build(input_shape)

    def call(self, inputs):
        batch_size, _, height, width, _ = tf.unstack(tf.shape(inputs))
        time_steps = inputs.shape[1]  # static sequence length, required for the Python loop
        h_t = tf.zeros((batch_size, height, width, self.filters))
        outputs = []

        for t in range(time_steps):
            x_t = inputs[:, t, :, :, :]
            x_projected = self.input_projection(x_t)
            z = self.conv_z(x_projected) + self.conv_z(h_t)  # update gate
            r = self.conv_r(x_projected) + self.conv_r(h_t)  # reset gate
            h_tilde = self.conv_h(r * h_t)                   # candidate hidden state
            h_t = (1 - z) * h_t + z * h_tilde

            if self.return_sequences:
                outputs.append(h_t)

        if self.return_sequences:
            outputs = tf.stack(outputs, axis=1)
        else:
            outputs = h_t

        return outputs

# Spatio-temporal subnet: stacked ConvGRU / Conv3D / pooling blocks
def build_convgru_model(input_shape=(8, 95, 95, 2)):
    input_tensor = layers.Input(shape=input_shape)
    x = ConvGRU2DLayer(filters=32, kernel_size=(3, 3), return_sequences=True)(input_tensor)
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
    x = ConvGRU2DLayer(filters=64, kernel_size=(3, 3), return_sequences=True)(x)
    x = layers.Conv3D(filters=64, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
    x = ConvGRU2DLayer(filters=128, kernel_size=(3, 3), return_sequences=True)(x)
    x = layers.Conv3D(filters=128, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = layers.Flatten()(x)
    model = models.Model(inputs=input_tensor, outputs=x)
    return model

def radial_structure_subnet(input_shape):
    """
    Creates the subnet for extracting TC radial structure features using a
    five-branch CNN design with 2D convolutions.

    Parameters:
    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))

    Returns:
    - model: tf.keras.Model, the radial structure subnet model
    """
    input_tensor = layers.Input(shape=input_shape)

    # Split the (height, width) plane into four quadrants (NW, NE, SW, SE)
    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]

    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for a 95-pixel axis
    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48

    # Zero-pad the quadrants to the common (48, 48) size
    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)

    # Debug: confirm all four quadrants share the padded size
    print(nw_quadrant.shape)
    print(ne_quadrant.shape)
    print(sw_quadrant.shape)
    print(se_quadrant.shape)

    # Main branch (processes the full field), pooled and padded to the quadrant size
    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
    y = layers.MaxPool2D()(main_branch)
    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
                                      (0, target_width - y.shape[2])))(y)

    # Side branches (process the individual quadrants)
    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)

    # Alternative alignment of the side branches via upsampling (disabled):
    # nw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(nw_branch)
    # ne_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(ne_branch)
    # sw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(sw_branch)
    # se_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(se_branch)

    # First fusion: concatenate the main branch with the four quadrant branches
    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    # Second round of convolution + pooling on each quadrant branch
    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Second fusion
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    # Third round of convolution + pooling on each quadrant branch
    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Final fusion, then flatten to a feature vector
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)
    x = layers.Flatten()(x)

    model = models.Model(inputs=input_tensor, outputs=x)
    return model

# Example:
# input_shape = (95, 95, 8)  # 95x95 spatial resolution, 8 channels
# model = radial_structure_subnet(input_shape)
# model.summary()

def build_cnn_model(input_shape=(8, 8, 1)):
    input_tensor = layers.Input(shape=input_shape)

    # Single Conv2D -> BatchNorm -> ReLU block, then flatten
    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Flatten()(x)

    model = models.Model(inputs=input_tensor, outputs=x)
    return model

def build_combined_model():
    # Input shapes of the three subnets and the auxiliary vectors
    input_shape_3d = (8, 95, 95, 2)
    input_shape_radial = (95, 95, 8)
    input_shape_cnn = (8, 8, 1)
    input_shape_latitude = (8,)
    input_shape_longitude = (8,)
    input_shape_other = (9,)

    # Build the individual subnets
    model_3d = build_convgru_model(input_shape=input_shape_3d)
    model_radial = radial_structure_subnet(input_shape=input_shape_radial)
    model_cnn = build_cnn_model(input_shape=input_shape_cnn)

    # Auxiliary inputs
    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
    input_other = Input(shape=input_shape_other, name="other_input")

    # Project the auxiliary inputs through small dense layers
    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
    flat_other = layers.Dense(64, activation='relu')(input_other)

    # Concatenate all subnet outputs and auxiliary features
    combined = layers.concatenate([
        model_3d.output,
        model_radial.output,
        model_cnn.output,
        flat_latitude,
        flat_longitude,
        flat_other
    ])

    # Dense head producing a single regression output
    x = layers.Dense(128, activation='relu')(combined)
    x = layers.Dense(1, activation=None)(x)

    final_model = models.Model(
        inputs=[model_3d.input, model_radial.input, model_cnn.input,
                input_latitude, input_longitude, input_other],
        outputs=x
    )
    return final_model

# Inspect the saved weights file
- with h5py.File(r"E:\1MAIN PROJECT\tf_env\convgru-model.h5", 'r') as f:
+ with h5py.File(r"convgru-model.h5", 'r') as f:
    print(f.attrs.get('keras_version'))
    print(f.attrs.get('backend'))
    print("Model layers:", list(f['model_weights'].keys()))

# Rebuild the architecture, then load the pretrained weights
model = build_combined_model()
- model.load_weights(r"E:\1MAIN PROJECT\tf_env\convgru-model.h5")
+ model.load_weights(r"convgru-model.h5")

def predict(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
    return y
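
For reference, a minimal smoke test of the updated module might look like the sketch below. It assumes gru_model.py is importable and that a convgru-model.h5 weights file sits in the working directory (matching the new relative path); the random arrays are placeholders shaped to the declared inputs, and the file name smoke_test.py is illustrative only.

# smoke_test.py -- hypothetical usage sketch, not part of the commit
import numpy as np
import gru_model  # importing runs the weight inspection and load_weights above

batch = 2
reduced_images = np.random.rand(batch, 8, 95, 95, 2).astype("float32")  # ConvGRU subnet input
hov_m = np.random.rand(batch, 95, 95, 8).astype("float32")              # radial-structure subnet input
vmax_3d = np.random.rand(batch, 8, 8, 1).astype("float32")              # small-CNN subnet input
lat = np.random.rand(batch, 8).astype("float32")                        # latitude_input
lon = np.random.rand(batch, 8).astype("float32")                        # longitude_input
int_diff = np.random.rand(batch, 9).astype("float32")                   # other_input

y = gru_model.predict(reduced_images, hov_m, vmax_3d, lat, lon, int_diff)
print(y.shape)  # expected: (2, 1) -- one scalar prediction per sample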