Upload 15 files
- 3dcnn-model.h5 +3 -0
- Trj_GRU.h5 +3 -0
- Trj_GRU.weights.h5 +3 -0
- app.py +344 -0
- cnn3d.py +230 -0
- convgru-model.h5 +3 -0
- convlstm.py +229 -0
- final_model.h5 +3 -0
- gru_model.py +245 -0
- lstm3dcnn-model.h5 +3 -0
- requirements.txt +66 -0
- spaio_temp.py +327 -0
- spatio_tempral_LSTM.h5 +3 -0
- trjgru.py +302 -0
- unetlstm.py +243 -0
3dcnn-model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5627ee3abce4137a73bbb083aadd443cead7a40c44acf263d352a2fe9f4791e1
+size 21623464
Trj_GRU.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7354f6aff1914c3865cfc96a00874b57fe1e90aa5f23e2a1f01780c1025e9847
+size 67594080
Trj_GRU.weights.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4459e83521bc34d8d0b7c6517b24cc6f9f9cba49a755bc511a65a4cf1017acd
+size 67554368
app.py
ADDED
@@ -0,0 +1,344 @@
+import streamlit as st
+import numpy as np
+from PIL import Image
+import cv2
+from scipy.ndimage import gaussian_filter
+
+# ------------------ TC CENTERING UTILS ------------------
+
+def find_tc_center(ir_image, smoothing_sigma=3):
+    smoothed_image = gaussian_filter(ir_image, sigma=smoothing_sigma)
+    min_coords = np.unravel_index(np.argmin(smoothed_image), smoothed_image.shape)
+    return min_coords[::-1]  # Return as (x, y)
+
+def extract_local_region(ir_image, center, region_size=95):
+    h, w = ir_image.shape
+    half_size = region_size // 2
+    x_min = max(center[0] - half_size, 0)
+    x_max = min(center[0] + half_size, w)
+    y_min = max(center[1] - half_size, 0)
+    y_max = min(center[1] + half_size, h)
+    region = np.full((region_size, region_size), np.nan)
+    extracted = ir_image[y_min:y_max, x_min:x_max]
+    region[:extracted.shape[0], :extracted.shape[1]] = extracted
+    return region
+
|
26 |
+
def generate_hovmoller(X_data):
|
27 |
+
hovmoller_list = []
|
28 |
+
for ir_images in X_data: # ir_images: shape (8, 95, 95)
|
29 |
+
time_steps = ir_images.shape[0]
|
30 |
+
hovmoller_data = np.zeros((time_steps, 95, 95))
|
31 |
+
for t in range(time_steps):
|
32 |
+
tc_center = find_tc_center(ir_images[t])
|
33 |
+
hovmoller_data[t] = extract_local_region(ir_images[t], tc_center, 95)
|
34 |
+
hovmoller_list.append(hovmoller_data)
|
35 |
+
return np.array(hovmoller_list)
|
+
+def reshape_vmax(vmax_values, chunk_size=8):
+    trimmed_size = (len(vmax_values) // chunk_size) * chunk_size
+    vmax_values_trimmed = vmax_values[:trimmed_size]
+    return vmax_values_trimmed.reshape(-1, chunk_size)
+
+def create_3d_vmax(vmax_2d_array):
+    # Initialize a 3D array of shape (N, 8, 8) filled with zeros
+    vmax_3d_array = np.zeros((vmax_2d_array.shape[0], 8, 8))
+
+    # Fill the diagonal for each row in the 3D array
+    for i in range(vmax_2d_array.shape[0]):
+        np.fill_diagonal(vmax_3d_array[i], vmax_2d_array[i])
+
+    # Add a channel axis: (N, 8, 8) -> (N, 8, 8, 1)
+    vmax_3d_array = vmax_3d_array.reshape(-1, 8, 8, 1)
+
+    return vmax_3d_array
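
# Worked example (illustration): eight Vmax readings become one (1, 8) row and
# then a (1, 8, 8, 1) tensor carrying the readings on its diagonal.
demo_vmax = np.array([35., 40., 45., 50., 55., 60., 65., 70.])
demo_vmax_2d = reshape_vmax(demo_vmax)       # shape (1, 8)
demo_vmax_3d = create_3d_vmax(demo_vmax_2d)  # shape (1, 8, 8, 1); demo_vmax_3d[0, k, k, 0] == demo_vmax[k]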
+
+def process_lat_values(data):
+    lat_values = data
+
+    # Trim the array to make its length divisible by 8
+    trimmed_size = (len(lat_values) // 8) * 8
+    lat_values_trimmed = lat_values[:trimmed_size]
+    lat_values_trimmed = np.array(lat_values_trimmed)  # Convert to NumPy array
+    # Reshape into a 2D array (rows of 8 values each)
+    lat_2d_array = lat_values_trimmed.reshape(-1, 8)
+
+    return lat_2d_array
+
+def process_lon_values(data):
+    lon_values = np.array(data)  # Convert to NumPy array
+    # Trim the array to make its length divisible by 8
+    trimmed_size = (len(lon_values) // 8) * 8
+    lon_values_trimmed = lon_values[:trimmed_size]
+
+    # Reshape into a 2D array (rows of 8 values each)
+    lon_2d_array = lon_values_trimmed.reshape(-1, 8)
+
+    return lon_2d_array
+
+def calculate_intensity_difference(vmax_2d_array):
+    """Calculates the intensity difference for each row in the Vmax 2D array."""
+    int_diff = []
+
+    for i in vmax_2d_array:
+        k = abs(i[0] - i[-1])  # Absolute difference between first & last element
+        i = np.append(i, k)    # Append the difference as the 9th element
+        int_diff.append(i)
+
+    return np.array(int_diff)
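
# Worked example (illustration): each row gains a 9th element, |first - last|,
# so eight intensities running from 30 to 58 yield a final entry of 28.
demo_rows = np.array([[30., 34., 38., 42., 46., 50., 54., 58.]])
demo_diff = calculate_intensity_difference(demo_rows)  # shape (1, 9), last column 28.0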
+
+# Process and reshape image data into fixed-size batches
+def process_images(images, batch_size=8, img_size=(95, 95, 1)):
+    num_images = images.shape[0]
+
+    # Trim the dataset to make it divisible by batch_size
+    trimmed_size = (num_images // batch_size) * batch_size
+    images_trimmed = images[:trimmed_size]
+
+    # Reshape into (x, batch_size, img_size[0], img_size[1], img_size[2])
+    images_reshaped = images_trimmed.reshape(-1, batch_size, *img_size)
+
+    return images_reshaped
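
# Worked example (illustration): 17 frames trim to 16 and regroup into two
# batches of eight, i.e. (17, 95, 95) -> (2, 8, 95, 95, 1).
demo_frames = np.zeros((17, 95, 95))
demo_batches = process_images(demo_frames)  # shape (2, 8, 95, 95, 1)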
+
+def process_cc_mask(cc_data):
+    """Processes CC mask images by trimming and reshaping into (x, 8, 95, 95, 1)."""
+    num_images = cc_data.shape[0]
+    batch_size = 8
+    trimmed_size = (num_images // batch_size) * batch_size  # Ensure divisibility by 8
+
+    images_trimmed = cc_data[:trimmed_size]  # Trim excess images
+    cc_images = images_trimmed.reshape(-1, batch_size, 95, 95, 1)  # Reshape
+
+    return cc_images
+
+def extract_convective_cores(ir_data):
+    """
+    Extract Convective Cores (CCs) from IR imagery based on the criteria in the paper.
+    Args:
+        ir_data: IR imagery of shape (height, width, channels); a single channel is assumed.
+    Returns:
+        cc_mask: Binary mask of CCs (1 for CC, 0 otherwise), same shape as ir_data.
+    """
+    height, width, c = ir_data.shape
+    cc_mask = np.zeros_like(ir_data, dtype=np.float32)  # Initialize CC mask
+
+    # Define the neighborhood (8-connected, plus the pixel itself)
+    neighbors = [(-1, -1), (-1, 0), (-1, 1),
+                 (0, -1),  (0, 0),  (0, 1),
+                 (1, -1),  (1, 0),  (1, 1)]
+
+    for i in range(1, height - 1):  # Avoid boundary pixels
+        for j in range(1, width - 1):
+            bt_ij = ir_data[i, j]
+
+            # Condition 1: BT < 253K
+            if (bt_ij >= 253).any():
+                continue
+
+            # Condition 2: BT <= BT_n for all neighbors
+            is_local_min = True
+            for di, dj in neighbors:
+                if ir_data[i + di, j + dj] < bt_ij:
+                    is_local_min = False
+                    break
+            if not is_local_min:
+                continue
+
+            # Condition 3: Gradient condition
+            numerator1 = (ir_data[i - 1, j] + ir_data[i + 1, j] - 2 * bt_ij) / 3.1
+            numerator2 = (ir_data[i, j - 1] + ir_data[i, j + 1] - 2 * bt_ij) / 8.0
+            lhs = numerator1 + numerator2
+            rhs = (4 / 5.8) * np.exp(0.0826 * (bt_ij - 217))
+
+            if lhs > rhs:
+                cc_mask[i, j] = 1  # Mark as CC
+
+    return cc_mask
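
# The three tests above, written out: a pixel with brightness temperature BT
# is a convective core when (1) BT < 253 K, (2) BT is a local minimum over its
# 8-connected neighborhood, and (3) the curvature score
#   (BT[i-1,j] + BT[i+1,j] - 2*BT[i,j]) / 3.1 + (BT[i,j-1] + BT[i,j+1] - 2*BT[i,j]) / 8.0
# exceeds (4 / 5.8) * exp(0.0826 * (BT[i,j] - 217)).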
+
+def compute_convective_core_masks(ir_data):
+    """Extracts convective core masks for each IR image."""
+    cc_mask = []
+
+    for i in ir_data:
+        c = extract_convective_cores(i)
+        c = np.array(c)
+        cc_mask.append(c)
+
+    return np.array(cc_mask)
+
+
+# ------------------ Streamlit UI ------------------
+st.set_page_config(page_title="TCIR Daily Input", layout="wide")
+
+st.title("Tropical Cyclone Input Uploader (8 sets/day)")
+
+ir_images = st.file_uploader("Upload 8 IR images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
+pmw_images = st.file_uploader("Upload 8 PMW images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
+
+if len(ir_images) != 8 or len(pmw_images) != 8:
+    st.warning("Please upload exactly 8 IR and 8 PMW images.")
+else:
+    st.success("Uploaded 8 IR and 8 PMW images successfully.")
+
+st.header("Input Latitude, Longitude, Vmax")
+lat_values, lon_values, vmax_values = [], [], []
+
+col1, col2, col3 = st.columns(3)
+with col1:
+    for i in range(8):
+        lat_values.append(st.number_input(f"Latitude {i+1}", key=f"lat{i}"))
+with col2:
+    for i in range(8):
+        lon_values.append(st.number_input(f"Longitude {i+1}", key=f"lon{i}"))
+with col3:
+    for i in range(8):
+        vmax_values.append(st.number_input(f"Vmax {i+1}", key=f"vmax{i}"))
+
+st.header("Select Prediction Model")
+model_choice = st.selectbox(
+    "Choose a model for prediction",
+    ("ConvGRU", "ConvLSTM", "Traj-GRU", "3DCNN", "spatiotemporalLSTM", "Unet_LSTM"),
+    index=0
+)
+
+# ------------------ Process Button ------------------
+if st.button("Submit for Processing"):
+
+    if len(ir_images) == 8 and len(pmw_images) == 8:
+        # Import only the selected model's predict function (each module builds
+        # its network and loads its weights at import time)
+        if model_choice == "Unet_LSTM":
+            from unetlstm import predict_unetlstm
+            model_predict_fn = predict_unetlstm
+        elif model_choice == "ConvGRU":
+            from gru_model import predict
+            model_predict_fn = predict
+        elif model_choice == "ConvLSTM":
+            from convlstm import predict_lstm
+            model_predict_fn = predict_lstm
+        elif model_choice == "3DCNN":
+            from cnn3d import predict_3dcnn
+            model_predict_fn = predict_3dcnn
+        elif model_choice == "Traj-GRU":
+            from trjgru import predict_trajgru
+            model_predict_fn = predict_trajgru
+        elif model_choice == "spatiotemporalLSTM":
+            from spaio_temp import predict_stlstm
+            model_predict_fn = predict_stlstm
+
+        ir_arrays = []
+        pmw_arrays = []
+        train_vmax_2d = reshape_vmax(np.array(vmax_values))
+        train_vmax_3d = create_3d_vmax(train_vmax_2d)
+        lat_processed = process_lat_values(lat_values)
+        lon_processed = process_lon_values(lon_values)
+        v_max_diff = calculate_intensity_difference(train_vmax_2d)
+
+        for ir in ir_images:
+            img = Image.open(ir).convert("L")
+            arr = np.array(img).astype(np.float32)
+            bt_arr = (arr / 255.0) * (310 - 190) + 190  # Rescale greyscale to brightness temperature (K)
+            resized = cv2.resize(bt_arr, (95, 95), interpolation=cv2.INTER_CUBIC)
+            ir_arrays.append(resized)
+
+        for pmw in pmw_images:
+            img = Image.open(pmw).convert("L")
+            arr = np.array(img).astype(np.float32) / 255.0
+            resized = cv2.resize(arr, (95, 95), interpolation=cv2.INTER_CUBIC)
+            pmw_arrays.append(resized)
+
+        ir = np.array(ir_arrays)
+        pmw = np.array(pmw_arrays)
+        # Group into (1, 8, 95, 95, 1) batches
+        ir_seq = process_images(ir)
+        pmw_seq = process_images(pmw)
+
+        X_train_new = ir_seq.reshape((1, 8, 95, 95))  # Shape: (1, 8, 95, 95)
+        cc_mask = compute_convective_core_masks(X_train_new)
+        hov_m_train = generate_hovmoller(X_train_new)
+        hov_m_train[np.isnan(hov_m_train)] = 0
+        hov_m_train = hov_m_train.transpose(0, 2, 3, 1)  # (1, 95, 95, 8)
+
+        cc_mask[np.isnan(cc_mask)] = 0
+        cc_mask = cc_mask.reshape(1, 8, 95, 95, 1)
+        i_images = cc_mask + ir_seq
+        reduced_images = np.concatenate([i_images, pmw_seq], axis=-1)
+        reduced_images[np.isnan(reduced_images)] = 0
+
+        if model_choice == "Unet_LSTM":
+            import tensorflow as tf
+
+            def tf_gradient_magnitude(images):
+                # Sobel kernels
+                sobel_x = tf.constant([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=tf.float32)
+                sobel_y = tf.constant([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=tf.float32)
+                sobel_x = tf.reshape(sobel_x, [3, 3, 1, 1])
+                sobel_y = tf.reshape(sobel_y, [3, 3, 1, 1])
+
+                images = tf.convert_to_tensor(images, dtype=tf.float32)
+                images = tf.expand_dims(images, -1)
+
+                gx = tf.nn.conv2d(images, sobel_x, strides=1, padding='SAME')
+                gy = tf.nn.conv2d(images, sobel_y, strides=1, padding='SAME')
+                grad_mag = tf.sqrt(tf.square(gx) + tf.square(gy) + 1e-6)
+
+                return tf.squeeze(grad_mag, -1).numpy()
+
+            def GM_maps_prep(ir):
+                GM_maps = []
+                for i in ir:
+                    GM_map = tf_gradient_magnitude(i)
+                    GM_maps.append(GM_map)
+                GM_maps = np.array(GM_maps)
+                return GM_maps
+
+            ir_seq = ir_seq.reshape(8, 95, 95, 1)
+            GM_maps = GM_maps_prep(ir_seq)
+            print(GM_maps.shape)
+            GM_maps = GM_maps.reshape(1, 8, 95, 95, 1)
+            i_images = cc_mask + ir_seq + GM_maps  # Add gradient-magnitude channel for the U-Net variant
+            reduced_images = np.concatenate([i_images, pmw_seq], axis=-1)
+            reduced_images[np.isnan(reduced_images)] = 0
+            print(reduced_images.shape)
+
+        y = model_predict_fn(reduced_images, hov_m_train, train_vmax_3d, lat_processed, lon_processed, v_max_diff)
+        st.write("Predicted Vmax:", y)
+    else:
+        st.error("Make sure you uploaded exactly 8 IR and 8 PMW images.")
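
# Shape contract (summary of the code above): whichever predict_* function is
# selected, it receives six arrays built from one day of eight observations:
#   reduced_images : (1, 8, 95, 95, 2)  IR fused with CC mask, stacked with PMW
#   hov_m_train    : (1, 95, 95, 8)     Hovmoller stack, time as the last axis
#   train_vmax_3d  : (1, 8, 8, 1)       diagonal Vmax tensor
#   lat_processed  : (1, 8)             latitudes
#   lon_processed  : (1, 8)             longitudes
#   v_max_diff     : (1, 9)             eight Vmax values plus |first - last|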
cnn3d.py
ADDED
@@ -0,0 +1,230 @@
+import tensorflow as tf
+from tensorflow.keras import layers, models  # type: ignore
+import numpy as np
+
+
+def build_3d_conv_model(input_shape=(8, 95, 95, 2), batch_size=16):
+    """
+    Builds a 3D CNN feature extractor with Conv3D and MaxPooling3D layers.
+
+    Parameters:
+    - input_shape: Shape of the input tensor (time_steps, height, width, channels).
+    - batch_size: Batch size for the model.
+
+    Returns:
+    - model: The Keras model.
+    """
+    # Input tensor
+    input_tensor = layers.Input(shape=input_shape)
+
+    # First Conv3D block
+    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same', activation='relu')(input_tensor)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
+
+    # Second Conv3D block
+    x = layers.Conv3D(filters=64, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
+
+    # Third Conv3D block
+    x = layers.Conv3D(filters=128, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)
+
+    # Flatten the output before passing to the fully connected layers
+    x = layers.Flatten()(x)
+
+    # Create the final model
+    model = models.Model(inputs=input_tensor, outputs=x)
+
+    return model
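
# Smoke test (illustration): the Conv3D/pooling stack above maps an
# (8, 95, 95, 2) sequence to a flat feature vector.
_demo_encoder = build_3d_conv_model()
_demo_features = _demo_encoder(tf.zeros((1, 8, 95, 95, 2)))
print(_demo_features.shape)  # (1, n_features)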
+
+def radial_structure_subnet(input_shape):
+    """
+    Creates the subnet for extracting TC radial structure features using a
+    five-branch CNN design with 2D convolutions.
+
+    Parameters:
+    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))
+
+    Returns:
+    - model: tf.keras.Model, the radial structure subnet model
+    """
+    input_tensor = layers.Input(shape=input_shape)
+
+    # Divide the input into four quadrants (NW, NE, SW, SE) by slicing
+    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
+    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
+    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
+    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]
+
+    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for 95 rows
+    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48 for 95 columns
+
+    # Zero-pad the quadrants to a common size (48, 48)
+    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
+                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
+    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
+                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
+    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
+                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
+    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
+                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)
+
+    print(nw_quadrant.shape)
+    print(ne_quadrant.shape)
+    print(sw_quadrant.shape)
+    print(se_quadrant.shape)
+
+    # Main branch (processing the entire structure)
+    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
+    y = layers.MaxPool2D()(main_branch)
+    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
+                                      (0, target_width - y.shape[2])))(y)
+
+    # Side branches (processing the individual quadrants)
+    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
+    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
+    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
+    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)
+
+    # First fusion: concatenate the main branch with the side branches
+    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
+    x = layers.MaxPool2D(pool_size=(2, 2))(x)
+
+    # Deepen and downsample the side branches to stay aligned with x
+    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
+    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
+    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
+    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
+    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
+    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
+    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
+    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)
+
+    # Second fusion
+    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
+    x = layers.MaxPool2D(pool_size=(2, 2))(x)
+
+    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
+    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
+    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
+    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
+    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
+    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
+    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
+    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)
+
+    # Third fusion, then two valid convolutions and flatten
+    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)
+    x = layers.Flatten()(x)
+    model = models.Model(inputs=input_tensor, outputs=x)
+    return model
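
# Design note: each fusion concatenates the downsampled full-frame branch with
# the four zero-padded quadrant branches at matching resolution (48 -> 24 -> 12),
# so quadrant features stay spatially aligned. Quick check (illustration):
_demo_radial = radial_structure_subnet((95, 95, 8))
_demo_out = _demo_radial(tf.zeros((1, 95, 95, 8)))  # flat radial-structure features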
+
+def build_cnn_model(input_shape=(8, 8, 1)):
+    # Define the input layer
+    input_tensor = layers.Input(shape=input_shape)
+
+    # Convolutional layer
+    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
+    x = layers.BatchNormalization()(x)
+    x = layers.ReLU()(x)
+
+    # Flatten layer
+    x = layers.Flatten()(x)
+
+    # Create the model
+    model = models.Model(inputs=input_tensor, outputs=x)
+
+    return model
+
+from tensorflow.keras import layers, models, Input  # type: ignore
+
+def build_combined_model():
+    # Define input shapes
+    input_shape_3d = (8, 95, 95, 2)
+    input_shape_radial = (95, 95, 8)
+    input_shape_cnn = (8, 8, 1)
+
+    input_shape_latitude = (8,)
+    input_shape_longitude = (8,)
+    input_shape_other = (9,)
+
+    # Build individual models
+    model_3d = build_3d_conv_model(input_shape=input_shape_3d)
+    model_radial = radial_structure_subnet(input_shape=input_shape_radial)
+    model_cnn = build_cnn_model(input_shape=input_shape_cnn)
+
+    # Define new inputs
+    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
+    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
+    input_other = Input(shape=input_shape_other, name="other_input")
+
+    # Embed the additional inputs with small Dense layers
+    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
+    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
+    flat_other = layers.Dense(64, activation='relu')(input_other)
+
+    # Combine all outputs
+    combined = layers.concatenate([
+        model_3d.output,
+        model_radial.output,
+        model_cnn.output,
+        flat_latitude,
+        flat_longitude,
+        flat_other
+    ])
+
+    # Add dense layers for final processing
+    x = layers.Dense(128, activation='relu')(combined)
+    x = layers.Dense(1, activation=None)(x)
+
+    # Create the final model
+    final_model = models.Model(
+        inputs=[model_3d.input, model_radial.input, model_cnn.input,
+                input_latitude, input_longitude, input_other],
+        outputs=x
+    )
+
+    return final_model
+
+import h5py
+with h5py.File(r"E:\1MAIN PROJECT\tf_env\3dcnn-model.h5", 'r') as f:
+    print(f.attrs.get('keras_version'))
+    print(f.attrs.get('backend'))
+    print("Model layers:", list(f['model_weights'].keys()))
+
+model = build_combined_model()
+model.load_weights(r"E:\1MAIN PROJECT\tf_env\3dcnn-model.h5")
+
+def predict_3dcnn(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
+    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
+    return y
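
# Usage sketch (illustration): predict_3dcnn expects the six arrays produced by
# app.py's preprocessing; zeros of the right shapes exercise the plumbing.
_demo_y = predict_3dcnn(
    np.zeros((1, 8, 95, 95, 2)),  # reduced_images_test
    np.zeros((1, 95, 95, 8)),     # hov_m_test
    np.zeros((1, 8, 8, 1)),       # test_vmax_3d
    np.zeros((1, 8)),             # lat_test
    np.zeros((1, 8)),             # lon_test
    np.zeros((1, 9)),             # int_diff_test
)                                 # -> array of shape (1, 1)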
convgru-model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8dbcbae05fe07d8120a71ee4eab2903ebf2b585ae90b1eed2e893d39d846d4e
+size 32404512
convlstm.py
ADDED
@@ -0,0 +1,229 @@
+import tensorflow as tf
+from tensorflow.keras import layers, models  # type: ignore
+import numpy as np
+
+def build_3d_conv_lstm_model(input_shape=(8, 95, 95, 2), batch_size=16):
+    """
+    Builds a 3D ConvLSTM model with Conv3D layers and MaxPooling3D layers.
+
+    Parameters:
+    - input_shape: Shape of the input tensor (time_steps, height, width, channels).
+    - batch_size: Batch size for the model.
+
+    Returns:
+    - model: The Keras model.
+    """
+    # Input tensor
+    input_tensor = layers.Input(shape=input_shape)
+
+    # First ConvLSTM2D block with Conv3D
+    x = layers.ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=True)(input_tensor)
+    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
+
+    # Second ConvLSTM2D block with Conv3D
+    x = layers.ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
+    x = layers.Conv3D(filters=64, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
+
+    # Third ConvLSTM2D block with Conv3D
+    x = layers.ConvLSTM2D(filters=128, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
+    x = layers.Conv3D(filters=128, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)
+
+    # Flatten the output before passing to the fully connected layers
+    x = layers.Flatten()(x)
+
+    # Create the final model
+    model = models.Model(inputs=input_tensor, outputs=x)
+
+    return model
+
+def radial_structure_subnet(input_shape):
+    """
+    Creates the subnet for extracting TC radial structure features using a
+    five-branch CNN design with 2D convolutions.
+
+    Parameters:
+    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))
+
+    Returns:
+    - model: tf.keras.Model, the radial structure subnet model
+    """
+    input_tensor = layers.Input(shape=input_shape)
+
+    # Divide the input into four quadrants (NW, NE, SW, SE) by slicing
+    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
+    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
+    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
+    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]
+
+    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for 95 rows
+    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48 for 95 columns
+
+    # Zero-pad the quadrants to a common size (48, 48)
+    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
+                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
+    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
+                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
+    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
+                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
+    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
+                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)
+
+    print(nw_quadrant.shape)
+    print(ne_quadrant.shape)
+    print(sw_quadrant.shape)
+    print(se_quadrant.shape)
+
+    # Main branch (processing the entire structure)
+    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
+    y = layers.MaxPool2D()(main_branch)
+    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
+                                      (0, target_width - y.shape[2])))(y)
+
+    # Side branches (processing the individual quadrants)
+    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
+    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
+    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
+    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)
+
+    # First fusion: concatenate the main branch with the side branches
+    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
+    x = layers.MaxPool2D(pool_size=(2, 2))(x)
+
+    # Deepen and downsample the side branches to stay aligned with x
+    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
+    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
+    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
+    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
+    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
+    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
+    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
+    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)
+
+    # Second fusion
+    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
+    x = layers.MaxPool2D(pool_size=(2, 2))(x)
+
+    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
+    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
+    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
+    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
+    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
+    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
+    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
+    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)
+
+    # Third fusion, then two valid convolutions and flatten
+    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)
+    x = layers.Flatten()(x)
+    model = models.Model(inputs=input_tensor, outputs=x)
+    return model
+
+def build_cnn_model(input_shape=(8, 8, 1)):
+    # Define the input layer
+    input_tensor = layers.Input(shape=input_shape)
+
+    # Convolutional layer
+    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
+    x = layers.BatchNormalization()(x)
+    x = layers.ReLU()(x)
+
+    # Flatten layer
+    x = layers.Flatten()(x)
+
+    # Create the model
+    model = models.Model(inputs=input_tensor, outputs=x)
+
+    return model
+
+from tensorflow.keras import layers, models, Input  # type: ignore
+
+def build_combined_model():
+    # Define input shapes
+    input_shape_3d = (8, 95, 95, 2)
+    input_shape_radial = (95, 95, 8)
+    input_shape_cnn = (8, 8, 1)
+
+    input_shape_latitude = (8,)
+    input_shape_longitude = (8,)
+    input_shape_other = (9,)
+
+    # Build individual models
+    model_3d = build_3d_conv_lstm_model(input_shape=input_shape_3d)
+    model_radial = radial_structure_subnet(input_shape=input_shape_radial)
+    model_cnn = build_cnn_model(input_shape=input_shape_cnn)
+
+    # Define new inputs
+    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
+    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
+    input_other = Input(shape=input_shape_other, name="other_input")
+
+    # Embed the additional inputs with small Dense layers
+    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
+    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
+    flat_other = layers.Dense(64, activation='relu')(input_other)
+
+    # Combine all outputs
+    combined = layers.concatenate([
+        model_3d.output,
+        model_radial.output,
+        model_cnn.output,
+        flat_latitude,
+        flat_longitude,
+        flat_other
+    ])
+
+    # Add dense layers for final processing
+    x = layers.Dense(128, activation='relu')(combined)
+    x = layers.Dense(1, activation=None)(x)
+
+    # Create the final model
+    final_model = models.Model(
+        inputs=[model_3d.input, model_radial.input, model_cnn.input,
+                input_latitude, input_longitude, input_other],
+        outputs=x
+    )
+
+    return final_model
+
+import h5py
+with h5py.File(r"E:\1MAIN PROJECT\tf_env\lstm3dcnn-model.h5", 'r') as f:
+    print(f.attrs.get('keras_version'))
+    print(f.attrs.get('backend'))
+    print("Model layers:", list(f['model_weights'].keys()))
+
+model = build_combined_model()
+model.load_weights(r"E:\1MAIN PROJECT\tf_env\lstm3dcnn-model.h5")
+
+def predict_lstm(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
+    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
+    return y
final_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e64f09411247db030b3a58007f60f1170b672aa361b514381e37691793a833d8
+size 18915736
gru_model.py
ADDED
@@ -0,0 +1,245 @@
+import tensorflow as tf
+from tensorflow.keras import layers, models  # type: ignore
+import numpy as np
+
+# Custom convolutional GRU layer
+class ConvGRU2DLayer(layers.Layer):
+    def __init__(self, filters, kernel_size, return_sequences=True, **kwargs):
+        super().__init__(**kwargs)
+        self.filters = filters
+        self.kernel_size = kernel_size
+        self.return_sequences = return_sequences
+
+    def build(self, input_shape):
+        self.input_projection = layers.Conv2D(self.filters, (1, 1), padding="same")
+        self.conv_z = layers.Conv2D(self.filters, self.kernel_size, padding="same", activation="sigmoid")
+        self.conv_r = layers.Conv2D(self.filters, self.kernel_size, padding="same", activation="sigmoid")
+        self.conv_h = layers.Conv2D(self.filters, self.kernel_size, padding="same", activation="tanh")
+        super().build(input_shape)
+
+    def call(self, inputs):
+        batch_size, time_steps, height, width, channels = tf.unstack(tf.shape(inputs))
+        time_steps = inputs.shape[1]  # static time dimension for the Python loop
+        h_t = tf.zeros((batch_size, height, width, self.filters))
+        outputs = []
+
+        for t in range(time_steps):
+            x_t = inputs[:, t, :, :, :]
+            x_projected = self.input_projection(x_t)
+            z = self.conv_z(x_projected) + self.conv_z(h_t)  # update gate
+            r = self.conv_r(x_projected) + self.conv_r(h_t)  # reset gate
+            h_tilde = self.conv_h(r * h_t)                   # candidate state
+            h_t = (1 - z) * h_t + z * h_tilde
+
+            if self.return_sequences:
+                outputs.append(h_t)
+
+        if self.return_sequences:
+            outputs = tf.stack(outputs, axis=1)
+        else:
+            outputs = h_t
+
+        return outputs
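
# The loop above is a weight-shared convolutional GRU: with x'_t the 1x1-projected
# input, it computes z_t = sig(Wz*x'_t) + sig(Wz*h_{t-1}) as the update gate,
# r_t = sig(Wr*x'_t) + sig(Wr*h_{t-1}) as the reset gate,
# h~_t = tanh(Wh*(r_t . h_{t-1})), and h_t = (1 - z_t) . h_{t-1} + z_t . h~_t,
# where * is a same-padded convolution and . is elementwise multiplication.
# Minimal smoke test (illustration):
_demo_gru = ConvGRU2DLayer(filters=4, kernel_size=(3, 3))
_demo_out = _demo_gru(tf.zeros((2, 8, 16, 16, 3)))  # -> (2, 8, 16, 16, 4)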
+
+# ConvGRU + Conv3D feature extractor
+def build_convgru_model(input_shape=(8, 95, 95, 2)):
+    input_tensor = layers.Input(shape=input_shape)
+    x = ConvGRU2DLayer(filters=32, kernel_size=(3, 3), return_sequences=True)(input_tensor)
+    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
+    x = ConvGRU2DLayer(filters=64, kernel_size=(3, 3), return_sequences=True)(x)
+    x = layers.Conv3D(filters=64, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(4, 3, 3), padding='same')(x)
+    x = ConvGRU2DLayer(filters=128, kernel_size=(3, 3), return_sequences=True)(x)
+    x = layers.Conv3D(filters=128, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
+    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)
+    x = layers.Flatten()(x)
+    model = models.Model(inputs=input_tensor, outputs=x)
+    return model
+
+def radial_structure_subnet(input_shape):
+    """
+    Creates the subnet for extracting TC radial structure features using a
+    five-branch CNN design with 2D convolutions.
+
+    Parameters:
+    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))
+
+    Returns:
+    - model: tf.keras.Model, the radial structure subnet model
+    """
+    input_tensor = layers.Input(shape=input_shape)
+
+    # Divide the input into four quadrants (NW, NE, SW, SE) by slicing
+    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
+    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
+    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
+    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]
+
+    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for 95 rows
+    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48 for 95 columns
+
+    # Zero-pad the quadrants to a common size (48, 48)
+    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
+                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
+    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
+                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
+    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
+                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
+    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
+                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)
+
+    print(nw_quadrant.shape)
+    print(ne_quadrant.shape)
+    print(sw_quadrant.shape)
+    print(se_quadrant.shape)
+
+    # Main branch (processing the entire structure)
+    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
+    y = layers.MaxPool2D()(main_branch)
+    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
+                                      (0, target_width - y.shape[2])))(y)
+
+    # Side branches (processing the individual quadrants)
+    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
+    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
+    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
+    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)
+
+    # First fusion: concatenate the main branch with the side branches
+    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
+    x = layers.MaxPool2D(pool_size=(2, 2))(x)
+
+    # Deepen and downsample the side branches to stay aligned with x
+    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
+    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
+    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
+    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
+    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
+    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
+    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
+    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)
+
+    # Second fusion
+    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
+    x = layers.MaxPool2D(pool_size=(2, 2))(x)
+
+    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
+    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
+    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
+    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
+    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
+    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
+    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
+    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)
+
+    # Third fusion, then two valid convolutions and flatten
+    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
+    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)
+    x = layers.Flatten()(x)
+    model = models.Model(inputs=input_tensor, outputs=x)
+    return model
+
+def build_cnn_model(input_shape=(8, 8, 1)):
+    # Define the input layer
+    input_tensor = layers.Input(shape=input_shape)
+
+    # Convolutional layer
+    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
+    x = layers.BatchNormalization()(x)
+    x = layers.ReLU()(x)
+
+    # Flatten layer
+    x = layers.Flatten()(x)
+
+    # Create the model
+    model = models.Model(inputs=input_tensor, outputs=x)
+
+    return model
+
+from tensorflow.keras import layers, models, Input  # type: ignore
+
+def build_combined_model():
+    # Define input shapes
+    input_shape_3d = (8, 95, 95, 2)
+    input_shape_radial = (95, 95, 8)
+    input_shape_cnn = (8, 8, 1)
+
+    input_shape_latitude = (8,)
+    input_shape_longitude = (8,)
+    input_shape_other = (9,)
+
+    # Build individual models
+    model_3d = build_convgru_model(input_shape=input_shape_3d)
+    model_radial = radial_structure_subnet(input_shape=input_shape_radial)
+    model_cnn = build_cnn_model(input_shape=input_shape_cnn)
+
+    # Define new inputs
+    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
+    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
+    input_other = Input(shape=input_shape_other, name="other_input")
+
+    # Embed the additional inputs with small Dense layers
+    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
+    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
+    flat_other = layers.Dense(64, activation='relu')(input_other)
+
+    # Combine all outputs
+    combined = layers.concatenate([
+        model_3d.output,
+        model_radial.output,
+        model_cnn.output,
+        flat_latitude,
+        flat_longitude,
+        flat_other
+    ])
+
+    # Add dense layers for final processing
+    x = layers.Dense(128, activation='relu')(combined)
+    x = layers.Dense(1, activation=None)(x)
+
+    # Create the final model
+    final_model = models.Model(
+        inputs=[model_3d.input, model_radial.input, model_cnn.input,
+                input_latitude, input_longitude, input_other],
+        outputs=x
+    )
+
+    return final_model
+
+import h5py
+with h5py.File(r"E:\1MAIN PROJECT\tf_env\convgru-model.h5", 'r') as f:
+    print(f.attrs.get('keras_version'))
+    print(f.attrs.get('backend'))
+    print("Model layers:", list(f['model_weights'].keys()))
+
+model = build_combined_model()
+model.load_weights(r"E:\1MAIN PROJECT\tf_env\convgru-model.h5")
+
+def predict(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
+    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
+    return y
lstm3dcnn-model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc8b3754dc42d21889047c900b83c9826346953eb9cdd06a3a4118f6a3912e42
+size 39027576
requirements.txt
ADDED
@@ -0,0 +1,66 @@
absl-py==2.2.2
altair==5.5.0
astunparse==1.6.3
attrs==25.3.0
blinker==1.9.0
cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
flatbuffers==25.2.10
gast==0.6.0
gitdb==4.0.12
GitPython==3.1.44
google-pasta==0.2.0
grpcio==1.71.0
h5py==3.13.0
idna==3.10
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
keras==3.9.2
libclang==18.1.1
Markdown==3.8
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
ml_dtypes==0.5.1
namex==0.0.8
narwhals==1.34.1
numpy==2.1.3
opencv-python==4.11.0.86
opt_einsum==3.4.0
optree==0.15.0
packaging==24.2
pandas==2.2.3
pillow==11.2.1
protobuf==5.29.4
pyarrow==19.0.1
pydeck==0.9.1
Pygments==2.19.1
python-dateutil==2.9.0.post0
pytz==2025.2
referencing==0.36.2
requests==2.32.3
rich==14.0.0
rpds-py==0.24.0
scipy==1.15.2
setuptools==78.1.0
six==1.17.0
smmap==5.0.2
streamlit==1.44.1
tenacity==9.1.2
tensorboard==2.19.0
tensorboard-data-server==0.7.2
tensorflow==2.19.0
termcolor==3.0.1
toml==0.10.2
tornado==6.4.2
typing_extensions==4.13.2
tzdata==2025.2
urllib3==2.4.0
watchdog==6.0.0
Werkzeug==3.1.3
wheel==0.45.1
wrapt==1.17.2
spaio_temp.py
ADDED
@@ -0,0 +1,327 @@
import tensorflow as tf
from tensorflow.keras import layers, models  # type: ignore
import numpy as np


class SpatiotemporalLSTMCell(layers.Layer):
    """
    SpatiotemporalLSTMCell: A custom LSTM cell that captures both spatial and temporal dependencies.
    It extends the traditional LSTM by adding a memory state (m_t) that focuses on spatial correlations.
    """
    def __init__(self, filters, kernel_size, **kwargs):
        super().__init__(**kwargs)
        self.filters = filters          # Number of output filters in the convolution
        self.kernel_size = kernel_size  # Size of the convolutional kernel

        # Convolutional components for standard LSTM operations
        self.conv_xg = layers.Conv2D(filters, kernel_size, padding="same", activation="tanh")     # For cell input
        self.conv_xi = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")  # For input gate
        self.conv_xf = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")  # For forget gate
        self.conv_xo = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")  # For output gate

        # Convolutional components for spatiotemporal memory operations
        self.conv_xg_st = layers.Conv2D(filters, kernel_size, padding="same", activation="tanh")     # For ST cell input
        self.conv_xi_st = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")  # For ST input gate
        self.conv_xf_st = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")  # For ST forget gate

        # Fusion layer to combine the cell state and spatiotemporal memory
        self.conv_fusion = layers.Conv2D(filters, (1, 1), padding="same")  # 1x1 conv for dimensionality reduction

    def call(self, inputs, states):
        """
        Forward pass of the spatiotemporal LSTM cell.

        Args:
            inputs: Input tensor of shape [batch_size, height, width, channels]
            states: List of previous states [h_t-1, c_t-1, m_t-1]
                h_t-1: previous hidden state
                c_t-1: previous cell state
                m_t-1: previous spatiotemporal memory
        """
        prev_h, prev_c, prev_m = states

        # Standard LSTM gate pre-activations; each convolution is shared
        # between the input and the previous hidden state, so both must
        # carry the same number of channels
        g_t = self.conv_xg(inputs) + self.conv_xg(prev_h)  # Cell input activation
        i_t = self.conv_xi(inputs) + self.conv_xi(prev_h)  # Input gate
        f_t = self.conv_xf(inputs) + self.conv_xf(prev_h)  # Forget gate
        o_t = self.conv_xo(inputs) + self.conv_xo(prev_h)  # Output gate

        # Cell state update. A textbook ST-LSTM would gate prev_c here; this
        # implementation gates conv_xo(prev_h) instead, and the pretrained
        # weights were produced with this variant, so it is kept unchanged.
        c_t = tf.sigmoid(f_t) * self.conv_xo(prev_h) + tf.sigmoid(i_t) * tf.tanh(g_t)

        # Spatiotemporal memory gate pre-activations
        g_t_st = self.conv_xg_st(inputs) + self.conv_xg_st(prev_m)  # ST cell input
        i_t_st = self.conv_xi_st(inputs) + self.conv_xi_st(prev_m)  # ST input gate
        f_t_st = self.conv_xf_st(inputs) + self.conv_xf_st(prev_m)  # ST forget gate

        # Spatiotemporal memory update. Likewise, a textbook update would gate
        # prev_m directly; conv_xf_st(prev_m) is kept for weight compatibility.
        m_t = tf.sigmoid(f_t_st) * self.conv_xf_st(prev_m) + tf.sigmoid(i_t_st) * tf.tanh(g_t_st)

        # Hidden state update by fusing cell state and spatiotemporal memory
        h_t = tf.sigmoid(o_t) * tf.tanh(self.conv_fusion(tf.concat([c_t, m_t], axis=-1)))

        return h_t, [h_t, c_t, m_t]  # Return the hidden state and all updated states


class SpatiotemporalLSTM(layers.Layer):
    """
    SpatiotemporalLSTM: Custom layer that applies the SpatiotemporalLSTMCell to a sequence of inputs.
    This processes 3D data with spatial and temporal dimensions.
    """
    def __init__(self, filters, kernel_size, **kwargs):
        super().__init__(**kwargs)
        self.cell = SpatiotemporalLSTMCell(filters, kernel_size)

    def call(self, inputs):
        """
        Forward pass of the SpatiotemporalLSTM layer.

        Args:
            inputs: Input tensor of shape [batch_size, time_steps, height, width, channels]
        """
        batch_size = tf.shape(inputs)[0]
        time_steps = inputs.shape[1]
        height = inputs.shape[2]
        width = inputs.shape[3]
        channels = inputs.shape[4]

        # Initialize states with zeros
        h_t = tf.zeros((batch_size, height, width, channels))  # Hidden state
        c_t = tf.zeros((batch_size, height, width, channels))  # Cell state
        m_t = tf.zeros((batch_size, height, width, channels))  # Spatiotemporal memory

        outputs = []
        # Process the sequence step by step. The states are sliced back to the
        # input channel count because the cell's shared convolutions are built
        # on the input channels, while the states it returns have `filters` channels.
        for t in range(time_steps):
            h_t, [h_t, c_t, m_t] = self.cell(inputs[:, t], [h_t[:, :, :, :inputs.shape[4]],
                                                            c_t[:, :, :, :inputs.shape[4]],
                                                            m_t[:, :, :, :inputs.shape[4]]])
            outputs.append(h_t)

        # Stack outputs along the time dimension
        return tf.stack(outputs, axis=1)

def build_st_lstm_model(input_shape=(8, 95, 95, 2)):
    """
    Build a complete spatiotemporal LSTM model for sequence processing of spatial data.

    Args:
        input_shape: Tuple of (time_steps, height, width, channels)

    Returns:
        A Keras model with spatiotemporal LSTM layers
    """
    # Create input layer with a fixed batch size
    input_tensor = layers.Input(shape=input_shape, batch_size=16)

    # First spatiotemporal LSTM block
    st_lstm_layer = SpatiotemporalLSTM(filters=32, kernel_size=(3, 3))
    x = st_lstm_layer(input_tensor)
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Second spatiotemporal LSTM block
    st_lstm_layer = SpatiotemporalLSTM(filters=64, kernel_size=(3, 3))
    x = st_lstm_layer(x)
    x = layers.Conv3D(filters=64, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Third spatiotemporal LSTM block
    st_lstm_layer = SpatiotemporalLSTM(filters=128, kernel_size=(3, 3))
    x = st_lstm_layer(x)
    x = layers.Conv3D(filters=128, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Flatten and prepare for output layers (not included in this model)
    x = layers.Flatten()(x)

    # Create and return the model
    model = models.Model(inputs=input_tensor, outputs=x)
    return model

def radial_structure_subnet(input_shape):
    """
    Creates the subnet for extracting TC radial structure features using a five-branch CNN design with 2D convolutions.

    Parameters:
    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))

    Returns:
    - model: tf.keras.Model, the radial structure subnet model
    """

    input_tensor = layers.Input(shape=input_shape)

    # Divide the input data into four quadrants (NW, NE, SW, SE)
    # by slicing along the height and width axes
    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]

    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for a 95-pixel grid
    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48 for a 95-pixel grid

    # Pad the quadrants to the common target size (48, 48)
    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)

    # Debug output: quadrant shapes after padding
    print(nw_quadrant.shape)
    print(ne_quadrant.shape)
    print(sw_quadrant.shape)
    print(se_quadrant.shape)

    # Main branch (processing the entire structure)
    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
    y = layers.MaxPool2D()(main_branch)

    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
                                      (0, target_width - y.shape[2])))(y)

    # Side branches (processing the individual quadrants)
    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)

    # (Upsampling of the side branches was considered but is not used)
    # nw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(nw_branch)
    # ne_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(ne_branch)
    # sw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(sw_branch)
    # se_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(se_branch)

    # First fusion: concatenate the main branch with the four quadrant branches
    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)

    # Convolution + pooling over the fused features
    x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    # Deepen each side branch before the next fusion
    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Second fusion
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Third fusion, followed by the final convolutions
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)

    # Flatten, then create and return the model
    x = layers.Flatten()(x)
    model = models.Model(inputs=input_tensor, outputs=x)
    return model

# Define input shape (height, width, channels)
# input_shape = (95, 95, 8)  # Example input shape (95x95 spatial resolution, 8 channels)

# # Build the model
# model = radial_structure_subnet(input_shape)

# # Model summary
# model.summary()

def build_cnn_model(input_shape=(8, 8, 1)):
    # Define the input layer
    input_tensor = layers.Input(shape=input_shape)

    # Convolutional layer
    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Flatten layer
    x = layers.Flatten()(x)

    # Create the model
    model = models.Model(inputs=input_tensor, outputs=x)

    return model

from tensorflow.keras import layers, models, Input  # type: ignore

def build_combined_model():
    # Define input shapes
    input_shape_3d = (8, 95, 95, 2)
    input_shape_radial = (95, 95, 8)
    input_shape_cnn = (8, 8, 1)

    input_shape_latitude = (8,)
    input_shape_longitude = (8,)
    input_shape_other = (9,)

    # Build individual sub-networks
    model_3d = build_st_lstm_model(input_shape=input_shape_3d)
    model_radial = radial_structure_subnet(input_shape=input_shape_radial)
    model_cnn = build_cnn_model(input_shape=input_shape_cnn)

    # Define new inputs
    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
    input_other = Input(shape=input_shape_other, name="other_input")

    # Project the auxiliary inputs into dense feature vectors
    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
    flat_other = layers.Dense(64, activation='relu')(input_other)

    # Combine all sub-network outputs
    combined = layers.concatenate([
        model_3d.output,
        model_radial.output,
        model_cnn.output,
        flat_latitude,
        flat_longitude,
        flat_other
    ])

    # Add dense layers for final processing
    x = layers.Dense(128, activation='relu')(combined)
    x = layers.Dense(1, activation=None)(x)

    # Create the final model
    final_model = models.Model(
        inputs=[model_3d.input, model_radial.input, model_cnn.input,
                input_latitude, input_longitude, input_other],
        outputs=x
    )

    return final_model

import h5py
with h5py.File(r"E:\1MAIN PROJECT\tf_env\spatio_tempral_LSTM.h5", 'r') as f:
    print(f.attrs.get('keras_version'))
    print(f.attrs.get('backend'))
    print("Model layers:", list(f['model_weights'].keys()))

model = build_combined_model()  # Rebuild the architecture, then load the trained weights
model.load_weights(r"E:\1MAIN PROJECT\tf_env\spatio_tempral_LSTM.h5")


def predict_stlstm(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
    return y
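A minimal shape check for the custom recurrent layer above, assuming random placeholder data; the (8, 95, 95, 2) sequence shape mirrors input_shape_3d in build_combined_model.

import tensorflow as tf

# Hypothetical shape check on random data (not real TC imagery)
layer = SpatiotemporalLSTM(filters=32, kernel_size=(3, 3))
dummy = tf.random.normal((2, 8, 95, 95, 2))  # (batch, time, height, width, channels)
out = layer(dummy)
print(out.shape)  # expected: (2, 8, 95, 95, 32), one hidden map per time step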
spatio_tempral_LSTM.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3534850c5d3eafe14e49bd523393066c5540bcd537e7aa52ff4476f234243059
size 54923632
trjgru.py
ADDED
@@ -0,0 +1,302 @@
import tensorflow as tf
from tensorflow.keras import layers, models  # type: ignore
import numpy as np


class TrajectoryGRU2D(layers.Layer):
    def __init__(self, filters, kernel_size, return_sequences=True, **kwargs):
        super().__init__(**kwargs)
        self.filters = filters
        self.kernel_size = kernel_size
        self.return_sequences = return_sequences

        # Projection layer to match the GRU feature space
        self.input_projection = layers.Conv2D(filters, (1, 1), padding="same")

        # GRU gates
        self.conv_z = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")
        self.conv_r = layers.Conv2D(filters, kernel_size, padding="same", activation="sigmoid")
        self.conv_h = layers.Conv2D(filters, kernel_size, padding="same", activation="tanh")

        # Motion-based trajectory update
        self.motion_conv = layers.Conv2D(filters, kernel_size, padding="same", activation="tanh")

    def build(self, input_shape):
        # Ensure input_projection is built with the correct channel count
        self.input_projection.build(input_shape[1:])  # drop the batch dimension
        super().build(input_shape)

    def call(self, inputs):
        # inputs shape: (batch_size, time_steps, height, width, channels)
        batch_size, time_steps, height, width, channels = tf.unstack(tf.shape(inputs))
        time_steps = inputs.shape[1]  # use the static time dimension for the Python loop

        # Initialize the hidden state
        h_t = tf.zeros((batch_size, height, width, self.filters))

        # List to store outputs at each time step
        outputs = []

        # Iterate over time steps
        for t in range(time_steps):
            # Get the input at time step t
            x_t = inputs[:, t, :, :, :]

            # Project the input to match the GRU feature dimension
            x_projected = self.input_projection(x_t)

            # Compute the motion-based trajectory update
            motion_update = self.motion_conv(x_projected)

            # Concatenate projected input, previous hidden state, and motion update
            combined = tf.concat([x_projected, h_t, motion_update], axis=-1)

            # Compute GRU gates
            z = self.conv_z(combined)  # Update gate
            r = self.conv_r(combined)  # Reset gate

            # Compute the candidate hidden state
            h_tilde = self.conv_h(tf.concat([x_projected, r * h_t], axis=-1))

            # Update the hidden state, adding the motion-based trajectory term
            h_t = (1 - z) * h_t + z * h_tilde + motion_update

            # Store the output if return_sequences is True
            if self.return_sequences:
                outputs.append(h_t)

        # Stack outputs along the time dimension if return_sequences is True
        if self.return_sequences:
            outputs = tf.stack(outputs, axis=1)
        else:
            outputs = h_t

        return outputs

    def compute_output_shape(self, input_shape):
        if self.return_sequences:
            return (input_shape[0], input_shape[1], input_shape[2], input_shape[3], self.filters)
        else:
            return (input_shape[0], input_shape[2], input_shape[3], self.filters)


def build_tgru_model(input_shape=(8, 95, 95, 2)):  # (time_steps, height, width, channels)
    input_tensor = layers.Input(shape=input_shape)

    # Apply TGRU layers
    x = TrajectoryGRU2D(filters=32, kernel_size=(3, 3), return_sequences=True)(input_tensor)
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = TrajectoryGRU2D(filters=64, kernel_size=(3, 3), return_sequences=True)(x)
    x = layers.Conv3D(filters=64, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = TrajectoryGRU2D(filters=128, kernel_size=(3, 3), return_sequences=True)(x)
    x = layers.Conv3D(filters=128, kernel_size=(3, 3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Flatten before the fully connected head (added in the combined model)
    x = layers.Flatten()(x)
    # x = layers.Dense(1, activation='sigmoid')(x)

    model = models.Model(inputs=input_tensor, outputs=x)
    return model

def radial_structure_subnet(input_shape):
    """
    Creates the subnet for extracting TC radial structure features using a five-branch CNN design with 2D convolutions.

    Parameters:
    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))

    Returns:
    - model: tf.keras.Model, the radial structure subnet model
    """

    input_tensor = layers.Input(shape=input_shape)

    # Divide the input data into four quadrants (NW, NE, SW, SE)
    # by slicing along the height and width axes
    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]

    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for a 95-pixel grid
    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48 for a 95-pixel grid

    # Pad the quadrants to the common target size (48, 48)
    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)

    # Debug output: quadrant shapes after padding
    print(nw_quadrant.shape)
    print(ne_quadrant.shape)
    print(sw_quadrant.shape)
    print(se_quadrant.shape)

    # Main branch (processing the entire structure)
    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
    y = layers.MaxPool2D()(main_branch)

    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
                                      (0, target_width - y.shape[2])))(y)

    # Side branches (processing the individual quadrants)
    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)

    # (Upsampling of the side branches was considered but is not used)
    # nw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(nw_branch)
    # ne_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(ne_branch)
    # sw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(sw_branch)
    # se_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(se_branch)

    # First fusion: concatenate the main branch with the four quadrant branches
    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)

    # Convolution + pooling over the fused features
    x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    # Deepen each side branch before the next fusion
    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Second fusion
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Third fusion, followed by the final convolutions
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)

    # Flatten, then create and return the model
    x = layers.Flatten()(x)
    model = models.Model(inputs=input_tensor, outputs=x)
    return model

# Define input shape (height, width, channels)
# input_shape = (95, 95, 8)  # Example input shape (95x95 spatial resolution, 8 channels)

# # Build the model
# model = radial_structure_subnet(input_shape)

# # Model summary
# model.summary()

def build_cnn_model(input_shape=(8, 8, 1)):
    # Define the input layer
    input_tensor = layers.Input(shape=input_shape)

    # Convolutional layer
    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Flatten layer
    x = layers.Flatten()(x)

    # Create the model
    model = models.Model(inputs=input_tensor, outputs=x)

    return model

from tensorflow.keras import layers, models, Input  # type: ignore

def build_combined_model():
    # Define input shapes
    input_shape_3d = (8, 95, 95, 2)
    input_shape_radial = (95, 95, 8)
    input_shape_cnn = (8, 8, 1)

    input_shape_latitude = (8,)
    input_shape_longitude = (8,)
    input_shape_other = (9,)

    # Build individual sub-networks
    model_3d = build_tgru_model(input_shape=input_shape_3d)
    model_radial = radial_structure_subnet(input_shape=input_shape_radial)
    model_cnn = build_cnn_model(input_shape=input_shape_cnn)

    # Define new inputs
    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
    input_other = Input(shape=input_shape_other, name="other_input")

    # Project the auxiliary inputs into dense feature vectors
    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
    flat_other = layers.Dense(64, activation='relu')(input_other)

    # Combine all sub-network outputs
    combined = layers.concatenate([
        model_3d.output,
        model_radial.output,
        model_cnn.output,
        flat_latitude,
        flat_longitude,
        flat_other
    ])

    # Add dense layers for final processing
    x = layers.Dense(128, activation='relu')(combined)
    x = layers.Dense(1, activation=None)(x)

    # Create the final model
    final_model = models.Model(
        inputs=[model_3d.input, model_radial.input, model_cnn.input,
                input_latitude, input_longitude, input_other],
        outputs=x
    )

    return final_model

import h5py
with h5py.File(r"E:\1MAIN PROJECT\tf_env\Trj_GRU.h5", 'r') as f:
    print(f.attrs.get('keras_version'))
    print(f.attrs.get('backend'))
    print("Model layers:", list(f['model_weights'].keys()))

# Rebuild the full six-input architecture. predict_trajgru below feeds all six
# inputs, so the combined model (not the single-input TGRU sub-network) must be
# the one that receives the trained weights.
model = build_combined_model()

# Build the model by calling it once, so the custom TrajectoryGRU2D layers
# are initialized before the weights are loaded (batch_size=1 dummy data)
dummy_inputs = [
    tf.random.normal((1, 8, 95, 95, 2)),  # image sequence input
    tf.random.normal((1, 95, 95, 8)),     # radial-structure input
    tf.random.normal((1, 8, 8, 1)),       # Vmax grid input
    tf.random.normal((1, 8)),             # latitude sequence
    tf.random.normal((1, 8)),             # longitude sequence
    tf.random.normal((1, 9)),             # other intensity features
]
_ = model(dummy_inputs)  # Forward pass to build all layers

# Now load the saved weights
# model.load_weights("Trj_GRU.weights.h5")
model.load_weights(r"E:\1MAIN PROJECT\tf_env\Trj_GRU.weights.h5")


def predict_trajgru(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
    return y
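A minimal shape check for the trajectory GRU layer above, assuming random placeholder data rather than real cyclone sequences.

import tensorflow as tf

# Hypothetical shape check on random data (not real TC imagery)
tgru = TrajectoryGRU2D(filters=16, kernel_size=(3, 3), return_sequences=True)
dummy = tf.random.normal((2, 8, 95, 95, 2))  # (batch, time, height, width, channels)
out = tgru(dummy)
print(out.shape)  # expected: (2, 8, 95, 95, 16), hidden state kept at every step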
unetlstm.py
ADDED
@@ -0,0 +1,243 @@
import tensorflow as tf
from tensorflow.keras import layers, models  # type: ignore

def encoder_block(inputs, filters):
    x = layers.Conv3D(filters=filters, kernel_size=(3, 3, 4), padding="same", activation="relu")(inputs)
    x = layers.BatchNormalization()(x)
    return x

def convlstm_block(inputs, filters):
    # Make the (timesteps, height, width, channels) layout explicit for ConvLSTM
    x = layers.Reshape((inputs.shape[1], inputs.shape[2], inputs.shape[3], inputs.shape[4]))(inputs)
    x = layers.ConvLSTM2D(filters=filters, kernel_size=(3, 3), padding="same", return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    # Reshape back to the 3D-convolution format
    x = layers.Reshape((inputs.shape[1], inputs.shape[2], inputs.shape[3], filters))(x)
    return x

def decoder_block(inputs, skip_connection, filters):
    x = layers.Conv3DTranspose(filters=filters, kernel_size=(3, 3, 4), padding="same", activation="relu")(inputs)
    x = layers.BatchNormalization()(x)
    skip_resized = layers.Conv3D(filters, (1, 1, 1), padding="same")(skip_connection)
    x = layers.Concatenate()([x, skip_resized])
    x = layers.ConvLSTM2D(filters=filters, kernel_size=(3, 3), padding="same", return_sequences=True)(x)
    return x

def build_unet_convlstm(input_shape=(8, 95, 95, 3)):
    input_tensor = layers.Input(shape=input_shape)

    # Encoder with ConvLSTM
    skip1 = encoder_block(input_tensor, filters=8)
    skip1 = convlstm_block(skip1, filters=8)

    skip2 = encoder_block(skip1, filters=16)
    skip2 = convlstm_block(skip2, filters=16)

    # Bottleneck with ConvLSTM
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3), padding="same", activation="relu")(skip2)
    x = layers.BatchNormalization()(x)
    x = convlstm_block(x, filters=32)

    # Decoder
    x = decoder_block(x, skip2, filters=16)
    x = decoder_block(x, skip1, filters=8)

    # Final output layer
    x = layers.Conv3D(filters=1, kernel_size=(1, 1, 1), activation="relu")(x)
    x = layers.GlobalAveragePooling3D()(x)

    model = models.Model(inputs=input_tensor, outputs=x)
    return model


def RSTNet(input_shape):
    """
    Creates the subnet for extracting TC radial structure features using a five-branch CNN design with 2D convolutions.

    Parameters:
    - input_shape: tuple, shape of the input data (e.g., (95, 95, 8))

    Returns:
    - model: tf.keras.Model, the radial structure subnet model
    """

    input_tensor = layers.Input(shape=input_shape)

    # Divide the input data into four quadrants (NW, NE, SW, SE)
    # by slicing along the height and width axes
    nw_quadrant = input_tensor[:, :input_shape[0]//2, :input_shape[1]//2, :]
    ne_quadrant = input_tensor[:, :input_shape[0]//2, input_shape[1]//2:, :]
    sw_quadrant = input_tensor[:, input_shape[0]//2:, :input_shape[1]//2, :]
    se_quadrant = input_tensor[:, input_shape[0]//2:, input_shape[1]//2:, :]

    target_height = max(input_shape[0]//2, input_shape[0] - input_shape[0]//2)  # 48 for a 95-pixel grid
    target_width = max(input_shape[1]//2, input_shape[1] - input_shape[1]//2)   # 48 for a 95-pixel grid

    # Pad the quadrants to the common target size (48, 48)
    nw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - nw_quadrant.shape[1]),
                                                (0, target_width - nw_quadrant.shape[2])))(nw_quadrant)
    ne_quadrant = layers.ZeroPadding2D(padding=((0, target_height - ne_quadrant.shape[1]),
                                                (0, target_width - ne_quadrant.shape[2])))(ne_quadrant)
    sw_quadrant = layers.ZeroPadding2D(padding=((0, target_height - sw_quadrant.shape[1]),
                                                (0, target_width - sw_quadrant.shape[2])))(sw_quadrant)
    se_quadrant = layers.ZeroPadding2D(padding=((0, target_height - se_quadrant.shape[1]),
                                                (0, target_width - se_quadrant.shape[2])))(se_quadrant)

    # Debug output: quadrant shapes after padding
    print(nw_quadrant.shape)
    print(ne_quadrant.shape)
    print(sw_quadrant.shape)
    print(se_quadrant.shape)

    # Main branch (processing the entire structure)
    main_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(input_tensor)
    y = layers.MaxPool2D()(main_branch)

    y = layers.ZeroPadding2D(padding=((0, target_height - y.shape[1]),
                                      (0, target_width - y.shape[2])))(y)

    # Side branches (processing the individual quadrants)
    nw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(nw_quadrant)
    ne_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(ne_quadrant)
    sw_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(sw_quadrant)
    se_branch = layers.Conv2D(filters=8, kernel_size=(3, 3), padding='same', activation='relu')(se_quadrant)

    # (Upsampling of the side branches was considered but is not used)
    # nw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(nw_branch)
    # ne_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(ne_branch)
    # sw_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(sw_branch)
    # se_branch = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(se_branch)

    # First fusion: concatenate the main branch with the four quadrant branches
    fusion = layers.concatenate([y, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)

    # ConvLSTM over the fused features (replaces the plain 2D convolution)
    # x = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.Reshape((1, 48, 48, 40))(fusion)   # 8 + 4*8 = 40 channels
    x = layers.ConvLSTM2D(filters=16, kernel_size=(3, 3), padding="same", return_sequences=True)(x)
    x = layers.Reshape((48, 48, 16))(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    # Deepen each side branch before the next fusion
    nw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=16, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Second fusion
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    # x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(fusion)
    x = layers.Reshape((1, 24, 24, 80))(fusion)   # 16 + 4*16 = 80 channels
    x = layers.ConvLSTM2D(filters=32, kernel_size=(3, 3), padding="same", return_sequences=True)(x)
    x = layers.Reshape((24, 24, 32))(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)

    nw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(nw_branch)
    ne_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(ne_branch)
    sw_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(sw_branch)
    se_branch = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(se_branch)
    nw_branch = layers.MaxPool2D(pool_size=(2, 2))(nw_branch)
    ne_branch = layers.MaxPool2D(pool_size=(2, 2))(ne_branch)
    sw_branch = layers.MaxPool2D(pool_size=(2, 2))(sw_branch)
    se_branch = layers.MaxPool2D(pool_size=(2, 2))(se_branch)

    # Third fusion, followed by the final ConvLSTM and convolutions
    fusion = layers.concatenate([x, nw_branch, ne_branch, sw_branch, se_branch], axis=-1)
    # x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(fusion)
    x = layers.Reshape((1, 12, 12, 160))(fusion)  # 32 + 4*32 = 160 channels
    x = layers.ConvLSTM2D(filters=32, kernel_size=(3, 3), padding="same", return_sequences=True)(x)
    x = layers.Reshape((12, 12, 32))(x)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation=None)(x)

    # Flatten, then create and return the model
    x = layers.Flatten()(x)
    model = models.Model(inputs=input_tensor, outputs=x)
    return model

def build_cnn_model(input_shape=(8, 8, 1)):
    # Define the input layer
    input_tensor = layers.Input(shape=input_shape)

    # Convolutional layer
    x = layers.Conv2D(64, (3, 3), padding='same')(input_tensor)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Flatten layer
    x = layers.Flatten()(x)

    # Create the model
    model = models.Model(inputs=input_tensor, outputs=x)

    return model

from tensorflow.keras import layers, models, Input  # type: ignore

def build_combined_model():
    # Define input shapes
    input_shape_3d = (8, 95, 95, 2)
    input_shape_radial = (95, 95, 8)
    input_shape_cnn = (8, 8, 1)

    input_shape_latitude = (8,)
    input_shape_longitude = (8,)
    input_shape_other = (9,)

    # Build individual sub-networks
    model_3d = build_unet_convlstm(input_shape=input_shape_3d)
    model_radial = RSTNet(input_shape=input_shape_radial)
    model_cnn = build_cnn_model(input_shape=input_shape_cnn)

    # Define new inputs
    input_latitude = Input(shape=input_shape_latitude, name="latitude_input")
    input_longitude = Input(shape=input_shape_longitude, name="longitude_input")
    input_other = Input(shape=input_shape_other, name="other_input")

    # Project the auxiliary inputs into dense feature vectors
    flat_latitude = layers.Dense(32, activation='relu')(input_latitude)
    flat_longitude = layers.Dense(32, activation='relu')(input_longitude)
    flat_other = layers.Dense(64, activation='relu')(input_other)

    # Combine all sub-network outputs
    combined = layers.concatenate([
        model_3d.output,
        model_radial.output,
        model_cnn.output,
        flat_latitude,
        flat_longitude,
        flat_other
    ])

    # Add dense layers for final processing
    x = layers.Dense(128, activation='relu')(combined)
    x = layers.Dense(1, activation=None)(x)

    # Create the final model
    final_model = models.Model(
        inputs=[model_3d.input, model_radial.input, model_cnn.input,
                input_latitude, input_longitude, input_other],
        outputs=x
    )

    return final_model

import h5py
with h5py.File(r"E:\1MAIN PROJECT\tf_env\final_model.h5", 'r') as f:
    print(f.attrs.get('keras_version'))
    print(f.attrs.get('backend'))
    print("Model layers:", list(f['model_weights'].keys()))

model = build_combined_model()  # Rebuild the architecture, then load the trained weights
model.load_weights(r"E:\1MAIN PROJECT\tf_env\final_model.h5")


def predict_unetlstm(reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test):
    y = model.predict([reduced_images_test, hov_m_test, test_vmax_3d, lat_test, lon_test, int_diff_test])
    return y
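A minimal hypothetical sanity check for the U-Net ConvLSTM sub-network alone, using random data shaped like input_shape_3d above.

import numpy as np

# Hypothetical sanity check on random data (not real TC imagery)
subnet = build_unet_convlstm(input_shape=(8, 95, 95, 2))
dummy = np.random.rand(1, 8, 95, 95, 2).astype("float32")
print(subnet.predict(dummy).shape)  # expected: (1, 1) after GlobalAveragePooling3D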