Update app.py
app.py
CHANGED
@@ -1,344 +1,375 @@
import streamlit as st
import numpy as np
from PIL import Image
import cv2
from scipy.ndimage import gaussian_filter

# ------------------ TC CENTERING UTILS ------------------

def find_tc_center(ir_image, smoothing_sigma=3):
    smoothed_image = gaussian_filter(ir_image, sigma=smoothing_sigma)
    min_coords = np.unravel_index(np.argmin(smoothed_image), smoothed_image.shape)
    return min_coords[::-1]  # Return as (x, y)

def extract_local_region(ir_image, center, region_size=95):
    h, w = ir_image.shape
    half_size = region_size // 2
    x_min = max(center[0] - half_size, 0)
    x_max = min(center[0] + half_size, w)
    y_min = max(center[1] - half_size, 0)
    y_max = min(center[1] + half_size, h)
    region = np.full((region_size, region_size), np.nan)
    extracted = ir_image[y_min:y_max, x_min:x_max]
    region[:extracted.shape[0], :extracted.shape[1]] = extracted
    return region

def generate_hovmoller(X_data):
    hovmoller_list = []
    for ir_images in X_data:  # ir_images: shape (8, 95, 95)
        time_steps = ir_images.shape[0]
        hovmoller_data = np.zeros((time_steps, 95, 95))
        for t in range(time_steps):
            tc_center = find_tc_center(ir_images[t])
            hovmoller_data[t] = extract_local_region(ir_images[t], tc_center, 95)
        hovmoller_list.append(hovmoller_data)
    return np.array(hovmoller_list)

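A minimal sketch of how these centering utilities behave on synthetic data, assuming the three helpers above are in scope; the random array below stands in for real IR brightness-temperature frames and only the shapes are checked.

# Illustrative only; not part of app.py.
import numpy as np

rng = np.random.default_rng(0)
fake_ir = rng.uniform(190, 310, size=(1, 8, 95, 95))    # (samples, time, H, W)

center = find_tc_center(fake_ir[0, 0])                   # (x, y) of the coldest smoothed pixel
patch = extract_local_region(fake_ir[0, 0], center)      # (95, 95), NaN-padded at the edges
hov = generate_hovmoller(fake_ir)                        # (1, 8, 95, 95)
print(center, patch.shape, hov.shape)
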
def reshape_vmax(vmax_values, chunk_size=8):
    trimmed_size = (len(vmax_values) // chunk_size) * chunk_size
    vmax_values_trimmed = vmax_values[:trimmed_size]
    return vmax_values_trimmed.reshape(-1, chunk_size)

def create_3d_vmax(vmax_2d_array):
    # Initialize a 3D array of shape (N, 8, 8) filled with zeros
    vmax_3d_array = np.zeros((vmax_2d_array.shape[0], 8, 8))

    # Place each row of Vmax values on the diagonal of its 8x8 slice
    for i in range(vmax_2d_array.shape[0]):
        np.fill_diagonal(vmax_3d_array[i], vmax_2d_array[i])

    # Add a trailing channel axis: (N, 8, 8, 1)
    vmax_3d_array = vmax_3d_array.reshape(-1, 8, 8, 1)

    return vmax_3d_array

def process_lat_values(data):
    lat_values = data

    # Trim the array to make its length divisible by 8
    trimmed_size = (len(lat_values) // 8) * 8
    lat_values_trimmed = lat_values[:trimmed_size]
    lat_values_trimmed = np.array(lat_values_trimmed)  # Convert to NumPy array
    # Reshape into a 2D array (rows of 8 values each)
    lat_2d_array = lat_values_trimmed.reshape(-1, 8)

    return lat_2d_array

def process_lon_values(data):
    lon_values = np.array(data)  # Convert to NumPy array
    # Trim the array to make its length divisible by 8
    trimmed_size = (len(lon_values) // 8) * 8
    lon_values_trimmed = lon_values[:trimmed_size]

    # Reshape into a 2D array (rows of 8 values each)
    lon_2d_array = lon_values_trimmed.reshape(-1, 8)

    return lon_2d_array

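A small shape check for the reshaping helpers above, with synthetic values and assuming the functions above are in scope.

# Illustrative only; not part of app.py.
import numpy as np

vmax = np.arange(16, dtype=np.float32)                  # 16 samples -> 2 chunks of 8
vmax_2d = reshape_vmax(vmax)                            # (2, 8)
vmax_3d = create_3d_vmax(vmax_2d)                       # (2, 8, 8, 1), Vmax on the diagonal
lat_2d = process_lat_values(np.linspace(10, 11, 16))    # (2, 8)
lon_2d = process_lon_values(np.linspace(80, 81, 16))    # (2, 8)
print(vmax_2d.shape, vmax_3d.shape, lat_2d.shape, lon_2d.shape)
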
def calculate_intensity_difference(vmax_2d_array):
    """Calculates intensity difference for each row in Vmax 2D array."""
    int_diff = []

    for i in vmax_2d_array:
        k = abs(i[0] - i[-1])  # Absolute difference between first & last element
        i = np.append(i, k)    # Append difference as the 9th element
        int_diff.append(i)

    return np.array(int_diff)

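calculate_intensity_difference appends |first - last| of each 8-step Vmax row as a 9th column; a quick sketch, assuming the function above is in scope.

# Illustrative only; not part of app.py.
import numpy as np

rows = np.array([[30, 32, 35, 40, 45, 50, 55, 60],
                 [60, 58, 55, 50, 45, 42, 40, 38]], dtype=np.float32)
diff = calculate_intensity_difference(rows)
print(diff.shape)   # (2, 9)
print(diff[:, -1])  # [30. 22.] -> |30-60| and |60-38|
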
# Function to process and reshape image data
def process_images(images, batch_size=8, img_size=(95, 95, 1)):
    num_images = images.shape[0]

    # Trim the dataset to make it divisible by batch_size
    trimmed_size = (num_images // batch_size) * batch_size
    images_trimmed = images[:trimmed_size]

    # Reshape into (x, batch_size, img_size[0], img_size[1], img_size[2])
    images_reshaped = images_trimmed.reshape(-1, batch_size, *img_size)

    return images_reshaped

def process_cc_mask(cc_data):
    """Processes CC mask images by trimming and reshaping into (x, 8, 95, 95, 1)."""
    num_images = cc_data.shape[0]
    batch_size = 8
    trimmed_size = (num_images // batch_size) * batch_size  # Ensure divisibility by 8

    images_trimmed = cc_data[:trimmed_size]  # Trim excess images
    cc_images = images_trimmed.reshape(-1, batch_size, 95, 95, 1)  # Reshape

    return cc_images
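Both helpers above group a flat stack of frames into 8-step sequences with a trailing channel axis; a shape sketch with random data, assuming the functions above are in scope.

# Illustrative only; not part of app.py.
import numpy as np

frames = np.random.rand(17, 95, 95)                    # 17 frames -> 2 full sequences, 1 frame dropped
seq = process_images(frames)                           # (2, 8, 95, 95, 1)
masks = process_cc_mask(np.random.rand(17, 95, 95))    # (2, 8, 95, 95, 1)
print(seq.shape, masks.shape)
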
def extract_convective_cores(ir_data):
    """
    Extract Convective Cores (CCs) from IR imagery based on the criteria in the paper.
    Args:
        ir_data: IR imagery of shape (height, width, c).
    Returns:
        cc_mask: Binary mask of CCs (1 for CC, 0 otherwise) with the same shape as ir_data.
    """
    height, width, c = ir_data.shape
    cc_mask = np.zeros_like(ir_data, dtype=np.float32)  # Initialize CC mask

    # Define the neighborhood (8-connected)
    neighbors = [(-1, -1), (-1, 0), (-1, 1),
                 (0, -1),  (0, 0),  (0, 1),
                 (1, -1),  (1, 0),  (1, 1)]

    for i in range(1, height - 1):  # Avoid boundary pixels
        for j in range(1, width - 1):
            bt_ij = ir_data[i, j]

            # Condition 1: BT < 253 K
            if (bt_ij >= 253).any():
                continue

            # Condition 2: BT <= BT_n for all neighbors
            is_local_min = True
            for di, dj in neighbors:
                if ir_data[i + di, j + dj] < bt_ij:
                    is_local_min = False
                    break
            if not is_local_min:
                continue

            # Condition 3: Gradient condition
            numerator1 = (ir_data[i - 1, j] + ir_data[i + 1, j] - 2 * bt_ij) / 3.1
            numerator2 = (ir_data[i, j - 1] + ir_data[i, j + 1] - 2 * bt_ij) / 8.0
            lhs = numerator1 + numerator2
            rhs = (4 / 5.8) * np.exp(0.0826 * (bt_ij - 217))

            if lhs > rhs:
                cc_mask[i, j] = 1  # Mark as CC

    return cc_mask

def compute_convective_core_masks(ir_data):
    """Extracts convective core masks for each IR image."""
    cc_mask = []

    for i in ir_data:
        c = extract_convective_cores(i)
        c = np.array(c)
        cc_mask.append(c)

    return np.array(cc_mask)

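In the UI code below, compute_convective_core_masks is called on the (1, 8, 95, 95) IR stack. A shape-only sketch with a deliberately warm synthetic scene (every value is above the 253 K threshold, so no cores are flagged), assuming the functions above are in scope.

# Illustrative only; not part of app.py. Shape check with synthetic data.
import numpy as np

bt = np.random.uniform(255, 300, size=(1, 8, 95, 95)).astype(np.float32)  # warm scene: no pixel passes 253 K
cc = compute_convective_core_masks(bt)
print(cc.shape)   # (1, 8, 95, 95), all zeros for this warm scene
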
# ------------------ Streamlit UI ------------------
st.set_page_config(page_title="TCIR Daily Input", layout="wide")

st.title("Tropical Cyclone Input Uploader (8 sets/day)")

ir_images = st.file_uploader("Upload 8 IR images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
pmw_images = st.file_uploader("Upload 8 PMW images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)

if len(ir_images) != 8 or len(pmw_images) != 8:
    st.warning("Please upload exactly 8 IR and 8 PMW images.")
else:
    st.success("Uploaded 8 IR and 8 PMW images successfully.")

st.header("Input Latitude, Longitude, Vmax")
lat_values, lon_values, vmax_values = [], [], []

# col1, col2, col3 = st.columns(3)
# with col1:
#     for i in range(8):
#         lat_values.append(st.number_input(f"Latitude {i+1}", key=f"lat{i}"))
# with col2:
#     for i in range(8):
#         lon_values.append(st.number_input(f"Longitude {i+1}", key=f"lon{i}"))
# with col3:
#     for i in range(8):
#         vmax_values.append(st.number_input(f"Vmax {i+1}", key=f"vmax{i}"))
import pandas as pd

# File uploader
csv_file = st.file_uploader("Upload CSV file", type=["csv"])

if csv_file is not None:
    try:
        df = pd.read_csv(csv_file)
        required_columns = {'Latitude', 'Longitude', 'Vmax'}

        if required_columns.issubset(df.columns):
            lat_values = df['Latitude'].values
            lon_values = df['Longitude'].values
            vmax_values = df['Vmax'].values

            lat_values = np.array(lat_values)
            lon_values = np.array(lon_values)
            vmax_values = np.array(vmax_values)

            st.success("CSV file loaded and processed successfully!")
            # Optional: Show dataframe or data shape
            st.write(df.head())
        else:
            st.error("CSV file must contain 'Latitude', 'Longitude', and 'Vmax' columns.")
    except Exception as e:
        st.error(f"Error reading CSV: {e}")
else:
    st.warning("Please upload a CSV file.")
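The uploaded CSV only needs the three columns checked above, one row per 3-hourly time step. A minimal example of the expected layout; column names come from the code, the values, the knots unit, and the file name are illustrative assumptions.

# Illustrative only; an example of the CSV layout the uploader expects.
import pandas as pd

sample = pd.DataFrame({
    "Latitude":  [12.5, 12.7, 12.9, 13.1, 13.3, 13.5, 13.7, 13.9],
    "Longitude": [88.0, 87.8, 87.6, 87.4, 87.2, 87.0, 86.8, 86.6],
    "Vmax":      [35, 40, 45, 50, 55, 60, 65, 70],   # assumed to be in knots
})
sample.to_csv("sample_tc_track.csv", index=False)      # hypothetical filename
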
st.header("Select Prediction Model")
model_choice = st.selectbox(
    "Choose a model for prediction",
    ("ConvGRU", "ConvLSTM", "Traj-GRU", "3DCNN", "spatiotemporalLSTM", "Unet_LSTM"),
    index=0
)
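Each model option maps to a predict_* function imported in the dispatch below; judging from the single call at the end of the file, they are all expected to share one signature. A hypothetical stub of such a module follows; the real gru_model, convlstm, cnn3d, trjgru, spaio_temp, and unetlstm modules are separate files not shown here, and the return value is a placeholder.

# Hypothetical stub; not part of app.py or of the real model modules.
import numpy as np

def predict(reduced_images, hov_m_train, train_vmax_3d, lat_processed, lon_processed, v_max_diff):
    """Return a Vmax prediction for one 8-step sequence (dummy value here)."""
    # With an 8-row CSV and 8 uploaded image pairs, app.py passes:
    # reduced_images (1, 8, 95, 95, 2), hov_m_train (1, 95, 95, 8),
    # train_vmax_3d (1, 8, 8, 1), lat/lon (1, 8), v_max_diff (1, 9).
    return np.zeros((1, 1), dtype=np.float32)
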
# ------------------ Process Button ------------------
if st.button("Submit for Processing"):

    if len(ir_images) == 8 and len(pmw_images) == 8:
        # st.success("Starting preprocessing...")
        if model_choice == "Unet_LSTM":
            from unetlstm import predict_unetlstm
            model_predict_fn = predict_unetlstm
        elif model_choice == "ConvGRU":
            from gru_model import predict
            model_predict_fn = predict
        elif model_choice == "ConvLSTM":
            from convlstm import predict_lstm
            model_predict_fn = predict_lstm
        elif model_choice == "3DCNN":
            from cnn3d import predict_3dcnn
            model_predict_fn = predict_3dcnn
        elif model_choice == "Traj-GRU":
            from trjgru import predict_trajgru
            model_predict_fn = predict_trajgru
        elif model_choice == "spatiotemporalLSTM":
            from spaio_temp import predict_stlstm
            model_predict_fn = predict_stlstm
        # from gru_model import predict
        # from convlstm import predict_lstm
        # from cnn3d import predict_3dcnn
        # from trjgru import predict_trajgru
        # from spaio_temp import predict_stlstm
        # from unetlstm import predict_unetlstm
        ir_arrays = []
        pmw_arrays = []
        train_vmax_2d = reshape_vmax(np.array(vmax_values))
        # st.write("Vmax 2D shape:", train_vmax_2d.shape)
        train_vmax_3d = create_3d_vmax(train_vmax_2d)
        # st.write("Vmax 3D shape:", train_vmax_3d.shape)
        lat_processed = process_lat_values(lat_values)
        lon_processed = process_lon_values(lon_values)
        # st.write("Lat 2D shape:", lat_processed.shape)
        # st.write("Lon 2D shape:", lon_processed.shape)
        v_max_diff = calculate_intensity_difference(train_vmax_2d)
        # st.write("Vmax Intensity Difference shape:", v_max_diff.shape)
        for ir in ir_images:
            img = Image.open(ir).convert("L")
            arr = np.array(img).astype(np.float32)
            bt_arr = (arr / 255.0) * (310 - 190) + 190
            resized = cv2.resize(bt_arr, (95, 95), interpolation=cv2.INTER_CUBIC)
            ir_arrays.append(resized)

        for pmw in pmw_images:
            img = Image.open(pmw).convert("L")
            arr = np.array(img).astype(np.float32) / 255.0
            resized = cv2.resize(arr, (95, 95), interpolation=cv2.INTER_CUBIC)
            pmw_arrays.append(resized)

        ir = np.array(ir_arrays)
        pmw = np.array(pmw_arrays)
        # Stack into 8-step sequences of shape (1, 8, 95, 95, 1)
        ir_seq = process_images(ir)
        pmw_seq = process_images(pmw)

        # st.write(f"IR sequence shape: {ir_seq.shape}")
        # st.write(f"PMW sequence shape: {pmw_seq.shape}")

        # For demonstration: create batches
        X_train_new = ir_seq.reshape((1, 8, 95, 95))  # Shape: (1, 8, 95, 95)
        # X_test_new = X_valid = X_train_new.copy()  # Dummy copies for now
        # st.write(f"X_train_new shape: {X_train_new.shape}")
        cc_mask = compute_convective_core_masks(X_train_new)
        # st.write("CC Mask Shape:", cc_mask.shape)
        hov_m_train = generate_hovmoller(X_train_new)
        # hov_m_test = generate_hovmoller(X_test_new)
        # hov_m_valid = generate_hovmoller(X_valid)
        hov_m_train[np.isnan(hov_m_train)] = 0
        hov_m_train = hov_m_train.transpose(0, 2, 3, 1)
        # st.success("Hovmöller diagrams generated ✅")
        # st.write("Hovmöller Train Shape:", hov_m_train.shape)
        # st.write("Hovmöller Test Shape:", hov_m_test.shape)
        # st.write("Hovmöller Valid Shape:", hov_m_valid.shape)

        # Visualize first sample
        # st.subheader("Hovmöller Sample (Train Set)")
        # for t in range(8):
        #     st.image(hov_m_train[0, t], caption=f"Time Step {t+1}", clamp=True, width=150)
        # st.write(hov_m_train[0, 0])
        cc_mask[np.isnan(cc_mask)] = 0
        cc_mask = cc_mask.reshape(1, 8, 95, 95, 1)
        i_images = cc_mask + ir_seq
        reduced_images = np.concatenate([i_images, pmw_seq], axis=-1)
        reduced_images[np.isnan(reduced_images)] = 0
        # st.write("Reduced Images Shape:", reduced_images.shape)
        # y = np.isnan(reduced_images).sum()
        # st.write("Reduced Images NaN Count:", y)
        # model_predict_fn = {
        #     "ConvGRU": predict,
        #     "ConvLSTM": predict_lstm,
        #     "3DCNN": predict_3dcnn,
        #     "Traj-GRU": predict_trajgru,
        #     "spatiotemporalLSTM": predict_stlstm,
        #     "Unet_LSTM": predict_unetlstm,
        # }[model_choice]
        if model_choice == "Unet_LSTM":
            import tensorflow as tf

            def tf_gradient_magnitude(images):
                # Sobel kernels
                sobel_x = tf.constant([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=tf.float32)
                sobel_y = tf.constant([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=tf.float32)
                sobel_x = tf.reshape(sobel_x, [3, 3, 1, 1])
                sobel_y = tf.reshape(sobel_y, [3, 3, 1, 1])

                images = tf.convert_to_tensor(images, dtype=tf.float32)
                images = tf.expand_dims(images, -1)

                gx = tf.nn.conv2d(images, sobel_x, strides=1, padding='SAME')
                gy = tf.nn.conv2d(images, sobel_y, strides=1, padding='SAME')
                grad_mag = tf.sqrt(tf.square(gx) + tf.square(gy) + 1e-6)

                return tf.squeeze(grad_mag, -1).numpy()

            def GM_maps_prep(ir):
                GM_maps = []
                for i in ir:
                    GM_map = tf_gradient_magnitude(i)
                    GM_maps.append(GM_map)
                GM_maps = np.array(GM_maps)
                return GM_maps

            ir_seq = ir_seq.reshape(8, 95, 95, 1)
            GM_maps = GM_maps_prep(ir_seq)
            print(GM_maps.shape)
            GM_maps = GM_maps.reshape(1, 8, 95, 95, 1)
            i_images = cc_mask + ir_seq + GM_maps
            reduced_images = np.concatenate([i_images, pmw_seq], axis=-1)
            reduced_images[np.isnan(reduced_images)] = 0
            print(reduced_images.shape)
            y = model_predict_fn(reduced_images, hov_m_train, train_vmax_3d, lat_processed, lon_processed, v_max_diff)
        else:
            y = model_predict_fn(reduced_images, hov_m_train, train_vmax_3d, lat_processed, lon_processed, v_max_diff)
        st.write("Predicted Vmax:", y)
    else:
        st.error("Make sure you uploaded exactly 8 IR and 8 PMW images.")
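For completeness, a small sketch of the Sobel gradient-magnitude helper defined inside the Unet_LSTM branch, applied to a stack of random frames; it assumes TensorFlow is installed and tf_gradient_magnitude is in scope.

# Illustrative only; not part of app.py.
import numpy as np

frames = np.random.rand(8, 95, 95).astype(np.float32)   # one 8-step IR sequence
gm = tf_gradient_magnitude(frames)                       # per-pixel Sobel gradient magnitude
print(gm.shape)                                          # (8, 95, 95)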