broadfield-dev committed on
Commit
e16d9dd
·
verified ·
1 Parent(s): 03610d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -10
app.py CHANGED
@@ -150,7 +150,6 @@ def fetch_and_process_sdo_data(target_dt, forecast_horizon_minutes):
150
  img_resized = img.resize((img_size, img_size), Image.Resampling.LANCZOS)
151
  norm_data = np.array(img_resized, dtype=np.float32)
152
 
153
- # *** FIX: Retrieve the correct scaler object from the dictionary for the current channel ***
154
  scaler = scalers_dict[channel]
155
  scaled_data = scaler.transform(norm_data.reshape(-1, 1)).reshape(norm_data.shape)
156
  channel_tensors.append(torch.from_numpy(scaled_data.astype(np.float32)))
@@ -173,22 +172,24 @@ def run_inference(input_tensor):
173
  with torch.no_grad():
174
  with torch.autocast(device_type=device.split(':')[0], dtype=torch.bfloat16):
175
  prediction = model(input_batch)
176
- return prediction.cpu()
 
 
177
 
178
  def generate_visualization(last_input_map, prediction_tensor, target_map, channel_name):
179
  if last_input_map is None: return None, None, None
180
  c_idx = SDO_CHANNELS.index(channel_name)
181
 
182
- # *** FIX: Retrieve the correct scaler object for the current channel to get its parameters ***
183
- scaler = APP_CACHE["scalers"][channel_name]
184
  params = scaler.to_dict()
185
- mean, std = params['mean'], params['std']
186
-
187
- # Note: The inverse transform for the simplified JPEG pipeline might differ from the original
188
- # We will use a standard inverse scaling, which is the most logical approach here.
189
- pred_slice_scaled = prediction_tensor[0, c_idx].numpy()
190
- pred_slice = (pred_slice_scaled * std) + mean
191
 
 
 
 
 
 
192
  target_img_data = np.array(target_map[channel_name])
193
  vmax = np.quantile(np.nan_to_num(target_img_data), 0.995)
194
  cmap_name = f"sdoaia{channel_name.replace('aia', '')}" if 'aia' in channel_name else 'hmimag'
 
150
  img_resized = img.resize((img_size, img_size), Image.Resampling.LANCZOS)
151
  norm_data = np.array(img_resized, dtype=np.float32)
152
 
 
153
  scaler = scalers_dict[channel]
154
  scaled_data = scaler.transform(norm_data.reshape(-1, 1)).reshape(norm_data.shape)
155
  channel_tensors.append(torch.from_numpy(scaled_data.astype(np.float32)))
 
172
  with torch.no_grad():
173
  with torch.autocast(device_type=device.split(':')[0], dtype=torch.bfloat16):
174
  prediction = model(input_batch)
175
+
176
+ # *** FIX: Convert from BFloat16 to Float32 before returning, making it NumPy compatible ***
177
+ return prediction.cpu().to(torch.float32)
178
 
179
  def generate_visualization(last_input_map, prediction_tensor, target_map, channel_name):
180
  if last_input_map is None: return None, None, None
181
  c_idx = SDO_CHANNELS.index(channel_name)
182
 
183
+ scalers_dict = APP_CACHE["scalers"]
184
+ scaler = scalers_dict[channel_name]
185
  params = scaler.to_dict()
186
+ mean, std, epsilon, sl_scale_factor = params['mean'], params['std'], params['epsilon'], params['sl_scale_factor']
 
 
 
 
 
187
 
188
+ # The prediction_tensor is now Float32, so .numpy() will work
189
+ pred_slice = inverse_transform_single_channel(
190
+ prediction_tensor[0, c_idx].numpy(), mean=mean, std=std, epsilon=epsilon, sl_scale_factor=sl_scale_factor
191
+ )
192
+
193
  target_img_data = np.array(target_map[channel_name])
194
  vmax = np.quantile(np.nan_to_num(target_img_data), 0.995)
195
  cmap_name = f"sdoaia{channel_name.replace('aia', '')}" if 'aia' in channel_name else 'hmimag'