Update app.py
app.py
CHANGED
@@ -11,7 +11,6 @@ from tensorflow.keras.applications.vgg16 import preprocess_input as vgg_preprocess
 from tensorflow.keras.applications.xception import preprocess_input as xce_preprocess
 from tensorflow.keras.losses import BinaryFocalCrossentropy
 from PIL import Image
-from xgboost import XGBClassifier
 
 # --- CONFIGURATION ---
 FOREST_COORDS = {'Pakistan Forest': (34.0, 73.0)}
@@ -28,10 +27,12 @@ API_URL = (
 # --- LOAD MODELS ---
 def load_models():
     try:
+        # VGG fire detection model
         vgg_model = load_model(
             'vgg16_focal_unfreeze_more.keras',
             custom_objects={'BinaryFocalCrossentropy': BinaryFocalCrossentropy}
         )
+        # Xception severity model
         def focal_loss_fixed(gamma=2., alpha=.25):
             import tensorflow.keras.backend as K
             def loss_fn(y_true, y_pred):
@@ -45,11 +46,10 @@ def load_models():
             'severity_post_tta.keras',
             custom_objects={'focal_loss_fixed': focal_loss_fixed()}
         )
-        # Reload
-
-        xgb_model.
-
-        lr_model = joblib.load('wildfire_logistic_model_synthetic.joblib')
+        # Reload ensemble models from .pkl
+        rf_model = joblib.load('ensemble_rf_model.pkl')
+        xgb_model = joblib.load('ensemble_xgb_model.pkl')
+        lr_model = joblib.load('wildfire_logistic_model_synthetic.joblib')
         return vgg_model, xce_model, rf_model, xgb_model, lr_model
     except Exception as e:
         print(f"Error loading models: {e}")
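Review note: with XGBClassifier no longer imported, all three tabular models are plain joblib artifacts, so a broken or missing upload only surfaces inside load_models()'s except block at runtime. A quick standalone check that each artifact deserializes (file names taken from this diff; the script itself is a hypothetical helper, not part of app.py):

# check_artifacts.py -- hypothetical helper, not part of app.py
# (severity_post_tta.keras is skipped here because it needs the custom focal loss)
import joblib
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import BinaryFocalCrossentropy

ARTIFACTS = {
    'vgg16_focal_unfreeze_more.keras': lambda p: load_model(
        p, custom_objects={'BinaryFocalCrossentropy': BinaryFocalCrossentropy}),
    'ensemble_rf_model.pkl': joblib.load,
    'ensemble_xgb_model.pkl': joblib.load,
    'wildfire_logistic_model_synthetic.joblib': joblib.load,
}

for path, loader in ARTIFACTS.items():
    try:
        loader(path)
        print(f"OK   {path}")
    except Exception as exc:
        print(f"FAIL {path}: {exc}")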
@@ -59,13 +59,13 @@ def load_models():
 vgg_model, xce_model, rf_model, xgb_model, lr_model = load_models()
 
 target_map = {0: 'mild', 1: 'moderate', 2: 'severe'}
-trend_map
+trend_map = {1: 'increase', 0: 'same', -1: 'decrease'}
 task_rules = {
-    'mild':
-    'moderate':{'decrease':'mild','same':'moderate','increase':'severe'},
-    'severe':
+    'mild': {'decrease': 'mild', 'same': 'mild', 'increase': 'moderate'},
+    'moderate': {'decrease': 'mild', 'same': 'moderate', 'increase': 'severe'},
+    'severe': {'decrease': 'moderate', 'same': 'severe', 'increase': 'severe'}
 }
-recommendations = { ... } # (
+recommendations = { ... }  # (your existing recommendations dict)
 
 # --- PIPELINE FUNCTIONS ---
 def detect_fire(img):
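The task_rules table is the core of the projection step: current severity crossed with the weather trend gives the projected severity that later indexes recommendations. A minimal worked example using the exact mapping added above:

task_rules = {
    'mild':     {'decrease': 'mild', 'same': 'mild', 'increase': 'moderate'},
    'moderate': {'decrease': 'mild', 'same': 'moderate', 'increase': 'severe'},
    'severe':   {'decrease': 'moderate', 'same': 'severe', 'increase': 'severe'},
}

# a moderate fire with a worsening weather trend is projected to become severe
assert task_rules['moderate']['increase'] == 'severe'
# a severe fire only steps down when the weather trend improves
assert task_rules['severe']['decrease'] == 'moderate'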
@@ -87,7 +87,7 @@ def classify_severity(img):
         x = keras_image.img_to_array(img.resize((224,224)))[None]
         x = xce_preprocess(x)
         preds = xce_model.predict(x)
-        rf_p
+        rf_p = rf_model.predict(preds)[0]
         xgb_p = xgb_model.predict(preds)[0]
         ensemble = int(round((rf_p + xgb_p) / 2))
         return target_map.get(ensemble, 'moderate')
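Both tree models emit integer class labels (0/1/2 per target_map) and the ensemble takes the rounded mean. Note that Python 3's round() is round-half-to-even, so a 0-vs-1 split resolves to 'mild' while a 1-vs-2 split resolves to 'severe'. A small sketch reproducing the arithmetic with dummy votes (ensemble_label is an illustrative helper, not in app.py):

target_map = {0: 'mild', 1: 'moderate', 2: 'severe'}

def ensemble_label(rf_p, xgb_p):
    # same arithmetic as classify_severity()
    return target_map.get(int(round((rf_p + xgb_p) / 2)), 'moderate')

print(ensemble_label(0, 1))  # mild    -- round(0.5) -> 0 (half to even)
print(ensemble_label(1, 2))  # severe  -- round(1.5) -> 2
print(ensemble_label(2, 2))  # severe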
@@ -95,6 +95,7 @@ def classify_severity(img):
         print(f"Error in severity classification: {e}")
         return 'moderate'
 
+
 def fetch_weather_trend(lat, lon):
     try:
         end = datetime.utcnow()
@@ -106,6 +107,7 @@ def fetch_weather_trend(lat, lon):
         response.raise_for_status()
         df = pd.DataFrame(response.json().get('daily', {}))
     except Exception:
+        # fallback dummy data
         df = pd.DataFrame({
             'date': [(datetime.utcnow() - timedelta(days=i)).strftime('%Y-%m-%d') for i in range(1,-1,-1)],
             'precipitation_sum': [5, 2],
@@ -115,12 +117,10 @@ def fetch_weather_trend(lat, lon):
             'relative_humidity_2m_min': [40, 35],
             'windspeed_10m_max': [15, 18]
         })
-
-             'relative_humidity_2m_max','relative_humidity_2m_min','windspeed_10m_max']:
-        df[c] = pd.to_numeric(df[c], errors='coerce')
+    # compute features
     df['temperature'] = (df['temperature_2m_max'] + df['temperature_2m_min']) / 2
-    df['humidity']
-    df['wind_speed']
+    df['humidity'] = (df['relative_humidity_2m_max'] + df['relative_humidity_2m_min']) / 2
+    df['wind_speed'] = df['windspeed_10m_max']
     df['precipitation'] = df['precipitation_sum']
     df['fire_risk_score'] = (
         0.4 * (df['temperature'] / 55) +
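The derived columns depend only on the six daily fields, so the feature step can be exercised offline against the fallback frame. A minimal pandas sketch; the precipitation, minimum-humidity and wind values are the ones hard-coded above, while the temperature and maximum-humidity values are placeholders for illustration:

import pandas as pd

df = pd.DataFrame({
    'precipitation_sum': [5, 2],
    'temperature_2m_max': [30, 33],        # placeholder values
    'temperature_2m_min': [18, 20],        # placeholder values
    'relative_humidity_2m_max': [70, 65],  # placeholder values
    'relative_humidity_2m_min': [40, 35],
    'windspeed_10m_max': [15, 18],
})

# derived columns, mirroring fetch_weather_trend()
df['temperature'] = (df['temperature_2m_max'] + df['temperature_2m_min']) / 2
df['humidity'] = (df['relative_humidity_2m_max'] + df['relative_humidity_2m_min']) / 2
df['wind_speed'] = df['windspeed_10m_max']
df['precipitation'] = df['precipitation_sum']
print(df[['temperature', 'humidity', 'wind_speed', 'precipitation']])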
@@ -134,15 +134,10 @@ def fetch_weather_trend(lat, lon):
         return trend_map.get(trend_cl, 'same')
     return 'same'
 
+
 def generate_recommendations(original_severity, weather_trend):
-    """
-    Generate management recommendations based on original severity and weather trend.
-    Returns a formatted markdown string.
-    """
     projected = task_rules[original_severity][weather_trend]
     rec = recommendations[projected]
-
-    # Build the output string using f-strings and implicit concatenation
     return (
         f"**Original Severity:** {original_severity.title()}\n"
         f"**Weather Trend:** {weather_trend.title()}\n"
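generate_recommendations() assumes the elided recommendations dict has one entry per projected severity, each exposing the fields referenced in the f-string (only 'education' is visible in this hunk). A skeletal illustration of that shape, with invented placeholder text:

# shape only -- the real entries live in the elided `recommendations = { ... }`
recommendations = {
    'mild':     {'education': 'placeholder guidance text'},
    'moderate': {'education': 'placeholder guidance text'},
    'severe':   {'education': 'placeholder guidance text'},
}

projected = task_rules['moderate']['increase']  # -> 'severe'
print(recommendations[projected]['education'])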
@@ -155,20 +150,21 @@ def generate_recommendations(original_severity, weather_trend):
         f"**Education:** {rec['education']}"
     )
 
+
 def pipeline(image):
     if image is None:
-        return "No image provided","N/A","N/A","**Please upload an image to analyze**"
+        return "No image provided", "N/A", "N/A", "**Please upload an image to analyze**"
     img = Image.fromarray(image).convert('RGB')
     fire, prob = detect_fire(img)
     if not fire:
         return (
             f"No wildfire detected (confidence: {(1-prob)*100:.1f}%)",
-            "N/A","N/A",
+            "N/A", "N/A",
             "**No wildfire detected. Stay alert.**"
         )
-    sev
+    sev = classify_severity(img)
     trend = fetch_weather_trend(*FOREST_COORDS['Pakistan Forest'])
-    recs
+    recs = generate_recommendations(sev, trend)
     return (
         f"**Wildfire detected** (confidence: {prob*100:.1f}%)",
         f"**{sev.title()}**",
@@ -176,12 +172,13 @@ def pipeline(image):
         recs
     )
 
+
 def safe_pipeline(image):
     try:
         return pipeline(image)
     except Exception as e:
         print(f"Error in pipeline: {e}")
-        return "Error during analysis","N/A","N/A", f"**Error: {e}**"
+        return "Error during analysis", "N/A", "N/A", f"**Error: {e}**"
 
 # --- GRADIO UI ---
 custom_css = '''
@@ -196,14 +193,16 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
     with gr.Column():
         gr.Markdown("# 🔥 Wildfire Command Center")
         gr.Markdown("Upload a forest image to detect wildfire, classify severity, and get actionable recommendations.")
+
     with gr.Tabs():
         with gr.TabItem("Analyze 🔍"):
            with gr.Row():
                with gr.Column(scale=1):
-
-
+                    # use ImageEditor if in-browser annotation is needed, otherwise simple Image
+                    image_input = gr.Image(type="numpy", label="Forest Image")
+                    run_btn = gr.Button("Analyze Now", variant="primary")
                with gr.Column(scale=1):
-                    status_out
+                    status_out = gr.Markdown("*Status will appear here*", label="Status")
                     severity_out = gr.Markdown("---", label="Severity")
                     trend_out = gr.Markdown("---", label="Weather Trend")
                     recs_out = gr.Markdown("---", label="Recommendations")
@@ -212,6 +211,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
                     last_severity = gr.Markdown("---", elem_classes="output-card")
                     last_trend = gr.Markdown("---", elem_classes="output-card")
                     last_recs = gr.Markdown("---", elem_classes="output-card")
+
     run_btn.click(
         fn=safe_pipeline,
         inputs=image_input,
@@ -221,5 +221,6 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
         inputs=[status_out, severity_out, trend_out, recs_out],
         outputs=[last_status, last_severity, last_trend, last_recs]
     )
+
 if __name__ == '__main__':
     demo.queue(api_open=True).launch()
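The UI wiring follows the usual Blocks pattern: an Image input and a Button feed safe_pipeline(), whose 4-tuple maps in order onto four Markdown outputs. A stripped-down, self-contained sketch of the same pattern (the stub analyze() stands in for safe_pipeline and its strings are illustrative):

import gradio as gr

def analyze(image):
    # stand-in for safe_pipeline(); returns the same 4-tuple shape
    if image is None:
        return "No image provided", "N/A", "N/A", "**Please upload an image to analyze**"
    return "**Wildfire detected** (confidence: 97.0%)", "**Severe**", "**Increase**", "**See recommendations**"

with gr.Blocks() as demo:
    image_input = gr.Image(type="numpy", label="Forest Image")
    run_btn = gr.Button("Analyze Now", variant="primary")
    status_out = gr.Markdown("*Status will appear here*")
    severity_out = gr.Markdown("---")
    trend_out = gr.Markdown("---")
    recs_out = gr.Markdown("---")
    run_btn.click(fn=analyze, inputs=image_input,
                  outputs=[status_out, severity_out, trend_out, recs_out])

if __name__ == '__main__':
    demo.queue(api_open=True).launch()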