Uploaded codebase
- app.py +60 -0
- config.py +32 -0
- mapping_utils.py +517 -0
- orchestrator.py +79 -0
- recommendation_agents.py +355 -0
- requirements.txt +15 -0
- risk_agent.py +171 -0
- tools.py +721 -0
- ui.py +655 -0
app.py
ADDED
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
"""
CAVA Platform - Main Application
"""

from config import (
    API_KEY,
    NASA_FIRMS_MAP_KEY,
    GRADIO_SERVER_NAME,
    GRADIO_SERVER_PORT,
    GRADIO_SHARE,
    model,
)
from ui import ClimateRiskUI


def main():
    """Main function to launch the application."""

    # Check API key configuration
    if not API_KEY or API_KEY == "your-anthropic-api-key-here":
        print("⚠️ WARNING: ANTHROPIC_API_KEY not properly configured!")
        print("   Please add your API key to the .env file:")
        print("   ANTHROPIC_API_KEY=your-actual-api-key-here")
        print("   You can get one at: https://console.anthropic.com/")
        print("")
    else:
        print("✅ Anthropic API key loaded from .env file")

    if not NASA_FIRMS_MAP_KEY or NASA_FIRMS_MAP_KEY == "your-nasa-firms-api-key-here":
        print("ℹ️ NASA FIRMS API key not configured (optional)")
        print("   For wildfire data, add to .env: NASA_FIRMS_MAP_KEY=your-key")
        print("")
    else:
        print("✅ NASA FIRMS API key loaded from .env file")

    try:
        ui = ClimateRiskUI(model)
        app = ui.create_interface()

        print("🚀 Launching CAVA-AI Platform...")
        print(f"📱 Open your browser and go to: http://localhost:{GRADIO_SERVER_PORT}")
        print("")

        app.launch(
            server_name=GRADIO_SERVER_NAME,
            server_port=GRADIO_SERVER_PORT,
            share=GRADIO_SHARE,
            show_error=True,
        )

    except Exception as e:
        print(f"❌ Launch error: {e}")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    main()
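Illustrative note (not part of the commit): a minimal smoke-test sketch for this entry point, assuming the dependencies in requirements.txt are installed, a `.env` file is present, and the files in this commit sit together at the repository root. It builds the interface without launching the server.

# Hypothetical smoke test; ClimateRiskUI comes from ui.py (655 lines, not shown in this excerpt).
from config import model
from ui import ClimateRiskUI

demo = ClimateRiskUI(model).create_interface()
print(type(demo))  # expected to be a Gradio app object (e.g. gr.Blocks), since app.py calls demo.launch()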
config.py
ADDED
@@ -0,0 +1,32 @@
import os
from dotenv import load_dotenv
from smolagents import LiteLLMModel

from phoenix.otel import register
from openinference.instrumentation.smolagents import SmolagentsInstrumentor

register()
SmolagentsInstrumentor().instrument()

load_dotenv()

# API Configuration
API_KEY = os.getenv("ANTHROPIC_API_KEY")
if not API_KEY:
    print("⚠️ API key not found in .env file!")

NASA_FIRMS_MAP_KEY = os.getenv("NASA_FIRMS_MAP_KEY", "b16df29134bfb809c751d7e283c71312")

# Server configuration
GRADIO_SERVER_NAME = os.getenv("GRADIO_SERVER_NAME", "0.0.0.0")
GRADIO_SERVER_PORT = int(os.getenv("GRADIO_SERVER_PORT", "7860"))
GRADIO_SHARE = os.getenv("GRADIO_SHARE", "False").lower() == "true"

# Model configuration
MODEL_ID = os.getenv("MODEL_ID", "claude-sonnet-4-20250514")


# Initialize the model with the key loaded from .env
model = LiteLLMModel(model_id=MODEL_ID, api_key=API_KEY)
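Illustrative note: a minimal `.env` that satisfies the lookups above might look like the following (placeholder values only; the variable names are the ones config.py and app.py actually read).

ANTHROPIC_API_KEY=your-actual-api-key-here
NASA_FIRMS_MAP_KEY=your-nasa-firms-key
GRADIO_SERVER_NAME=0.0.0.0
GRADIO_SERVER_PORT=7860
GRADIO_SHARE=False
MODEL_ID=claude-sonnet-4-20250514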
mapping_utils.py
ADDED
@@ -0,0 +1,517 @@
import requests
import folium

COUNTRIES_AND_CITIES = {
    "Afghanistan": ["Kabul", "Kandahar", "Herat", "Mazar-i-Sharif", "Jalalabad"],
    "Albania": ["Tirana", "Durrës", "Vlorë", "Shkodër", "Fier"],
    "Algeria": ["Algiers", "Oran", "Constantine", "Annaba", "Blida"],
    "Argentina": ["Buenos Aires", "Córdoba", "Rosario", "Mendoza", "La Plata"],
    "Armenia": ["Yerevan", "Gyumri", "Vanadzor", "Vagharshapat", "Hrazdan"],
    "Australia": ["Sydney", "Melbourne", "Brisbane", "Perth", "Adelaide", "Gold Coast", "Newcastle", "Canberra", "Sunshine Coast", "Wollongong"],
    "Austria": ["Vienna", "Graz", "Linz", "Salzburg", "Innsbruck"],
    "Azerbaijan": ["Baku", "Ganja", "Sumqayit", "Mingachevir", "Lankaran"],
    "Bahrain": ["Manama", "Riffa", "Muharraq", "Hamad Town", "A'ali"],
    "Bangladesh": ["Dhaka", "Chittagong", "Sylhet", "Rajshahi", "Khulna"],
    "Belarus": ["Minsk", "Gomel", "Mogilev", "Vitebsk", "Grodno"],
    "Belgium": ["Brussels", "Antwerp", "Ghent", "Charleroi", "Liège"],
    "Bolivia": ["La Paz", "Santa Cruz", "Cochabamba", "Sucre", "Oruro"],
    "Bosnia and Herzegovina": ["Sarajevo", "Banja Luka", "Tuzla", "Zenica", "Mostar"],
    "Brazil": ["São Paulo", "Rio de Janeiro", "Brasília", "Salvador", "Fortaleza", "Belo Horizonte", "Manaus", "Curitiba", "Recife", "Goiânia"],
    "Bulgaria": ["Sofia", "Plovdiv", "Varna", "Burgas", "Ruse"],
    "Cambodia": ["Phnom Penh", "Siem Reap", "Battambang", "Sihanoukville", "Poipet"],
    "Canada": ["Toronto", "Montreal", "Vancouver", "Calgary", "Edmonton", "Ottawa", "Winnipeg", "Quebec City", "Hamilton", "Kitchener"],
    "Chile": ["Santiago", "Valparaíso", "Concepción", "La Serena", "Antofagasta"],
    "China": ["Shanghai", "Beijing", "Chongqing", "Tianjin", "Guangzhou", "Shenzhen", "Wuhan", "Dongguan", "Chengdu", "Nanjing"],
    "Colombia": ["Bogotá", "Medellín", "Cali", "Barranquilla", "Cartagena"],
    "Croatia": ["Zagreb", "Split", "Rijeka", "Osijek", "Zadar"],
    "Czech Republic": ["Prague", "Brno", "Ostrava", "Plzen", "Liberec"],
    "Denmark": ["Copenhagen", "Aarhus", "Odense", "Aalborg", "Esbjerg"],
    "Ecuador": ["Quito", "Guayaquil", "Cuenca", "Santo Domingo", "Machala"],
    "Egypt": ["Cairo", "Alexandria", "Giza", "Shubra El Kheima", "Port Said"],
    "Estonia": ["Tallinn", "Tartu", "Narva", "Pärnu", "Kohtla-Järve"],
    "Ethiopia": ["Addis Ababa", "Dire Dawa", "Mekelle", "Adama", "Awassa"],
    "Finland": ["Helsinki", "Espoo", "Tampere", "Vantaa", "Oulu"],
    "France": ["Paris", "Lyon", "Marseille", "Toulouse", "Nice", "Nantes", "Strasbourg", "Montpellier", "Bordeaux", "Lille"],
    "Georgia": ["Tbilisi", "Batumi", "Kutaisi", "Rustavi", "Gori"],
    "Germany": ["Berlin", "Hamburg", "Munich", "Cologne", "Frankfurt", "Stuttgart", "Düsseldorf", "Dortmund", "Essen", "Leipzig"],
    "Ghana": ["Accra", "Kumasi", "Tamale", "Takoradi", "Cape Coast"],
    "Greece": ["Athens", "Thessaloniki", "Patras", "Heraklion", "Larissa"],
    "Hungary": ["Budapest", "Debrecen", "Szeged", "Miskolc", "Pécs"],
    "Iceland": ["Reykjavik", "Kópavogur", "Hafnarfjörður", "Akureyri", "Reykjanesbær"],
    "India": ["Mumbai", "Delhi", "Bangalore", "Hyderabad", "Chennai", "Kolkata", "Ahmedabad", "Pune", "Surat", "Jaipur"],
    "Indonesia": ["Jakarta", "Surabaya", "Bandung", "Bekasi", "Medan", "Tangerang", "Depok", "Semarang", "Palembang", "Makassar"],
    "Iran": ["Tehran", "Mashhad", "Isfahan", "Karaj", "Shiraz"],
    "Iraq": ["Baghdad", "Basra", "Mosul", "Erbil", "Najaf"],
    "Ireland": ["Dublin", "Cork", "Limerick", "Galway", "Waterford"],
    "Israel": ["Jerusalem", "Tel Aviv", "Haifa", "Rishon LeZion", "Petah Tikva"],
    "Italy": ["Rome", "Milan", "Naples", "Turin", "Palermo", "Genoa", "Bologna", "Florence", "Bari", "Catania"],
    "Japan": ["Tokyo", "Osaka", "Yokohama", "Nagoya", "Sapporo", "Fukuoka", "Kobe", "Kawasaki", "Kyoto", "Saitama"],
    "Jordan": ["Amman", "Zarqa", "Irbid", "Russeifa", "Wadi as-Ser"],
    "Kazakhstan": ["Almaty", "Nur-Sultan", "Shymkent", "Aktobe", "Taraz"],
    "Kenya": ["Nairobi", "Mombasa", "Kisumu", "Nakuru", "Eldoret"],
    "Kuwait": ["Kuwait City", "Al Ahmadi", "Hawalli", "As Salimiyah", "Sabah as Salim"],
    "Latvia": ["Riga", "Daugavpils", "Liepāja", "Jelgava", "Jūrmala"],
    "Lebanon": ["Beirut", "Tripoli", "Sidon", "Tyre", "Nabatieh"],
    "Lithuania": ["Vilnius", "Kaunas", "Klaipėda", "Šiauliai", "Panevėžys"],
    "Luxembourg": ["Luxembourg City", "Esch-sur-Alzette", "Differdange", "Dudelange", "Ettelbruck"],
    "Malaysia": ["Kuala Lumpur", "George Town", "Ipoh", "Shah Alam", "Petaling Jaya"],
    "Mexico": ["Mexico City", "Guadalajara", "Monterrey", "Puebla", "Tijuana", "León", "Juárez", "Torreón", "Querétaro", "Mérida"],
    "Morocco": ["Casablanca", "Rabat", "Fez", "Marrakech", "Agadir"],
    "Nepal": ["Kathmandu", "Pokhara", "Lalitpur", "Bharatpur", "Biratnagar"],
    "Netherlands": ["Amsterdam", "Rotterdam", "The Hague", "Utrecht", "Eindhoven", "Tilburg", "Groningen", "Almere", "Breda", "Nijmegen"],
    "New Zealand": ["Auckland", "Wellington", "Christchurch", "Hamilton", "Tauranga"],
    "Nigeria": ["Lagos", "Abuja", "Kano", "Ibadan", "Port Harcourt"],
    "Norway": ["Oslo", "Bergen", "Trondheim", "Stavanger", "Drammen", "Fredrikstad", "Kristiansand", "Sandnes", "Tromsø", "Sarpsborg"],
    "Pakistan": ["Karachi", "Lahore", "Faisalabad", "Rawalpindi", "Multan"],
    "Peru": ["Lima", "Arequipa", "Trujillo", "Chiclayo", "Huancayo"],
    "Philippines": ["Manila", "Quezon City", "Davao", "Cebu City", "Zamboanga"],
    "Poland": ["Warsaw", "Kraków", "Łódź", "Wrocław", "Poznań"],
    "Portugal": ["Lisbon", "Porto", "Vila Nova de Gaia", "Amadora", "Braga"],
    "Qatar": ["Doha", "Al Rayyan", "Umm Salal", "Al Wakrah", "Al Khor"],
    "Romania": ["Bucharest", "Cluj-Napoca", "Timișoara", "Iași", "Constanța"],
    "Russia": ["Moscow", "Saint Petersburg", "Novosibirsk", "Yekaterinburg", "Kazan", "Nizhny Novgorod", "Chelyabinsk", "Samara", "Omsk", "Rostov-on-Don"],
    "Saudi Arabia": ["Riyadh", "Jeddah", "Mecca", "Medina", "Dammam"],
    "Serbia": ["Belgrade", "Novi Sad", "Niš", "Kragujevac", "Subotica"],
    "Singapore": ["Singapore"],
    "Slovakia": ["Bratislava", "Košice", "Prešov", "Žilina", "Banská Bystrica"],
    "Slovenia": ["Ljubljana", "Maribor", "Celje", "Kranj", "Velenje"],
    "South Africa": ["Cape Town", "Johannesburg", "Durban", "Pretoria", "Port Elizabeth"],
    "South Korea": ["Seoul", "Busan", "Incheon", "Daegu", "Daejeon", "Gwangju", "Suwon", "Ulsan", "Changwon", "Goyang"],
    "Spain": ["Madrid", "Barcelona", "Valencia", "Seville", "Zaragoza", "Málaga", "Murcia", "Palma", "Las Palmas", "Bilbao"],
    "Sri Lanka": ["Colombo", "Dehiwala-Mount Lavinia", "Moratuwa", "Sri Jayawardenepura Kotte", "Negombo"],
    "Sweden": ["Stockholm", "Gothenburg", "Malmö", "Uppsala", "Västerås", "Örebro", "Linköping", "Helsingborg", "Jönköping", "Norrköping"],
    "Switzerland": ["Zurich", "Geneva", "Basel", "Bern", "Lausanne", "Winterthur", "Lucerne", "St. Gallen", "Lugano", "Biel/Bienne"],
    "Thailand": ["Bangkok", "Chiang Mai", "Pattaya", "Phuket", "Hat Yai"],
    "Turkey": ["Istanbul", "Ankara", "Izmir", "Bursa", "Adana", "Gaziantep", "Konya", "Antalya", "Kayseri", "Mersin"],
    "Ukraine": ["Kyiv", "Kharkiv", "Odesa", "Dnipro", "Donetsk"],
    "United Arab Emirates": ["Dubai", "Abu Dhabi", "Sharjah", "Al Ain", "Ajman"],
    "United Kingdom": ["London", "Birmingham", "Manchester", "Leeds", "Liverpool", "Sheffield", "Bristol", "Glasgow", "Leicester", "Edinburgh"],
    "United States": ["New York", "Los Angeles", "Chicago", "Houston", "Phoenix", "Philadelphia", "San Antonio", "San Diego", "Dallas", "San Jose"],
    "Uruguay": ["Montevideo", "Salto", "Paysandú", "Las Piedras", "Rivera"],
    "Venezuela": ["Caracas", "Maracaibo", "Valencia", "Barquisimeto", "Maracay"],
    "Vietnam": ["Ho Chi Minh City", "Hanoi", "Da Nang", "Bien Hoa", "Hue"],
}

US_STATES = [
    "Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado",
    "Connecticut", "Delaware", "Florida", "Georgia", "Hawaii", "Idaho",
    "Illinois", "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana",
    "Maine", "Maryland", "Massachusetts", "Michigan", "Minnesota",
    "Mississippi", "Missouri", "Montana", "Nebraska", "Nevada",
    "New Hampshire", "New Jersey", "New Mexico", "New York",
    "North Carolina", "North Dakota", "Ohio", "Oklahoma", "Oregon",
    "Pennsylvania", "Rhode Island", "South Carolina", "South Dakota",
    "Tennessee", "Texas", "Utah", "Vermont", "Virginia", "Washington",
    "West Virginia", "Wisconsin", "Wyoming", "District of Columbia",
]


def get_coordinates_from_dropdown(country: str, city: str, state: str = None) -> tuple:
    """Get coordinates using dropdown selections"""
    if not country or not city:
        return None, "Please select both country and city."

    if country == "United States" and state:
        query = f"{city}, {state}, United States"
    else:
        query = f"{city}, {country}"

    try:
        url = "https://nominatim.openstreetmap.org/search"
        params = {"q": query, "format": "json", "limit": 1}
        headers = {"User-Agent": "ClimateRiskTool/1.0"}

        response = requests.get(url, params=params, headers=headers, timeout=10)
        data = response.json()

        if data:
            lat = float(data[0]["lat"])
            lon = float(data[0]["lon"])
            return (
                lat,
                lon,
            ), f"✅ Location found: {data[0].get('display_name', query)}"
        else:
            return None, f"❌ Location '{city}' not found in {country}."

    except Exception as e:
        return None, f"❌ Error looking up location: {str(e)}"


def create_risk_map(
    lat: float, lon: float, city: str, country: str, risk_analysis: dict = None
) -> str:
    import folium

    # Map centred on the selected city
    m = folium.Map(location=[lat, lon], zoom_start=10, tiles="OpenStreetMap")

    # Default marker colour and risk popup
    marker_color = "blue"
    popup_html = f"<b>{city}, {country}</b><br>Lat: {lat:.4f}<br>Lon: {lon:.4f}"

    # If risk data is available, extend the popup and adjust the marker colour
    if risk_analysis and isinstance(risk_analysis, dict):
        risks = risk_analysis.get("risk_analysis", {})
        popup_html += "<br><b>Risks for this location:</b><br>"
        max_level = 0
        for risk_name, risk_data in risks.items():
            if isinstance(risk_data, dict):
                risk_level = risk_data.get("risk_level", 0)
                max_level = max(max_level, risk_level)
                if risk_level > 80:
                    emoji = "🔴"
                elif risk_level > 60:
                    emoji = "🟠"
                elif risk_level > 40:
                    emoji = "🟡"
                elif risk_level > 20:
                    emoji = "🟢"
                else:
                    emoji = "⚪"
                popup_html += f"{emoji} <b>{risk_name.title()}:</b> {risk_level}/100<br>"

        # Marker colour based on the highest detected risk
        if max_level > 80:
            marker_color = "red"
        elif max_level > 60:
            marker_color = "orange"
        elif max_level > 40:
            marker_color = "lightgreen"
        elif max_level > 20:
            marker_color = "green"
        else:
            marker_color = "blue"

    # Create the main marker
    marker = folium.Marker(
        [lat, lon],
        popup=popup_html,
        tooltip=f"Risks for {city}",
        icon=folium.Icon(color=marker_color, icon="info-sign"),
    )
    marker.add_to(m)

    # Script to open the popup automatically when the map is displayed
    script = f"""
    <script>
        var marker = {marker.get_name()};
        marker.openPopup();
    </script>
    """
    m.get_root().html.add_child(folium.Element(script))

    # Legend
    legend_html = """
    <div style="position: fixed;
                top: 10px; right: 10px; width: 280px; height: auto;
                background-color: white; border: 2px solid grey; z-index: 9999;
                font-size: 13px; padding: 12px; border-radius: 10px;
                box-shadow: 0 4px 8px rgba(0,0,0,0.1);">
        <h4 style="margin: 0 0 10px 0;">🗺️ Legend</h4>
        <div><span style="color:red;">🔴</span> High risk<br>
        <span style="color:orange;">🟠</span> Moderate risk<br>
        <span style="color:yellow;">🟡</span> Mild risk<br>
        <span style="color:green;">🟢</span> Low risk</div>
    </div>
    """
    m.get_root().html.add_child(folium.Element(legend_html))

    return m._repr_html_()


def get_city_suggestions(country: str) -> str:
    """Return city suggestions for selected country"""
    if country in COUNTRIES_AND_CITIES:
        cities = COUNTRIES_AND_CITIES[country]
        return f"💡 Popular cities in {country}: {', '.join(cities[:8])}..."
    return ""
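Illustrative note: a short usage sketch of the helpers above, assuming network access to Nominatim; the "heatwave" risk entry is an example payload, not a fixed schema key.

from mapping_utils import get_coordinates_from_dropdown, create_risk_map, get_city_suggestions

print(get_city_suggestions("France"))  # "💡 Popular cities in France: Paris, Lyon, ..."

coords, status = get_coordinates_from_dropdown("France", "Paris")
print(status)
if coords:
    lat, lon = coords
    # "heatwave" is an illustrative risk name; risk_level 65 renders an orange marker.
    html = create_risk_map(lat, lon, "Paris", "France",
                           {"risk_analysis": {"heatwave": {"risk_level": 65}}})
    # html is a self-contained snippet suitable for embedding in an HTML component.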
orchestrator.py
ADDED
@@ -0,0 +1,79 @@
from datetime import datetime
from risk_agent import RiskAnalysisAgent
from recommendation_agents import (
    EmergencyRecommendationAgent,
    HouseholdAdaptationAgent,
    BusinessContinuityAgent,
    FinancialAdaptationAgent,
)


class ClimateRiskOrchestrator:
    """Main orchestrator that coordinates all agents."""

    def __init__(self, model):
        self.risk_agent = RiskAnalysisAgent(model)
        self.emergency_agent = EmergencyRecommendationAgent(model)
        self.household_agent = HouseholdAdaptationAgent(model)
        self.business_agent = BusinessContinuityAgent(model)
        self.financial_agent = FinancialAdaptationAgent(model)

    def analyze_and_recommend(self, user_query: str, user_profile: dict) -> dict:
        """Main method that coordinates risk analysis and recommendation generation."""
        try:
            print("🔍 Analyzing risks...")
            risk_analysis = self.risk_agent.analyze_risks(user_query)

            recommendations = {}

            print("🚨 Generating emergency recommendations...")
            recommendations["emergency"] = (
                self.emergency_agent.generate_emergency_recommendations(
                    risk_analysis, user_profile
                )
            )

            print("💰 Generating financial recommendations...")
            recommendations["financial"] = (
                self.financial_agent.generate_financial_recommendations(
                    risk_analysis, user_profile
                )
            )

            profile_type = user_profile.get("type", "general").lower()

            if profile_type in [
                "household",
                "residential",
                "general",
                "general public",
            ]:
                print("🏠 Generating household recommendations...")
                recommendations["household"] = (
                    self.household_agent.generate_household_recommendations(
                        risk_analysis, user_profile
                    )
                )

            if profile_type in ["business", "business owner"]:
                print("🏢 Generating business recommendations...")
                recommendations["business"] = (
                    self.business_agent.generate_business_recommendations(
                        risk_analysis, user_profile
                    )
                )

            return {
                "risk_analysis": risk_analysis,
                "recommendations": recommendations,
                "generated_at": datetime.now().isoformat(),
                "user_profile": user_profile,
            }

        except Exception as e:
            return {
                "error": f"Analysis failed: {str(e)}",
                "risk_analysis": {},
                "recommendations": {},
                "generated_at": datetime.now().isoformat(),
            }
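Illustrative note: a minimal driver sketch for the orchestrator, assuming `model` from config.py; the query text and the profile values are examples, not fixed by this commit.

from config import model
from orchestrator import ClimateRiskOrchestrator

orchestrator = ClimateRiskOrchestrator(model)
result = orchestrator.analyze_and_recommend(
    "What climate risks does Lisbon face this decade?",          # illustrative query
    {"type": "household", "location": "Lisbon, Portugal"},        # "household" triggers the household agent
)

print(result["risk_analysis"].get("overall_assessment", ""))
for category, items in result["recommendations"].items():
    print(category, "->", len(items), "recommendations")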
recommendation_agents.py
ADDED
@@ -0,0 +1,355 @@
import json
import ast
from typing import List
import litellm
from smolagents import CodeAgent
from tools import find_local_emergency_resources


class EmergencyRecommendationAgent:
    """Agent focused on emergency preparedness recommendations."""

    def __init__(self, model):
        self.agent = CodeAgent(
            tools=[find_local_emergency_resources],
            model=model,
            additional_authorized_imports=["json", "datetime"],
        )

    def generate_emergency_recommendations(
        self, risk_analysis: dict, user_profile: dict
    ) -> List[str]:
        """Generate emergency preparedness recommendations."""

        prompt = f"""
        You are an emergency preparedness expert. Based on this risk analysis and user profile, generate 3-7 specific, actionable emergency recommendations.

        Risk Analysis: {str(risk_analysis)}
        User Profile: {str(user_profile)}

        Your recommendations should be:
        - Specific and actionable
        - Tailored to the identified risks
        - Appropriate for the user's situation
        - Prioritized by urgency/importance

        Focus on immediate actions they can take to prepare for or mitigate the identified risks.

        IMPORTANT: Return a simple Python list of strings, like this:
        ["Create an emergency kit with 72 hours of supplies", "Identify evacuation routes", "Install smoke detectors"]

        Do not return JSON or any other format - just a Python list.
        """

        try:
            try:
                response = self.agent.run(prompt)
            except Exception as e:
                print(e)
                # response = self.agent.run(prompt)
                # response = litellm.completion(messages=prompt, model="anthropic/claude-sonnet-4-20250514")

            if isinstance(response, list):
                return response
            elif isinstance(response, str):
                try:
                    return ast.literal_eval(response)
                except (ValueError, SyntaxError):
                    try:
                        return json.loads(response)
                    except json.JSONDecodeError:
                        return self._extract_recommendations_from_text(response)
            else:
                return [
                    "Prepare emergency supplies",
                    "Review evacuation plans",
                    "Monitor weather alerts",
                ]

        except Exception as e:
            print(f"Emergency recommendations error: {e}")
            return [
                "Create an emergency kit with 72 hours of supplies",
                "Identify and practice evacuation routes",
                "Keep important documents in waterproof container",
                "Monitor local emergency alerts and warnings",
            ]

    def _extract_recommendations_from_text(self, text: str) -> List[str]:
        """Extract recommendations from text response."""
        lines = text.split("\n")
        recommendations = []
        for line in lines:
            line = line.strip()
            if line and (
                line.startswith("-") or line.startswith("•") or line.startswith("*")
            ):
                recommendations.append(line[1:].strip())
            elif line and line[0].isdigit() and "." in line:
                recommendations.append(line.split(".", 1)[1].strip())
        return (
            recommendations[:7]
            if recommendations
            else ["Prepare emergency supplies", "Review evacuation plans"]
        )


class HouseholdAdaptationAgent:
    """Agent for household-level climate adaptation recommendations."""

    def __init__(self, model):
        self.agent = CodeAgent(
            tools=[], model=model, additional_authorized_imports=["json"]
        )

    def generate_household_recommendations(
        self, risk_analysis: dict, user_profile: dict
    ) -> List[str]:
        """Generate household adaptation recommendations."""

        prompt = f"""
        You are a household climate adaptation specialist. Based on the risk analysis and user profile, generate 3-8 specific recommendations for household-level climate adaptations.

        Risk Analysis: {str(risk_analysis)}
        User Profile: {str(user_profile)}

        Your recommendations should address:
        - Home modifications for identified risks
        - Energy efficiency improvements
        - Comfort and health considerations
        - Cost-effective solutions
        - Long-term resilience building

        Focus on practical, implementable actions that enhance the household's resilience to the identified climate risks.

        IMPORTANT: Return a simple Python list of strings.

        Do not return JSON - just a Python list.
        """

        try:
            try:
                response = self.agent.run(prompt)
            except Exception as e:
                print(e)
                # response = self.agent.run(prompt)
                # response = litellm.completion(messages=prompt, model="anthropic/claude-sonnet-4-20250514")

            if isinstance(response, list):
                return response
            elif isinstance(response, str):
                try:
                    return ast.literal_eval(response)
                except (ValueError, SyntaxError):
                    try:
                        return json.loads(response)
                    except json.JSONDecodeError:
                        return self._extract_recommendations_from_text(response)
            else:
                return [
                    "Improve home insulation",
                    "Install efficient heating/cooling",
                    "Weather-proof windows and doors",
                ]

        except Exception as e:
            print(f"Household recommendations error: {e}")
            return [
                "Improve home insulation to reduce energy costs",
                "Install programmable thermostat",
                "Weather-strip doors and windows",
                "Consider backup power options",
            ]

    def _extract_recommendations_from_text(self, text: str) -> List[str]:
        """Extract recommendations from text response."""
        lines = text.split("\n")
        recommendations = []
        for line in lines:
            line = line.strip()
            if line and (
                line.startswith("-") or line.startswith("•") or line.startswith("*")
            ):
                recommendations.append(line[1:].strip())
            elif line and line[0].isdigit() and "." in line:
                recommendations.append(line.split(".", 1)[1].strip())
        return (
            recommendations[:8]
            if recommendations
            else ["Improve home insulation", "Install efficient heating/cooling"]
        )


class BusinessContinuityAgent:
    """Agent for business continuity and adaptation recommendations."""

    def __init__(self, model):
        self.agent = CodeAgent(
            tools=[], model=model, additional_authorized_imports=["json"]
        )

    def generate_business_recommendations(
        self, risk_analysis: dict, user_profile: dict
    ) -> List[str]:
        """Generate business continuity recommendations."""

        prompt = f"""
        You are a business continuity and climate adaptation consultant. Generate 4-10 specific recommendations for business resilience based on the risk analysis and user profile.

        Risk Analysis: {str(risk_analysis)}
        User Profile: {str(user_profile)}

        Consider:
        - Operational continuity during climate events
        - Supply chain resilience
        - Infrastructure protection
        - Employee safety
        - Financial risk management
        - Market opportunities in climate adaptation

        Provide actionable, business-focused recommendations that address the specific risks identified.

        IMPORTANT: Return a simple Python list of strings.

        Do not return JSON - just a Python list.
        """

        try:
            try:
                response = self.agent.run(prompt)
            except Exception as e:
                print(e)
                # response = self.agent.run(prompt)
                # response = litellm.completion(messages=prompt, model="anthropic/claude-sonnet-4-20250514")

            if isinstance(response, list):
                return response
            elif isinstance(response, str):
                try:
                    return ast.literal_eval(response)
                except (ValueError, SyntaxError):
                    try:
                        return json.loads(response)
                    except json.JSONDecodeError:
                        return self._extract_recommendations_from_text(response)
            else:
                return [
                    "Develop business continuity plan",
                    "Review insurance coverage",
                    "Diversify supply chains",
                ]

        except Exception as e:
            print(f"Business recommendations error: {e}")
            return [
                "Develop comprehensive business continuity plan",
                "Review and update insurance coverage",
                "Diversify supply chain sources",
                "Create employee safety protocols",
            ]

    def _extract_recommendations_from_text(self, text: str) -> List[str]:
        """Extract recommendations from text response."""
        lines = text.split("\n")
        recommendations = []
        for line in lines:
            line = line.strip()
            if line and (
                line.startswith("-") or line.startswith("•") or line.startswith("*")
            ):
                recommendations.append(line[1:].strip())
            elif line and line[0].isdigit() and "." in line:
                recommendations.append(line.split(".", 1)[1].strip())
        return (
            recommendations[:10]
            if recommendations
            else ["Develop business continuity plan", "Review insurance coverage"]
        )


class FinancialAdaptationAgent:
    """Agent focused on financial planning and climate risk economics."""

    def __init__(self, model):
        self.agent = CodeAgent(
            tools=[], model=model, additional_authorized_imports=["json"]
        )

    def generate_financial_recommendations(
        self, risk_analysis: dict, user_profile: dict
    ) -> List[str]:
        """Generate financial planning recommendations for climate risks."""

        prompt = f"""
        You are a financial advisor specializing in climate risk management. Generate 4-7 specific financial recommendations based on the risk analysis.

        Risk Analysis: {str(risk_analysis)}
        User Profile: {str(user_profile)}

        Address:
        - Insurance coverage optimization
        - Emergency fund planning
        - Climate-resilient investments
        - Government incentives and rebates
        - Tax implications of adaptations
        - Long-term financial planning for climate change
        - Risk transfer mechanisms

        Provide actionable financial strategies that help manage the economic impacts of identified climate risks.

        IMPORTANT: Return a simple Python list of strings.

        Do not return JSON - just a Python list.
        """

        try:
            try:
                response = self.agent.run(prompt)
            except Exception as e:
                print(e)
                # response = self.agent.run(prompt)
                # response = litellm.completion(messages=prompt, model="anthropic/claude-sonnet-4-20250514")

            if isinstance(response, list):
                return response
            elif isinstance(response, str):
                try:
                    return ast.literal_eval(response)
                except (ValueError, SyntaxError):
                    try:
                        return json.loads(response)
                    except json.JSONDecodeError:
                        return self._extract_recommendations_from_text(response)
            else:
                return [
                    "Review insurance coverage",
                    "Build emergency fund",
                    "Explore tax incentives",
                ]

        except Exception as e:
            print(f"Financial recommendations error: {e}")
            return [
                "Review and update insurance coverage for climate risks",
                "Build emergency fund covering 3-6 months expenses",
                "Explore government incentives for climate adaptations",
                "Consider climate-resilient investment options",
            ]

    def _extract_recommendations_from_text(self, text: str) -> List[str]:
        """Extract recommendations from text response."""
        lines = text.split("\n")
        recommendations = []
        for line in lines:
            line = line.strip()
            if line and (
                line.startswith("-") or line.startswith("•") or line.startswith("*")
            ):
                recommendations.append(line[1:].strip())
            elif line and line[0].isdigit() and "." in line:
                recommendations.append(line.split(".", 1)[1].strip())
        return (
            recommendations[:7]
            if recommendations
            else ["Review insurance coverage", "Build emergency fund"]
        )
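Illustrative note: each agent falls back to `_extract_recommendations_from_text`, which keeps only bulleted or numbered lines. A small standalone check of that parsing logic (the sample text is illustrative, and the helper is called directly so no model call is made):

from recommendation_agents import EmergencyRecommendationAgent

sample = """Here are some ideas:
- Create an emergency kit with 72 hours of supplies
1. Identify evacuation routes
Some unrelated sentence that is neither bulleted nor numbered.
* Install smoke detectors"""

# The helper never uses self, so it can be exercised as a plain function.
parsed = EmergencyRecommendationAgent._extract_recommendations_from_text(None, sample)
print(parsed)
# -> ['Create an emergency kit with 72 hours of supplies',
#     'Identify evacuation routes',
#     'Install smoke detectors']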
requirements.txt
ADDED
@@ -0,0 +1,15 @@
smolagents
smolagents[litellm,gradio,mcp]
gradio
requests>=2.28.0
litellm>=1.0.0
python-dateutil>=2.8.0
pandas>=1.5.0
plotly>=5.0.0
folium>=0.14.0
urllib3>=1.26.0
pytest>=7.0.0
pytest-asyncio>=0.21.0
matplotlib>=3.7.0
seaborn>=0.12.0
fpdf>=1.7.2
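Illustrative note: config.py above also imports phoenix.otel and openinference.instrumentation.smolagents, which are not pinned in this list, so the runtime environment may need those packages as well. A typical install from the repository root:

pip install -r requirements.txt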
risk_agent.py
ADDED
@@ -0,0 +1,171 @@
import json
import litellm
from smolagents import CodeAgent
from tools import (
    get_coordinates,
    get_earthquake_data,
    get_nasa_fire_data,
    get_full_daily_forecast,
    climate_change_data,
    get_full_air_quality_forecast,
    get_full_marine_daily_forecast,
    get_full_flood_daily_forecast,
    get_full_satellite_radiation,
)


class RiskAnalysisAgent:
    """Agent responsible for analyzing risks based on user input and available data."""

    def __init__(self, model):
        self.agent = CodeAgent(
            tools=[
                get_coordinates,
                get_earthquake_data,
                get_nasa_fire_data,
                get_full_daily_forecast,
                climate_change_data,
                get_full_air_quality_forecast,
                get_full_marine_daily_forecast,
                get_full_flood_daily_forecast,
                get_full_satellite_radiation,
            ],
            model=model,
            additional_authorized_imports=["json", "datetime", "math"],
        )

    def analyze_risks(self, user_query: str) -> dict:
        """Analyze risks based on user query."""

        analysis_prompt = f"""
        You are an expert climate risk analyst. A user has submitted this query: "{user_query}"

        Your task:
        1. Extract the location from the query
        2. Identify what types of risks they're asking about
        3. Gather relevant data using the available tools:
           - Use get_full_daily_forecast for detailed weather data
           - Use climate_change_data for long-term climate projections
           - Use get_full_air_quality_forecast for air quality risks
           - Use get_full_marine_daily_forecast for coastal/marine risks
           - Use get_full_flood_daily_forecast for detailed flood data
           - Use get_full_satellite_radiation for solar radiation data
        4. Analyze the risk levels for each identified hazard
        5. Return a comprehensive risk analysis

        For each risk type you identify, provide:
        - Risk level (0-100 scale)
        - Key factors contributing to the risk
        - Time horizon (immediate, short-term, long-term)
        - Confidence level in your assessment
        - Historical context (when available)
        - Future projections (when available)

        Focus on being thorough but concise. Use your judgment to determine which data sources are most relevant.

        IMPORTANT: Return a valid Python dictionary (not JSON string) in this exact format:
        {{
            "location": {{"city": "CityName", "country": "CountryName", "lat": 0.0, "lon": 0.0}},
            "identified_risks": ["risk1", "risk2"],
            "risk_analysis": {{
                "earthquake": {{
                    "risk_level": 25,
                    "contributing_factors": ["seismic activity", "building codes"],
                    "time_horizon": "long-term",
                    "Detailed analysis": "",
                    "confidence": "medium",
                    "key_insights": "Moderate earthquake risk due to regional seismic activity",
                    "historical_context": "Historical earthquake data analysis",
                    "future_projections": "Projected seismic activity trends"
                }},
                "wildfire": {{
                    "risk_level": 60,
                    "contributing_factors": ["dry conditions", "vegetation"],
                    "time_horizon": "immediate",
                    "Detailed analysis": "",
                    "confidence": "high",
                    "key_insights": "High wildfire risk during dry season",
                    "historical_context": "Past wildfire patterns",
                    "future_projections": "Climate change impact on fire risk"
                }},
                "climate": {{
                    "risk_level": 45,
                    "contributing_factors": ["temperature trends", "precipitation changes"],
                    "time_horizon": "long-term",
                    "confidence": "high",
                    "Detailed analysis": "",
                    "key_insights": "Significant climate change impacts expected",
                    "future_projections": "Climate model predictions"
                }}
            }},
            "overall_assessment": "Very detailed overall risk summary here"
        }}
        """

        try:

            try:
                response = self.agent.run(analysis_prompt)
            except Exception as e:
                print(e)

                # response = litellm.completion(messages=analysis_prompt, model="anthropic/claude-sonnet-4-20250514")

            if isinstance(response, dict):
                return response
            elif isinstance(response, str):
                try:
                    return json.loads(response)
                except json.JSONDecodeError:
                    return {
                        "location": {
                            "city": "Unknown",
                            "country": "Unknown",
                            "lat": 0.0,
                            "lon": 0.0,
                        },
                        "identified_risks": ["general climate risks"],
                        "risk_analysis": {
                            "general": {
                                "risk_level": 30,
                                "contributing_factors": ["climate change"],
                                "time_horizon": "long-term",
                                "confidence": "medium",
                                "key_insights": f"Analysis of query: {user_query}",
                            }
                        },
                        "overall_assessment": f"Climate risk analysis for: {user_query}",
                    }
            else:
                return {
                    "location": {
                        "city": "Unknown",
                        "country": "Unknown",
                        "lat": 0.0,
                        "lon": 0.0,
                    },
                    "identified_risks": ["climate risks"],
                    "risk_analysis": {
                        "general": {
                            "risk_level": 30,
                            "contributing_factors": ["climate factors"],
                            "time_horizon": "medium-term",
                            "confidence": "medium",
                            "key_insights": "General climate risk assessment",
                        }
                    },
                    "overall_assessment": "Basic climate risk analysis completed",
                }
        except Exception as e:
            print(f"Risk analysis error: {e}")
            return {
                "error": f"Risk analysis failed: {str(e)}",
                "location": {
                    "city": "Unknown",
                    "country": "Unknown",
                    "lat": 0.0,
                    "lon": 0.0,
                },
                "identified_risks": [],
                "risk_analysis": {},
                "overall_assessment": "Analysis could not be completed",
            }
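Illustrative note: a usage sketch for the risk agent on its own, assuming `model` from config.py; the query string is illustrative, and the returned dictionary follows the schema requested in the prompt above (with the built-in fallback structure when the model's answer cannot be parsed).

from config import model
from risk_agent import RiskAnalysisAgent

agent = RiskAnalysisAgent(model)
analysis = agent.analyze_risks("Wildfire and flood risk around Athens, Greece")

print(analysis.get("location"))
for name, details in analysis.get("risk_analysis", {}).items():
    print(f"{name}: level {details.get('risk_level')} ({details.get('time_horizon')})")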
tools.py
ADDED
@@ -0,0 +1,721 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
import requests
|
3 |
+
import matplotlib.pyplot as plt
|
4 |
+
import seaborn as sns
|
5 |
+
import tempfile
|
6 |
+
import os
|
7 |
+
from config import NASA_FIRMS_MAP_KEY
|
8 |
+
from datetime import datetime, timedelta
|
9 |
+
from smolagents import tool
|
10 |
+
from fpdf import FPDF
|
11 |
+
|
12 |
+
|
13 |
+
@tool
|
14 |
+
def get_coordinates(city: str) -> dict:
|
15 |
+
"""Get latitude and longitude of a city using OpenStreetMap Nominatim API.
|
16 |
+
|
17 |
+
Args:
|
18 |
+
city: Name of the city to get coordinates for
|
19 |
+
|
20 |
+
Returns:
|
21 |
+
Dict with city name, latitude, longitude, or error message
|
22 |
+
"""
|
23 |
+
url = "https://nominatim.openstreetmap.org/search"
|
24 |
+
params = {"q": city, "format": "json", "limit": 1}
|
25 |
+
headers = {"User-Agent": "ClimateRiskTool/1.0"}
|
26 |
+
try:
|
27 |
+
response = requests.get(url, params=params, headers=headers, timeout=10)
|
28 |
+
data = response.json()
|
29 |
+
if not data:
|
30 |
+
return {"error": f"City '{city}' not found"}
|
31 |
+
return {
|
32 |
+
"city": city,
|
33 |
+
"latitude": float(data[0]["lat"]),
|
34 |
+
"longitude": float(data[0]["lon"]),
|
35 |
+
}
|
36 |
+
except Exception as e:
|
37 |
+
return {"error": str(e)}
|
38 |
+
|
39 |
+
|
40 |
+
@tool
|
41 |
+
def get_weather_forecast(lat: float, lon: float) -> dict:
|
42 |
+
"""Get weather forecast data for risk analysis.
|
43 |
+
|
44 |
+
Args:
|
45 |
+
lat: Latitude coordinate
|
46 |
+
lon: Longitude coordinate
|
47 |
+
|
48 |
+
Returns:
|
49 |
+
Dict with weather forecast data or error message
|
50 |
+
"""
|
51 |
+
url = "https://api.open-meteo.com/v1/forecast"
|
52 |
+
params = {
|
53 |
+
"latitude": lat,
|
54 |
+
"longitude": lon,
|
55 |
+
"daily": [
|
56 |
+
"temperature_2m_max",
|
57 |
+
"temperature_2m_min",
|
58 |
+
"precipitation_sum",
|
59 |
+
"wind_speed_10m_max",
|
60 |
+
"wind_gusts_10m_max",
|
61 |
+
"relative_humidity_2m_min",
|
62 |
+
],
|
63 |
+
"forecast_days": 7,
|
64 |
+
"timezone": "auto",
|
65 |
+
}
|
66 |
+
try:
|
67 |
+
response = requests.get(url, params=params, timeout=10)
|
68 |
+
return response.json()
|
69 |
+
except Exception as e:
|
70 |
+
return {"error": str(e)}
|
71 |
+
|
72 |
+
|
73 |
+
@tool
|
74 |
+
def get_flood_data(lat: float, lon: float) -> dict:
|
75 |
+
"""Get flood forecast data.
|
76 |
+
|
77 |
+
Args:
|
78 |
+
lat: Latitude coordinate
|
79 |
+
lon: Longitude coordinate
|
80 |
+
|
81 |
+
Returns:
|
82 |
+
Dict with flood forecast data or error message
|
83 |
+
"""
|
84 |
+
url = "https://flood-api.open-meteo.com/v1/flood"
|
85 |
+
params = {
|
86 |
+
"latitude": lat,
|
87 |
+
"longitude": lon,
|
88 |
+
"daily": ["river_discharge", "river_discharge_mean", "river_discharge_max"],
|
89 |
+
"forecast_days": 7,
|
90 |
+
}
|
91 |
+
try:
|
92 |
+
response = requests.get(url, params=params, timeout=10)
|
93 |
+
return response.json()
|
94 |
+
except Exception as e:
|
95 |
+
return {"error": str(e)}
|
96 |
+
|
97 |
+
|
98 |
+
@tool
|
99 |
+
def get_earthquake_data(
|
100 |
+
lat: float, lon: float, radius_km: float = 100, days: int = 30
|
101 |
+
) -> dict:
|
102 |
+
"""Get raw earthquake data from USGS.
|
103 |
+
|
104 |
+
Args:
|
105 |
+
lat: Latitude coordinate
|
106 |
+
lon: Longitude coordinate
|
107 |
+
radius_km: Search radius in kilometers (default 100km)
|
108 |
+
days: Number of days to look back (default 30 days)
|
109 |
+
|
110 |
+
Returns:
|
111 |
+
Dict with raw earthquake data from USGS
|
112 |
+
"""
|
113 |
+
url = "https://earthquake.usgs.gov/fdsnws/event/1/query"
|
114 |
+
|
115 |
+
end_date = datetime.now()
|
116 |
+
start_date = end_date - timedelta(days=days)
|
117 |
+
|
118 |
+
params = {
|
119 |
+
"format": "geojson",
|
120 |
+
"starttime": start_date.strftime("%Y-%m-%d"),
|
121 |
+
"endtime": end_date.strftime("%Y-%m-%d"),
|
122 |
+
"latitude": lat,
|
123 |
+
"longitude": lon,
|
124 |
+
"maxradiuskm": radius_km,
|
125 |
+
"minmagnitude": 1.0,
|
126 |
+
"orderby": "time-desc",
|
127 |
+
}
|
128 |
+
|
129 |
+
try:
|
130 |
+
response = requests.get(url, params=params, timeout=15)
|
131 |
+
response.raise_for_status()
|
132 |
+
data = response.json()
|
133 |
+
|
134 |
+
earthquakes = []
|
135 |
+
for feature in data.get("features", []):
|
136 |
+
props = feature["properties"]
|
137 |
+
coords = feature["geometry"]["coordinates"]
|
138 |
+
|
139 |
+
earthquakes.append(
|
140 |
+
{
|
141 |
+
"magnitude": props.get("mag"),
|
142 |
+
"place": props.get("place"),
|
143 |
+
"time": props.get("time"),
|
144 |
+
"depth": coords[2] if len(coords) > 2 else None,
|
145 |
+
"latitude": coords[1],
|
146 |
+
"longitude": coords[0],
|
147 |
+
"alert": props.get("alert"),
|
148 |
+
"significance": props.get("sig"),
|
149 |
+
"event_type": props.get("type"),
|
150 |
+
"title": props.get("title"),
|
151 |
+
}
|
152 |
+
)
|
153 |
+
|
154 |
+
return {
|
155 |
+
"earthquakes": earthquakes,
|
156 |
+
"query_location": {
|
157 |
+
"lat": lat,
|
158 |
+
"lon": lon,
|
159 |
+
"radius_km": radius_km,
|
160 |
+
"days": days,
|
161 |
+
},
|
162 |
+
"data_source": "USGS",
|
163 |
+
}
|
164 |
+
|
165 |
+
except Exception as e:
|
166 |
+
return {"error": str(e)}
|
167 |
+
|
168 |
+
|
169 |
+
@tool
def get_nasa_fire_data(
    lat: float, lon: float, radius_km: float = 50, days: int = 2
) -> dict:
    """Get raw wildfire detection data from NASA FIRMS satellites.

    Args:
        lat: Latitude coordinate
        lon: Longitude coordinate
        radius_km: Search radius in kilometers (default 50km)
        days: Number of days to look back (default 2 days)

    Returns:
        Dict with raw fire detection data from NASA satellites
    """
    if not NASA_FIRMS_MAP_KEY or NASA_FIRMS_MAP_KEY == "your-nasa-firms-api-key-here":
        return {"error": "NASA FIRMS API key not configured in .env file"}

    try:
        # Convert the search radius to an approximate lat/lon bounding box
        # (about 111 km per degree of latitude).
        lat_offset = radius_km / 111.0
        lon_offset = radius_km / (111.0 * abs(math.cos(math.radians(lat))))
        bbox = f"{lat - lat_offset},{lon - lon_offset},{lat + lat_offset},{lon + lon_offset}"

        modis_url = f"https://firms.modaps.eosdis.nasa.gov/api/area/csv/{NASA_FIRMS_MAP_KEY}/MODIS_NRT/{bbox}/{days}"
        viirs_url = f"https://firms.modaps.eosdis.nasa.gov/api/area/csv/{NASA_FIRMS_MAP_KEY}/VIIRS_NOAA20_NRT/{bbox}/{days}"

        all_fires = []

        try:
            modis_response = requests.get(modis_url, timeout=15)
            if modis_response.status_code == 200 and modis_response.text.strip():
                all_fires.extend(_parse_nasa_csv(modis_response.text, "MODIS"))
        except Exception:
            pass

        try:
            viirs_response = requests.get(viirs_url, timeout=15)
            if viirs_response.status_code == 200 and viirs_response.text.strip():
                all_fires.extend(_parse_nasa_csv(viirs_response.text, "VIIRS"))
        except Exception:
            pass

        return {
            "fires": all_fires,
            "query_location": {
                "lat": lat,
                "lon": lon,
                "radius_km": radius_km,
                "days": days,
            },
            "data_source": "NASA_FIRMS",
        }

    except Exception as e:
        return {"error": str(e)}

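# Illustrative call (hypothetical coordinates): pull roughly one day of
# detections within ~75 km and count them.
#
#     fires = get_nasa_fire_data(34.05, -118.24, radius_km=75, days=1)
#     print(len(fires.get("fires", [])))
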
def _parse_nasa_csv(csv_text: str, source: str) -> list:
    """Parse NASA FIRMS CSV data.

    Args:
        csv_text: CSV text data from NASA FIRMS API
        source: Source identifier (MODIS or VIIRS)

    Returns:
        List of fire detection dictionaries
    """
    fires = []
    lines = csv_text.strip().split("\n")

    if len(lines) < 2:
        return fires

    for line in lines[1:]:
        try:
            values = line.split(",")
            if len(values) >= 9:
                fires.append(
                    {
                        "latitude": float(values[0]),
                        "longitude": float(values[1]),
                        "brightness": float(values[2]) if values[2] else 0,
                        "scan": float(values[3]) if values[3] else 0,
                        "track": float(values[4]) if values[4] else 0,
                        "acq_date": values[5],
                        "acq_time": values[6],
                        "satellite": values[7],
                        "confidence": int(values[8]) if values[8].isdigit() else 50,
                        "version": values[9] if len(values) > 9 else "",
                        "bright_t31": (
                            float(values[10]) if len(values) > 10 and values[10] else 0
                        ),
                        "frp": (
                            float(values[11]) if len(values) > 11 and values[11] else 0
                        ),
                        "daynight": values[12] if len(values) > 12 else "",
                        "source": source,
                    }
                )
        except (ValueError, IndexError):
            continue

    return fires

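# Illustrative sketch of the parser on a minimal, made-up CSV payload
# (header plus one detection row in the column order assumed above):
#
#     sample = "latitude,longitude,brightness,scan,track,acq_date,acq_time,satellite,confidence\n"
#     sample += "34.1,-118.2,330.5,1.0,1.0,2024-07-01,2130,Terra,80"
#     print(_parse_nasa_csv(sample, "MODIS")[0]["confidence"])  # -> 80
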
@tool
def find_local_emergency_resources(lat: float, lon: float) -> dict:
    """Find local emergency resources and contacts.

    Args:
        lat: Latitude coordinate
        lon: Longitude coordinate

    Returns:
        Dict with local emergency resources or error message
    """
    try:
        query = f"""
        [out:json][timeout:15];
        (
          node[amenity=hospital](around:10000,{lat},{lon});
          node[amenity=fire_station](around:10000,{lat},{lon});
          node[amenity=police](around:10000,{lat},{lon});
        );
        out center meta;
        """

        response = requests.post(
            "https://overpass-api.de/api/interpreter", data=query, timeout=20
        )

        if response.status_code == 200:
            data = response.json()
            resources = []

            for element in data.get("elements", [])[:5]:
                tags = element.get("tags", {})
                resources.append(
                    {
                        "name": tags.get("name", "Unnamed facility"),
                        "type": tags.get("amenity", "unknown"),
                        "latitude": element.get("lat", lat),
                        "longitude": element.get("lon", lon),
                    }
                )

            return {"local_resources": resources}

        return {"local_resources": []}

    except Exception as e:
        return {"error": str(e)}

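# Illustrative usage sketch: the Overpass query above searches a 10 km radius,
# so a successful call returns at most five entries shaped like
# {"name": ..., "type": "hospital" / "fire_station" / "police", "latitude": ..., "longitude": ...}.
#
#     resources = find_local_emergency_resources(47.7486, -3.3667)
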
@tool
def generate_analysis_report(
    data: dict, filename: str = "climate_risk_report.pdf"
) -> dict:
    """Generate a consolidated analysis report with visualizations.

    Args:
        data: Consolidated data from various tools, expected to include:
            - weather forecast
            - flood data
            - earthquake data
            - fire data
        filename: Desired filename for the exported PDF report

    Returns:
        Dict with success message and file path or error
    """
    try:
        # Temporary directory for plots
        with tempfile.TemporaryDirectory() as temp_dir:
            # Initialize the PDF
            pdf = FPDF()
            pdf.set_auto_page_break(auto=True, margin=15)
            pdf.add_page()
            pdf.set_font("Arial", size=12)
            pdf.set_text_color(50, 50, 50)

            # Add title
            pdf.set_font("Arial", style="B", size=16)
            pdf.cell(0, 10, "Climate Risk Analysis Report", ln=True, align="C")
            pdf.ln(10)  # Line break

            # Helper to save a figure to the temp directory and return its path
            def save_plot(fig, plot_name):
                path = f"{temp_dir}/{plot_name}.png"
                fig.savefig(path)
                plt.close(fig)
                return path

            # Plot weather data
            weather_data = data.get("weather_forecast", {}).get("daily", {})
            if weather_data:
                dates = [
                    d for d in range(1, len(weather_data["temperature_2m_max"]) + 1)
                ]
                weather_df = {
                    "Day": dates,
                    "Max Temperature (°C)": weather_data["temperature_2m_max"],
                    "Min Temperature (°C)": weather_data["temperature_2m_min"],
                    "Precipitation (mm)": weather_data["precipitation_sum"],
                }

                fig, ax = plt.subplots(figsize=(8, 5))
                sns.lineplot(
                    x="Day",
                    y="Max Temperature (°C)",
                    data=weather_df,
                    ax=ax,
                    label="Max Temp",
                    color="red",
                )
                sns.lineplot(
                    x="Day",
                    y="Min Temperature (°C)",
                    data=weather_df,
                    ax=ax,
                    label="Min Temp",
                    color="blue",
                )
                sns.barplot(
                    x="Day",
                    y="Precipitation (mm)",
                    data=weather_df,
                    ax=ax,
                    color="gray",
                    alpha=0.5,
                )
                ax.set_title("Weather Forecast")
                ax.set_xlabel("Day")
                ax.set_ylabel("Values")
                ax.legend()

                weather_plot_path = save_plot(fig, "weather_plot")
                pdf.image(weather_plot_path, x=10, y=None, w=180)
                pdf.ln(10)

            # Plot earthquake data
            earthquake_data = data.get("earthquake_data", {}).get("earthquakes", [])
            if earthquake_data:
                magnitudes = [
                    eq["magnitude"] for eq in earthquake_data if eq.get("magnitude")
                ]
                depths = [eq["depth"] for eq in earthquake_data if eq.get("depth")]
                places = [eq["place"] for eq in earthquake_data]

                fig, ax = plt.subplots(figsize=(8, 5))
                sns.scatterplot(
                    x=depths, y=magnitudes, hue=places, ax=ax, palette="tab10", s=100
                )
                ax.set_title("Earthquake Analysis")
                ax.set_xlabel("Depth (km)")
                ax.set_ylabel("Magnitude")
                ax.legend(bbox_to_anchor=(1.05, 1), loc="upper left")

                earthquake_plot_path = save_plot(fig, "earthquake_plot")
                pdf.image(earthquake_plot_path, x=10, y=None, w=180)
                pdf.ln(10)

            # Plot fire data
            fire_data = data.get("fire_data", {}).get("fires", [])
            if fire_data:
                brightness = [fire["brightness"] for fire in fire_data]
                confidence = [fire["confidence"] for fire in fire_data]

                fig, ax = plt.subplots(figsize=(8, 5))
                sns.histplot(
                    brightness,
                    bins=20,
                    ax=ax,
                    kde=True,
                    color="orange",
                    label="Brightness",
                )
                sns.histplot(
                    confidence,
                    bins=20,
                    ax=ax,
                    kde=True,
                    color="green",
                    alpha=0.5,
                    label="Confidence",
                )
                ax.set_title("Wildfire Brightness vs Confidence")
                ax.set_xlabel("Value")
                ax.legend()

                fire_plot_path = save_plot(fig, "fire_plot")
                pdf.image(fire_plot_path, x=10, y=None, w=180)
                pdf.ln(10)

            # Save the PDF outside the temporary plot directory so the file
            # still exists after the TemporaryDirectory context is cleaned up.
            pdf_output_path = os.path.join(tempfile.gettempdir(), filename)
            pdf.output(pdf_output_path)
            return {"success": True, "file_path": pdf_output_path}

    except Exception as e:
        return {"error": str(e)}

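# Illustrative sketch of the consolidated `data` argument: the keys are the
# ones the function reads above, the calls shown are hypothetical.
#
#     report = generate_analysis_report({
#         "weather_forecast": get_full_daily_forecast(34.05, -118.24),
#         "earthquake_data": get_earthquake_data(34.05, -118.24),
#         "fire_data": get_nasa_fire_data(34.05, -118.24),
#     })
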
@tool
def get_full_daily_forecast(lat: float, lon: float) -> dict:
    """
    Get all available daily weather forecast parameters from Open-Meteo API.
    Args:
        lat: Latitude.
        lon: Longitude.
    Returns:
        Dict with all daily forecast data or error.
    """
    daily_params = [
        "temperature_2m_max", "temperature_2m_mean", "temperature_2m_min",
        "apparent_temperature_max", "apparent_temperature_mean", "apparent_temperature_min",
        "precipitation_sum", "rain_sum", "showers_sum", "snowfall_sum",
        "precipitation_hours",
        "precipitation_probability_max", "precipitation_probability_mean", "precipitation_probability_min",
        "weather_code", "sunrise", "sunset",
        "sunshine_duration", "daylight_duration",
        "wind_speed_10m_max", "wind_gusts_10m_max", "wind_direction_10m_dominant",
        "shortwave_radiation_sum", "et0_fao_evapotranspiration",
        "uv_index_max", "uv_index_clear_sky_max"
    ]
    url = "https://api.open-meteo.com/v1/forecast"
    params = {
        "latitude": lat,
        "longitude": lon,
        "timezone": "auto",
        "daily": ",".join(daily_params)
    }
    try:
        response = requests.get(url, params=params, timeout=10)
        return response.json()
    except Exception as e:
        return {"error": str(e)}

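# Illustrative sketch: Open-Meteo returns parallel arrays under "daily", e.g.
#
#     forecast = get_full_daily_forecast(48.85, 2.35)
#     daily = forecast.get("daily", {})
#     for day, tmax in zip(daily.get("time", []), daily.get("temperature_2m_max", [])):
#         print(day, tmax)
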
@tool
def climate_change_data(
    lat: float,
    lon: float,
    start_date: str = "1950-01-01",
    end_date: str = "2050-12-31",
    models: list[str] = None
) -> dict:
    """
    Get all available daily climate parameters from Open-Meteo Climate API.
    Args:
        lat: Latitude.
        lon: Longitude.
        start_date: Start date in yyyy-mm-dd (default 1950-01-01).
        end_date: End date in yyyy-mm-dd (default 2050-12-31).
        models: Optional list of climate models (default: all models).
    Returns:
        Dict with all daily climate data or error.
    """
    daily_params = [
        "temperature_2m_max", "temperature_2m_min", "temperature_2m_mean",
        "cloud_cover_mean",
        "relative_humidity_2m_max", "relative_humidity_2m_min", "relative_humidity_2m_mean",
        "soil_moisture_0_to_10cm_mean",
        "precipitation_sum", "rain_sum", "snowfall_sum",
        "wind_speed_10m_mean", "wind_speed_10m_max",
        "pressure_msl_mean",
        "shortwave_radiation_sum"
    ]
    if models is None:
        models = [
            "CMCC_CM2_VHR4", "FGOALS_f3_H", "HiRAM_SIT_HR",
            "MRI_AGCM3_2_S", "EC_Earth3P_HR", "MPI_ESM1_2_XR", "NICAM16_8S"
        ]
    url = "https://climate-api.open-meteo.com/v1/climate"
    params = {
        "latitude": lat,
        "longitude": lon,
        "start_date": start_date,
        "end_date": end_date,
        "models": ",".join(models),
        "daily": ",".join(daily_params),
        "timezone": "auto"
    }
    try:
        response = requests.get(url, params=params, timeout=60)
        return response.json()
    except Exception as e:
        return {"error": str(e)}

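# Illustrative sketch: narrowing the request to a single climate model keeps
# the (potentially large) 1950-2050 response smaller; the coordinates and
# model choice below are assumptions.
#
#     projection = climate_change_data(48.85, 2.35, models=["EC_Earth3P_HR"])
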
@tool
def get_full_air_quality_forecast(
    lat: float,
    lon: float,
    forecast_days: int = 5,
    past_days: int = 0,
    domain: str = "auto"
) -> dict:
    """
    Get all available hourly air quality forecast parameters from Open-Meteo Air Quality API.
    Args:
        lat: Latitude.
        lon: Longitude.
        forecast_days: Number of forecast days (default 5, max 7).
        past_days: Number of past days (default 0, max 92).
        domain: 'auto', 'cams_europe', or 'cams_global'.
    Returns:
        Dict with all hourly air quality data or error.
    """
    hourly_params = [
        "pm10", "pm2_5", "carbon_monoxide", "carbon_dioxide",
        "nitrogen_dioxide", "sulphur_dioxide", "ozone", "aerosol_optical_depth",
        "dust", "uv_index", "uv_index_clear_sky", "ammonia", "methane",
        "alder_pollen", "birch_pollen", "grass_pollen", "mugwort_pollen",
        "olive_pollen", "ragweed_pollen", "european_aqi", "us_aqi"
    ]

    url = "https://air-quality-api.open-meteo.com/v1/air-quality"
    params = {
        "latitude": lat,
        "longitude": lon,
        "forecast_days": min(max(forecast_days, 0), 7),
        "past_days": min(max(past_days, 0), 92),
        "hourly": ",".join(hourly_params),
        "domains": domain,
        "timezone": "auto",
    }
    try:
        response = requests.get(url, params=params, timeout=30)
        return response.json()
    except Exception as e:
        return {"error": str(e)}

@tool
def get_full_marine_daily_forecast(lat: float, lon: float) -> dict:
    """
    Get all available daily marine forecast parameters from Open-Meteo Marine API.
    Args:
        lat: Latitude.
        lon: Longitude.
    Returns:
        Dict with all daily marine forecast data or error.
    """
    daily_params = [
        "wave_height_max", "wind_wave_height_max", "swell_wave_height_max",
        "wave_direction_dominant", "wind_wave_direction_dominant", "swell_wave_direction_dominant",
        "wave_period_max", "wind_wave_period_max", "swell_wave_period_max",
        "wind_wave_peak_period_max", "swell_wave_peak_period_max"
    ]
    url = "https://marine-api.open-meteo.com/v1/marine"
    params = {
        "latitude": lat,
        "longitude": lon,
        "timezone": "auto",
        "daily": ",".join(daily_params)
    }
    try:
        response = requests.get(url, params=params, timeout=10)
        return response.json()
    except Exception as e:
        return {"error": str(e)}

@tool
def get_full_flood_daily_forecast(lat: float, lon: float) -> dict:
    """
    Get all available daily flood parameters from Open-Meteo Flood API.
    Args:
        lat: Latitude.
        lon: Longitude.
    Returns:
        Dict with all daily flood forecast data or error.
    """
    daily_params = [
        "river_discharge",
        "river_discharge_mean",
        "river_discharge_median",
        "river_discharge_max",
        "river_discharge_min",
        "river_discharge_p25",
        "river_discharge_p75"
    ]
    url = "https://flood-api.open-meteo.com/v1/flood"
    params = {
        "latitude": lat,
        "longitude": lon,
        "daily": ",".join(daily_params)
    }
    try:
        response = requests.get(url, params=params, timeout=10)
        return response.json()
    except Exception as e:
        return {"error": str(e)}

@tool
def get_full_satellite_radiation(
    lat: float,
    lon: float,
    start_date: str = None,
    end_date: str = None,
    hourly_native: bool = False,
    tilt: int = 0,
    azimuth: int = 0
) -> dict:
    """
    Get all available hourly satellite solar radiation parameters from Open-Meteo Satellite API.
    Args:
        lat: Latitude.
        lon: Longitude.
        start_date: (optional) Start date (yyyy-mm-dd). If None, today.
        end_date: (optional) End date (yyyy-mm-dd). If None, today.
        hourly_native: Use native satellite temporal resolution (10/15/30min) if True, else hourly.
        tilt: Tilt for GTI (default 0 = horizontal).
        azimuth: Azimuth for GTI (default 0 = south).
    Returns:
        Dict with all hourly satellite solar radiation data or error.
    """
    hourly_params = [
        "shortwave_radiation", "diffuse_radiation", "direct_radiation",
        "direct_normal_irradiance", "global_tilted_irradiance",
        "terrestrial_radiation",
        "shortwave_radiation_instant", "diffuse_radiation_instant", "direct_radiation_instant",
        "direct_normal_irradiance_instant", "global_tilted_irradiance_instant",
        "terrestrial_radiation_instant"
    ]
    url = "https://satellite-api.open-meteo.com/v1/archive"

    today = datetime.utcnow().date()
    if start_date is None:
        start_date = str(today)
    if end_date is None:
        end_date = str(today)

    params = {
        "latitude": lat,
        "longitude": lon,
        "start_date": start_date,
        "end_date": end_date,
        "hourly": ",".join(hourly_params),
        "models": "satellite_radiation_seamless",
        "timezone": "auto",
        "tilt": tilt,
        "azimuth": azimuth,
    }
    if hourly_native:
        params["hourly_native"] = "true"

    try:
        response = requests.get(url, params=params, timeout=30)
        return response.json()
    except Exception as e:
        return {"error": str(e)}
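# Illustrative sketch: for a tilted PV panel, pass the panel geometry so the
# global_tilted_irradiance series reflects it (coordinates and angles below
# are assumptions):
#
#     radiation = get_full_satellite_radiation(34.05, -118.24, tilt=30, azimuth=0)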
ui.py
ADDED
@@ -0,0 +1,655 @@
import gradio as gr
from datetime import datetime
import sys
import threading
from agents.orchestrator import ClimateRiskOrchestrator
from tools.mapping_utils import (
    COUNTRIES_AND_CITIES,
    US_STATES,
    get_coordinates_from_dropdown,
    create_risk_map,
    get_city_suggestions,
)


# === LogCatcher ===
class LogCatcher:
    def __init__(self):
        self.buffer = ""
        self.lock = threading.Lock()
        self._stdout = sys.stdout
        self._stderr = sys.stderr

    def write(self, msg):
        with self.lock:
            self.buffer += msg
            self._stdout.write(msg)

    def flush(self):
        pass

    def get_logs(self):
        with self.lock:
            return self.buffer

    def clear(self):
        with self.lock:
            self.buffer = ""

    def redirect(self):
        sys.stdout = self
        sys.stderr = self

    def restore(self):
        sys.stdout = self._stdout
        sys.stderr = self._stderr

    def isatty(self):
        return False

    def fileno(self):
        return self._stdout.fileno()


logcatcher = LogCatcher()
logcatcher.redirect()

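# Minimal sketch of how the catcher is consumed in this module: agent prints
# accumulate in `logcatcher.buffer` (while still echoing to the real stdout),
# and the Gradio timers below poll `logcatcher.get_logs()` to render them in
# the "Agentic Logs" textboxes.
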
class ClimateRiskUI:
    """User interface for the climate risk system with dropdown and map functionality."""

    def __init__(self, model):
        self.orchestrator = ClimateRiskOrchestrator(model)
        self.theme = gr.themes.Soft(
            primary_hue="blue", secondary_hue="gray", neutral_hue="slate"
        )

    def update_business_visibility(self, profile_type):
        show_business = profile_type == "Business Owner"
        return gr.Dropdown(visible=show_business)

    def analyze_with_dropdown(
        self,
        country,
        city,
        state,
        profile_type,
        business_type,
        vulnerable_groups,
    ):
        logcatcher.clear()

        if not country or not city:
            return (
                "Please select both country and city.",
                "",
                "",
            )

        coords_result, validation_message = get_coordinates_from_dropdown(country, city, state)
        if coords_result is None:
            return validation_message, "", ""

        lat, lon = coords_result

        state_info = f", {state}" if state else ""
        location_full = f"{city}{state_info}, {country}"

        base_query = f"Perform a comprehensive climate risk assessment for {location_full}."

        profile_context = ""
        if profile_type.lower() == "business owner":
            business_detail = f" as a {business_type}" if business_type else ""
            profile_context = (
                f" Focus on business continuity risks{business_detail}, including supply chain vulnerabilities, operational disruptions, infrastructure threats, customer safety, inventory protection, and revenue continuity. Consider industry-specific vulnerabilities and regulatory compliance requirements."
            )
        elif profile_type.lower() == "electric utility":
            profile_context = " Emphasize electric utility risks including power outages, overloads, low reserve generation capacity, extreme weather impacts on electric utility assets, and catastrophic wildfire potential."
        elif profile_type.lower() == "emergency manager":
            profile_context = " Prioritize emergency management perspectives including evacuation planning, critical infrastructure vulnerabilities, community preparedness needs, and multi-hazard scenarios."
        else:
            profile_context = " Focus on residential safety, household preparedness, health impacts, and community-level risks."

        vulnerable_context = ""
        if vulnerable_groups:
            groups_text = ", ".join(vulnerable_groups)
            vulnerable_context = f" Pay special attention to impacts on vulnerable populations: {groups_text}."

        analysis_requirements = (
            " Analyze earthquake, wildfire, flood, and extreme weather risks. Provide specific risk levels (0-100 scale), contributing factors, time horizons, and confidence levels. Include recent data and current conditions."
        )

        user_query = base_query + profile_context + vulnerable_context + analysis_requirements

        user_profile = {
            "type": profile_type.lower(),
            "business_type": business_type if profile_type.lower() == "business owner" else None,
            "vulnerable_groups": vulnerable_groups or [],
        }

        print(f"[{datetime.now()}] Analysis: {user_query}")
        result = self.orchestrator.analyze_and_recommend(user_query, user_profile)

        if "error" in result:
            print(f"[ERROR] {result['error']}")
            return f"Error: {result['error']}", "", ""

        risk_summary = self._format_risk_analysis(result["risk_analysis"])
        recommendations_text = self._format_recommendations(result["recommendations"], profile_type)
        enhanced_map = create_risk_map(lat, lon, city, country, result["risk_analysis"])

        return risk_summary, recommendations_text, enhanced_map

    def update_map_from_location(self, country, city, state=None):
        if not country or not city:
            return "Please select both country and city.", ""
        coords_result, validation_message = get_coordinates_from_dropdown(country, city, state)
        if coords_result is None:
            return validation_message, ""
        lat, lon = coords_result
        risk_map = create_risk_map(lat, lon, city, country)
        return validation_message, risk_map

    def update_cities(self, country):
        suggestions = get_city_suggestions(country)
        show_state = country == "United States"
        country_centers = {
            "France": (48.8566, 2.3522),
            "United States": (39.8283, -98.5795),
            "United Kingdom": (51.5074, -0.1278),
            "Germany": (52.5200, 13.4050),
            "Japan": (35.6762, 139.6503),
            "Canada": (45.4215, -75.7040),
            "Australia": (-35.2809, 149.1300),
            "Italy": (41.9028, 12.4964),
            "Spain": (40.4168, -3.7038),
            "China": (39.9042, 116.4074),
            "India": (28.6139, 77.2090),
            "Brazil": (-15.7975, -47.8919),
        }
        lat, lon = country_centers.get(country, (48.8566, 2.3522))
        basic_map = create_risk_map(lat, lon, f"Select a city in {country}", country)
        return suggestions, gr.Dropdown(visible=show_state), basic_map

    def analyze_user_input(
        self,
        user_query: str,
        profile_type: str,
        business_type: str,
        vulnerable_groups: list = None,
    ):
        logcatcher.clear()

        if not user_query.strip():
            return (
                "Please enter your climate risk question or location.",
                "",
                "<div style='text-align: center; padding: 50px; background-color: #f0f0f0; border-radius: 10px;'>Map will appear here after analysis.</div>",
            )

        user_profile = {
            "type": profile_type.lower(),
            "business_type": business_type if profile_type.lower() == "business owner" else None,
            "vulnerable_groups": vulnerable_groups or [],
        }

        print(f"[{datetime.now()}] Analysis: {user_query}")
        result = self.orchestrator.analyze_and_recommend(user_query, user_profile)

        if "error" in result:
            print(f"[ERROR] {result['error']}")
            return f"Error: {result['error']}", "", ""

        risk_summary = self._format_risk_analysis(result["risk_analysis"])
        recommendations_text = self._format_recommendations(result["recommendations"], profile_type)

        location = result["risk_analysis"].get("location", {})
        lat = location.get("lat", 0)
        lon = location.get("lon", 0)
        city = location.get("city", "Unknown")
        country = location.get("country", "Unknown")

        enhanced_map = create_risk_map(lat, lon, city, country, result["risk_analysis"])

        return risk_summary, recommendations_text, enhanced_map

    def _format_risk_analysis(self, risk_analysis: dict) -> str:
        if not risk_analysis or "error" in risk_analysis:
            return "Risk analysis not available or failed."

        formatted = "# 🌍 Climate Risk Analysis\n\n"

        location = risk_analysis.get("location", {})
        if location:
            formatted += f"**Location:** {location.get('city', 'Unknown')}, {location.get('country', '')}\n"
            formatted += f"**Coordinates:** {location.get('lat', 0):.4f}°N, {location.get('lon', 0):.4f}°E\n\n"

        formatted += f"**Analysis Date:** {datetime.now().strftime('%Y-%m-%d %H:%M')}\n\n"

        overall = risk_analysis.get("overall_assessment", "No overall assessment available.")
        formatted += f"## 📊 Overall Assessment\n{overall}\n\n"

        risks = risk_analysis.get("risk_analysis", {})
        if risks:
            formatted += "## 🎯 Individual Risk Assessment\n\n"
            for risk_name, risk_data in risks.items():
                if isinstance(risk_data, dict):
                    risk_level = risk_data.get("risk_level", 0)
                    if risk_level > 80:
                        emoji = "🔴"
                        level_text = "VERY HIGH"
                    elif risk_level > 60:
                        emoji = "🟠"
                        level_text = "HIGH"
                    elif risk_level > 40:
                        emoji = "🟡"
                        level_text = "MODERATE"
                    elif risk_level > 20:
                        emoji = "🟢"
                        level_text = "LOW"
                    else:
                        emoji = "⚪"
                        level_text = "MINIMAL"
                    formatted += f"### {emoji} {risk_name.title()} Risk\n"
                    formatted += f"**Risk Level:** {level_text} ({risk_level}/100)\n"
                    formatted += f"**Time Horizon:** {risk_data.get('time_horizon', 'Unknown')}\n"
                    formatted += f"**Confidence:** {risk_data.get('confidence', 'Unknown')}\n\n"
                    if risk_data.get("key_insights"):
                        formatted += f"**Analysis:** {risk_data['key_insights']}\n\n"
                    factors = risk_data.get("contributing_factors", [])
                    if factors:
                        formatted += f"**Key Factors:** {', '.join(factors)}\n\n"
        return formatted

    def _format_recommendations(self, recommendations: dict, profile_type: str) -> str:
        if not recommendations:
            return "No recommendations available."
        formatted = f"# 🎯 Personalized Recommendations for {profile_type} **[survivalist mode]**\n\n"
        if "emergency" in recommendations:
            formatted += "## 🚨 Emergency Preparedness\n"
            for rec in recommendations["emergency"]:
                formatted += f"- {rec}\n"
            formatted += "\n"
        if "household" in recommendations:
            formatted += "## 🏠 Household Adaptations\n"
            for rec in recommendations["household"]:
                formatted += f"- {rec}\n"
            formatted += "\n"
        if "business" in recommendations:
            formatted += "## 🏢 Business Continuity\n"
            for rec in recommendations["business"]:
                formatted += f"- {rec}\n"
            formatted += "\n"
        if "financial" in recommendations:
            formatted += "## 💰 Financial Planning\n"
            for rec in recommendations["financial"]:
                formatted += f"- {rec}\n"
            formatted += "\n"
        formatted += "---\n"
        formatted += "*Recommendations generated by AI agents based on current risk analysis and your profile.*"
        return formatted

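    # Illustrative sketch of the `recommendations` shape consumed above; the
    # section keys are the ones the formatter checks, the example values are
    # made up:
    #
    #     {"emergency": ["Keep a 3-day water supply"], "household": [...],
    #      "business": [...], "financial": [...]}
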
    def create_interface(self):
        def get_logs():
            return logcatcher.get_logs()

        with gr.Blocks(
            theme=self.theme, title="🛰️ CAVA-AI – Agentic AI based Climate Adaptation & Vulnerability Assessment Tool"
        ) as app:

            gr.Markdown(
                """
                # 🛰️ CAVA-AI – Agentic AI based Climate Adaptation & Vulnerability Assessment Tool

                <div style='background: linear-gradient(90deg, #f6f8fa 0%, #e2eafc 100%); border-radius: 10px; padding: 16px 18px; font-size: 16px; margin-bottom: 10px;'>
                <b>🤖 What does CAVA-AI do?</b>
                <br><br>
                CAVA-AI's AI agents instantly analyze climate risks <b>(
                🌪️ Weather,
                🌊 Flood,
                🌍 Earthquake,
                🔥 Wildfire,
                🌫️ Air quality,
                📈 Climate trends,
                ☀️ Solar radiation,
                🌊 Marine forecast
                )</b> for a specified location, providing you with clear, actionable recommendations.
                <br><br>
                <i>Analysis is fully automated, always up to date, and based on leading data sources: OpenStreetMap 🗺️, Open-Meteo 🌦️, USGS 🌎, NASA FIRMS 🔥.</i>
                <br><br>
                <b>How to use CAVA-AI?</b><br>
                Use the <b>quick location selection</b> (dropdowns and map) 🌍, or ask complex, personalized questions in <b>natural language</b> 💬.
                </div>
                """
            )

            with gr.Tabs():
                with gr.TabItem("📍 Quick Location Selection"):
                    with gr.Row():
                        with gr.Column():
                            country_dropdown = gr.Dropdown(
                                choices=list(COUNTRIES_AND_CITIES.keys()),
                                label="Select Country",
                                value="United States",
                                interactive=True,
                            )
                            city_input = gr.Textbox(
                                label="Enter City Name",
                                placeholder="e.g., Los Angeles, San Francisco, San Diego, ...",
                                value="Pomona",
                                interactive=True,
                                info="Enter any city name in the selected country",
                            )
                            state_dropdown = gr.Dropdown(
                                choices=US_STATES,
                                label="Select State (US only)",
                                value="California",
                                visible=False,
                                interactive=True,
                                info="Select state for US locations",
                            )
                            city_suggestions = gr.Markdown(
                                get_city_suggestions("Los Angeles"), visible=True
                            )

                        with gr.Column():
                            profile_dropdown = gr.Dropdown(
                                choices=[
                                    "General Public",
                                    "Business Owner",
                                    "Electric Utility",
                                    "Emergency Manager",
                                ],
                                label="Your Profile",
                                value="General Public",
                            )
                            vulnerable_groups = gr.CheckboxGroup(
                                choices=[
                                    "Elderly",
                                    "Children",
                                    "Chronic Health Conditions",
                                    "Pregnant",
                                ],
                                label="Vulnerable Groups in Household",
                            )
                            business_type_dropdown = gr.Dropdown(
                                choices=[
                                    "Restaurant/Food Service",
                                    "Retail Store",
                                    "Manufacturing",
                                    "Construction",
                                    "Healthcare Facility",
                                    "Educational Institution",
                                    "Technology/Software",
                                    "Transportation/Logistics",
                                    "Tourism/Hospitality",
                                    "Financial Services",
                                    "Real Estate",
                                    "Agriculture/Farming",
                                    "Energy/Utilities",
                                    "Entertainment/Events",
                                    "Professional Services",
                                    "Small Office",
                                    "Warehouse/Distribution",
                                    "Other",
                                ],
                                label="Business Type",
                                value="Retail Store",
                                visible=False,
                                interactive=True,
                                info="Select your business type for specialized recommendations",
                            )

                    with gr.Row():
                        analyze_location_btn = gr.Button(
                            "🔍 Analyze This Location", variant="primary", size="lg"
                        )

                    with gr.Row():
                        gr.HTML("""
                        <div style="display: flex; align-items: center; gap: 10px;">
                            <h3 style="margin: 0;">🛰️ Agentic Logs</h3>
                        </div>
                        """)

                    with gr.Row():
                        logs_box = gr.Textbox(
                            value=logcatcher.get_logs(),
                            label="Logs",
                            lines=17,
                            max_lines=25,
                            interactive=False,
                            elem_id="terminal_logs",
                            show_copy_button=True,
                            container=False,
                        )
                        logs_timer = gr.Timer(0.5)
                        logs_timer.tick(get_logs, None, logs_box)

                    with gr.Row():
                        location_map = gr.HTML(
                            create_risk_map(47.7486, -3.3667, "Lorient", "France"),
                            label="Interactive Risk Map",
                        )

                    with gr.Row():
                        location_status = gr.Markdown("", visible=True)

                    # Analysis summary in a custom frame (CSS)
                    with gr.Row():
                        dropdown_risk_summary = gr.Markdown(
                            "Select a location above to begin analysis.",
                            label="Risk Assessment Summary",
                            elem_id="risk_summary_box",
                        )

                    # Recommendations in a custom frame (CSS)
                    with gr.Row():
                        dropdown_recommendations = gr.Markdown(
                            "Recommendations will appear here after analysis.",
                            label="AI-Generated Recommendations",
                            elem_id="recommendations_box",
                        )

with gr.TabItem("💬 Natural Language Query"):
|
452 |
+
with gr.Row():
|
453 |
+
with gr.Column(scale=2):
|
454 |
+
user_query = gr.Textbox(
|
455 |
+
label="Your Climate Risk Question",
|
456 |
+
placeholder="Will Southern California experience more wildfires this summer?",
|
457 |
+
lines=3,
|
458 |
+
info="Be as specific as possible about location, timeframe, and what you're concerned about.",
|
459 |
+
)
|
460 |
+
gr.Markdown(
|
461 |
+
"""
|
462 |
+
**Examples:**
|
463 |
+
- "What are the wildfire risks in Southern California this summer?"
|
464 |
+
- "I live in San Joaquin Valley, can I expect power outages this week ?"
|
465 |
+
- "I'm planning to move to Long Beach, what climate risks should I be aware of?"
|
466 |
+
- "How should I prepare for climate change?"
|
467 |
+
- "What ermergency preparations should SCE make for possible earthquakes?"
|
468 |
+
"""
|
469 |
+
)
|
470 |
+
|
471 |
+
with gr.Column(scale=1):
|
472 |
+
nl_profile_type = gr.Dropdown(
|
473 |
+
choices=[
|
474 |
+
"General Public",
|
475 |
+
"Business Owner",
|
476 |
+
"Electric Utility",
|
477 |
+
"Emergency Manager",
|
478 |
+
],
|
479 |
+
label="Your Profile",
|
480 |
+
value="General Public",
|
481 |
+
)
|
482 |
+
|
483 |
+
nl_business_type_dropdown = gr.Dropdown(
|
484 |
+
choices=[
|
485 |
+
"Restaurant/Food Service",
|
486 |
+
"Retail Store",
|
487 |
+
"Manufacturing",
|
488 |
+
"Construction",
|
489 |
+
"Healthcare Facility",
|
490 |
+
"Educational Institution",
|
491 |
+
"Technology/Software",
|
492 |
+
"Transportation/Logistics",
|
493 |
+
"Tourism/Hospitality",
|
494 |
+
"Financial Services",
|
495 |
+
"Real Estate",
|
496 |
+
"Agriculture/Farming",
|
497 |
+
"Energy/Utilities",
|
498 |
+
"Entertainment/Events",
|
499 |
+
"Professional Services",
|
500 |
+
"Small Office",
|
501 |
+
"Warehouse/Distribution",
|
502 |
+
"Other",
|
503 |
+
],
|
504 |
+
label="Business Type",
|
505 |
+
value="Retail Store",
|
506 |
+
visible=False,
|
507 |
+
interactive=True,
|
508 |
+
info="Select your business type for specialized recommendations",
|
509 |
+
)
|
510 |
+
|
511 |
+
nl_vulnerable_groups = gr.CheckboxGroup(
|
512 |
+
choices=[
|
513 |
+
"Elderly",
|
514 |
+
"Children",
|
515 |
+
"Chronic Health Conditions",
|
516 |
+
"Pregnant",
|
517 |
+
],
|
518 |
+
label="Vulnerable Groups in Household",
|
519 |
+
)
|
520 |
+
|
521 |
+
analyze_btn = gr.Button(
|
522 |
+
"🔍 Analyze Query & Get Recommendations",
|
523 |
+
variant="primary",
|
524 |
+
size="lg",
|
525 |
+
)
|
526 |
+
|
527 |
+
with gr.Row():
|
528 |
+
gr.HTML("""
|
529 |
+
<div style="display: flex; align-items: center; gap: 10px;">
|
530 |
+
<h3 style="margin: 0;">🛰️ Agentic Logs</h3>
|
531 |
+
</div>
|
532 |
+
""")
|
533 |
+
|
534 |
+
with gr.Row():
|
535 |
+
nl_logs_box = gr.Textbox(
|
536 |
+
value=logcatcher.get_logs(),
|
537 |
+
label="Logs",
|
538 |
+
lines=17,
|
539 |
+
max_lines=25,
|
540 |
+
interactive=False,
|
541 |
+
elem_id="nl_terminal_logs",
|
542 |
+
show_copy_button=True,
|
543 |
+
container=False,
|
544 |
+
)
|
545 |
+
nl_logs_timer = gr.Timer(0.5)
|
546 |
+
nl_logs_timer.tick(get_logs, None, nl_logs_box)
|
547 |
+
|
548 |
+
with gr.Row():
|
549 |
+
nl_location_map = gr.HTML(
|
550 |
+
"<div style='text-align: center; padding: 50px; background-color: #f0f0f0; border-radius: 10px;'>Map will appear here after analysis.</div>",
|
551 |
+
label="Interactive Risk Map",
|
552 |
+
)
|
553 |
+
|
554 |
+
# Résultats d'analyse en langage naturel dans un cadre custom (CSS)
|
555 |
+
with gr.Row():
|
556 |
+
risk_analysis_output = gr.Markdown(
|
557 |
+
"Enter your question above to get started.",
|
558 |
+
label="Risk Analysis",
|
559 |
+
elem_id="nl_risk_box",
|
560 |
+
)
|
561 |
+
|
562 |
+
# Recommandations NL dans un cadre custom (CSS)
|
563 |
+
with gr.Row():
|
564 |
+
recommendations_output = gr.Markdown(
|
565 |
+
"Personalized recommendations will appear here.",
|
566 |
+
label="AI-Generated Recommendations",
|
567 |
+
elem_id="nl_rec_box",
|
568 |
+
)
|
569 |
+
|
570 |
+
# CSS pour les cadres custom
|
571 |
+
gr.HTML("""
|
572 |
+
<style>
|
573 |
+
#risk_summary_box, #recommendations_box, #nl_risk_box, #nl_rec_box {
|
574 |
+
border: 2px solid #007aff;
|
575 |
+
border-radius: 13px;
|
576 |
+
background: #fafdff;
|
577 |
+
box-shadow: 0 2px 12px rgba(80,140,255,0.08);
|
578 |
+
padding: 20px 15px;
|
579 |
+
margin-top: 10px;
|
580 |
+
margin-bottom: 18px;
|
581 |
+
}
|
582 |
+
#terminal_logs textarea, #nl_terminal_logs textarea {
|
583 |
+
background-color: #181a1b !important;
|
584 |
+
color: #00ff66 !important;
|
585 |
+
font-family: 'Fira Mono', 'Consolas', monospace !important;
|
586 |
+
font-size: 15px;
|
587 |
+
border-radius: 9px !important;
|
588 |
+
border: 2px solid #31343a !important;
|
589 |
+
box-shadow: 0 2px 6px rgba(0,0,0,0.19);
|
590 |
+
padding: 12px 10px !important;
|
591 |
+
min-height: 320px !important;
|
592 |
+
max-height: 420px !important;
|
593 |
+
letter-spacing: 0.5px;
|
594 |
+
line-height: 1.5;
|
595 |
+
overflow-y: auto !important;
|
596 |
+
resize: vertical !important;
|
597 |
+
scrollbar-width: thin;
|
598 |
+
scrollbar-color: #6cf97c #282c34;
|
599 |
+
}
|
600 |
+
#terminal_logs, #nl_terminal_logs {
|
601 |
+
width: 100% !important;
|
602 |
+
}
|
603 |
+
</style>
|
604 |
+
""")
|
605 |
+
|
606 |
+
profile_dropdown.change(
|
607 |
+
fn=self.update_business_visibility,
|
608 |
+
inputs=[profile_dropdown],
|
609 |
+
outputs=[business_type_dropdown],
|
610 |
+
)
|
611 |
+
nl_profile_type.change(
|
612 |
+
fn=self.update_business_visibility,
|
613 |
+
inputs=[nl_profile_type],
|
614 |
+
outputs=[nl_business_type_dropdown],
|
615 |
+
)
|
616 |
+
country_dropdown.change(
|
617 |
+
fn=self.update_cities,
|
618 |
+
inputs=[country_dropdown],
|
619 |
+
outputs=[city_suggestions, state_dropdown, location_map],
|
620 |
+
)
|
621 |
+
city_input.change(
|
622 |
+
fn=self.update_map_from_location,
|
623 |
+
inputs=[country_dropdown, city_input, state_dropdown],
|
624 |
+
outputs=[location_status, location_map],
|
625 |
+
)
|
626 |
+
analyze_location_btn.click(
|
627 |
+
fn=self.analyze_with_dropdown,
|
628 |
+
inputs=[
|
629 |
+
country_dropdown,
|
630 |
+
city_input,
|
631 |
+
state_dropdown,
|
632 |
+
profile_dropdown,
|
633 |
+
business_type_dropdown,
|
634 |
+
vulnerable_groups,
|
635 |
+
],
|
636 |
+
outputs=[dropdown_risk_summary, dropdown_recommendations, location_map],
|
637 |
+
show_progress="full",
|
638 |
+
)
|
639 |
+
analyze_btn.click(
|
640 |
+
fn=self.analyze_user_input,
|
641 |
+
inputs=[
|
642 |
+
user_query,
|
643 |
+
nl_profile_type,
|
644 |
+
nl_business_type_dropdown,
|
645 |
+
nl_vulnerable_groups,
|
646 |
+
],
|
647 |
+
outputs=[
|
648 |
+
risk_analysis_output,
|
649 |
+
recommendations_output,
|
650 |
+
nl_location_map,
|
651 |
+
],
|
652 |
+
show_progress="full",
|
653 |
+
)
|
654 |
+
|
655 |
+
return app
|