Update app.py
app.py
CHANGED
@@ -4,115 +4,99 @@ import requests
 import boto3
 import streamlit as st
 from dotenv import load_dotenv
-
 load_dotenv()
-
-#
 OPENWEATHERMAP_API_KEY = os.getenv("OPENWEATHERMAP_API_KEY")
 AWS_REGION = os.getenv("AWS_REGION", "us-east-1")
-
-# Bedrock
-
-
-
-
-
-
-
-#
-st.
-
-
-
 if "messages" not in st.session_state:
     st.session_state.messages = []
-
-#
 for msg in st.session_state.messages:
     with st.chat_message(msg["role"]):
         st.markdown(msg["content"])
-
-
 def get_weather(location):
-    """
-
-
-
-    if not location or location.strip() == "":
-        return {"error": "Please specify a valid location/city name."}
-
-    location = location.strip()
-    geo_url = f"http://api.openweathermap.org/geo/1.0/direct?q={location}&limit=1&appid={OPENWEATHERMAP_API_KEY}"
-
     try:
         geo_resp = requests.get(geo_url).json()
-
-
-
-
         lat, lon = geo_resp[0]['lat'], geo_resp[0]['lon']
-    except (KeyError, IndexError, requests.RequestException) as e:
-        return {"error": f"Error getting location data for '{location}': {str(e)}"}
-    try:
         weather_url = f"http://api.openweathermap.org/data/2.5/forecast?lat={lat}&lon={lon}&appid={OPENWEATHERMAP_API_KEY}&units=metric"
         weather_data = requests.get(weather_url).json()
-
         if 'list' not in weather_data:
-            return {"error": f"Unable to
-
-        forecast = [
-
-
-
-
-
-
-
-
-
-
-
-
-
-    except (KeyError, requests.RequestException) as e:
-        return {"error": f"Error getting weather forecast for '{location}': {str(e)}"}
-
-
 def generate_react_response(user_input, conversation_history=""):
-    """
-
-
-
-
-
-
-
-
-
-Available tools:
-- get_weather(location): Gets weather forecast for tomorrow
-
-When you need to get weather data, respond with this JSON format:
 {
-"thought": "
 "action": "get_weather",
 "action_input": {"location": "city_name"}
 }
-
-
-
-
-
-
-
-
-
-
-    # Build conversation context
-    messages = [
-        {"role": "user", "content": f"{system_prompt}\n\nConversation history: {conversation_history}\n\nUser: {user_input}"}
-    ]
-
     claude_body = {
         "anthropic_version": "bedrock-2023-05-31",
         "max_tokens": 1000,
@@ -120,120 +104,93 @@ If the user doesn't specify a location, ask them to specify it conversationally.
         "top_p": 0.9,
         "messages": messages
     }
-
-    response =
         modelId="anthropic.claude-3-sonnet-20240229-v1:0",
         contentType="application/json",
         accept="application/json",
         body=json.dumps(claude_body),
     )
-
     content = json.loads(response["body"].read())["content"][0]["text"].strip()
-
-    # Try
     try:
-
-
-
-        location = react_response.get("action_input", {}).get("location", "").strip()
-        thought = react_response.get("thought", "")
-
-        # Validate location before calling weather function
         if not location:
-            return "
-
-        # Get weather data
         weather_data = get_weather(location)
-
         if "error" in weather_data:
             return weather_data["error"]
-
-
-
-
-
-
-
-
-
-
-1. The location
-2. Your reasoning based on the weather conditions
-3. Clear umbrella advice
-
-Format like: "You [do/do not] need to carry an umbrella tomorrow as the weather in [location] will be [conditions and reasoning]."
 """
-
-
-
-
             "anthropic_version": "bedrock-2023-05-31",
             "max_tokens": 500,
             "temperature": 0.7,
-            "messages":
-        }
-
-
-
-            contentType="application/json",
-            accept="application/json",
-            body=json.dumps(final_body),
-        )
-
-        final_content = json.loads(final_response["body"].read())["content"][0]["text"].strip()
-        return final_content
-    except Exception as e:
-        return f"Error processing weather data: {str(e)}"
-
     except json.JSONDecodeError:
-        #
-
-
     return content
-
-
-def
-    """
-
-
-
-    return "\n".join(history)
-
-
-if prompt := st.chat_input("Type your question here..."):
-    st.session_state.messages.append({"role": "user", "content": prompt})
     with st.chat_message("user"):
-        st.markdown(
-
     with st.chat_message("assistant"):
-        with st.spinner("Thinking..."):
-
-
-            st.markdown(
-
-
-
 with st.sidebar:
-    st.
-    st.markdown(""
-- Powered by **AWS Bedrock (Claude Sonnet)**
-- Uses **ReAct (Reasoning + Acting)** methodology
-- Retrieves real-time data from **OpenWeatherMap**
-- Provides step-by-step reasoning for umbrella advice
-    """)
-
-    st.subheader("Sample Prompts")
-    st.markdown("""
-- Should I bring an umbrella tomorrow?
-- Will it rain in Delhi tomorrow?
-- Do I need an umbrella in Tokyo?
-- Should I carry an umbrella tomorrow in London?
-    """)
-
-    st.subheader("ReAct Process")
     st.markdown("""
-
-
-
-
-
 import boto3
 import streamlit as st
 from dotenv import load_dotenv
+
+# Load env (for local dev / Hugging Face secrets)
 load_dotenv()
+
+# Configs
 OPENWEATHERMAP_API_KEY = os.getenv("OPENWEATHERMAP_API_KEY")
 AWS_REGION = os.getenv("AWS_REGION", "us-east-1")
+
+# AWS Bedrock Runtime
+session = boto3.Session(
+    aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
+    aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
+    region_name=AWS_REGION
+)
+bedrock_runtime = session.client("bedrock-runtime")
+
+# Streamlit Page Config
+st.set_page_config(page_title="🌤️ Weather Umbrella Advisor", page_icon="☔", layout="centered")
+
+# --- Title Section ---
+st.markdown("""
+<div style="text-align: center;">
+<h1 style="color: #3c79f5;">☔ Weather Umbrella Advisor</h1>
+<p style="font-size: 18px;">Ask me if you need to carry an umbrella tomorrow, powered by <b>Claude + OpenWeatherMap</b>.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Chat history state
 if "messages" not in st.session_state:
     st.session_state.messages = []
+
+# --- Display Past Messages ---
 for msg in st.session_state.messages:
     with st.chat_message(msg["role"]):
         st.markdown(msg["content"])
+
+# --- Weather API Call ---
 def get_weather(location):
+    """Fetches weather data for next 24 hours for given city."""
+    if not location.strip():
+        return {"error": "Please specify a valid location."}
+
     try:
+        geo_url = f"http://api.openweathermap.org/geo/1.0/direct?q={location}&limit=1&appid={OPENWEATHERMAP_API_KEY}"
         geo_resp = requests.get(geo_url).json()
+        if not geo_resp:
+            return {"error": f"Location '{location}' not found."}
+
         lat, lon = geo_resp[0]['lat'], geo_resp[0]['lon']
         weather_url = f"http://api.openweathermap.org/data/2.5/forecast?lat={lat}&lon={lon}&appid={OPENWEATHERMAP_API_KEY}&units=metric"
         weather_data = requests.get(weather_url).json()
         if 'list' not in weather_data:
+            return {"error": f"Unable to fetch weather forecast for '{location}'."}
+
+        forecast = [{
+            "time": f["dt_txt"],
+            "description": f["weather"][0]["description"].capitalize(),
+            "rain_probability": round(f.get("pop", 0) * 100, 1),
+            "temp": f["main"]["temp"],
+            "humidity": f["main"]["humidity"]
+        } for f in weather_data['list'][:8]]  # 24 hrs = 8 x 3hr blocks
+
+        return {"location": location.title(), "forecast": forecast}
+
+    except Exception as e:
+        return {"error": str(e)}
+
+# --- ReAct-Powered Response Generator ---
 def generate_react_response(user_input, conversation_history=""):
+    """Uses Claude with ReAct to give umbrella recommendation."""
+    system_prompt = """You are a helpful assistant using the ReAct (Reasoning + Acting) method to answer whether the user should carry an umbrella tomorrow.
+
+Steps:
+1. Think about the question.
+2. Act using get_weather(location).
+3. Observe the weather data.
+4. Reason and give a clear answer.
+
+When you need weather data, reply in this format:
 {
+"thought": "Need weather info for [location]",
 "action": "get_weather",
 "action_input": {"location": "city_name"}
 }
+
+If no location is mentioned, ask the user to specify one.
+
+If you have the weather data, give a natural reply like:
+"You do not need an umbrella tomorrow in London, as it's expected to be sunny and dry."
+"""
+
+    messages = [{"role": "user", "content": f"{system_prompt}\n\nChat history:\n{conversation_history}\n\nUser: {user_input}"}]
+
     claude_body = {
         "anthropic_version": "bedrock-2023-05-31",
         "max_tokens": 1000,
         "top_p": 0.9,
         "messages": messages
     }
+
+    response = bedrock_runtime.invoke_model(
         modelId="anthropic.claude-3-sonnet-20240229-v1:0",
         contentType="application/json",
         accept="application/json",
         body=json.dumps(claude_body),
     )
     content = json.loads(response["body"].read())["content"][0]["text"].strip()
+
+    # Try parsing ReAct JSON
     try:
+        parsed = json.loads(content)
+        if parsed.get("action") == "get_weather":
+            location = parsed["action_input"]["location"]
             if not location:
+                return "Please tell me which city you're asking about."
+
             weather_data = get_weather(location)
             if "error" in weather_data:
                 return weather_data["error"]
+
+            forecast_str = json.dumps(weather_data, indent=2)
+            reasoning_prompt = f"""Based on this weather forecast for {location}, give an umbrella recommendation:
+
+{forecast_str}
+
+Return a natural response that includes:
+- Location
+- Conditions (rain %, sky, etc.)
+- Clear YES/NO umbrella advice
 """
+            final_response = bedrock_runtime.invoke_model(
+                modelId="anthropic.claude-3-sonnet-20240229-v1:0",
+                contentType="application/json",
+                accept="application/json",
+                body=json.dumps({
                     "anthropic_version": "bedrock-2023-05-31",
                     "max_tokens": 500,
                     "temperature": 0.7,
+                    "messages": [{"role": "user", "content": reasoning_prompt}]
+                })
+            )
+            return json.loads(final_response["body"].read())["content"][0]["text"].strip()
+
     except json.JSONDecodeError:
+        pass  # Possibly just a reply from Claude
+
     return content
+
+# --- Chat Input Handling ---
+def build_convo_history():
+    return "\n".join([f"{m['role'].capitalize()}: {m['content']}" for m in st.session_state.messages[-4:]])
+
+if user_prompt := st.chat_input("Ask: Do I need an umbrella tomorrow?"):
+    st.session_state.messages.append({"role": "user", "content": user_prompt})
     with st.chat_message("user"):
+        st.markdown(user_prompt)
+
     with st.chat_message("assistant"):
+        with st.spinner("Thinking... 🤔"):
+            history = build_convo_history()
+            response = generate_react_response(user_prompt, history)
+            st.markdown(response)
+
+    st.session_state.messages.append({"role": "assistant", "content": response})
+
+# --- Sidebar ---
 with st.sidebar:
+    st.image("https://img.icons8.com/clouds/100/umbrella.png", width=100)
+    st.markdown("## ☔️ About")
     st.markdown("""
+**Weather Assistant** gives you umbrella advice using:
+- 🌦️ Real-time weather via **OpenWeatherMap**
+- 🧠 Smart reasoning via **Claude (Bedrock)**
+- 🤖 ReAct method: Think • Act • Observe • Reason
+
+---
+
+### 💬 Try Saying
+- "Should I bring an umbrella tomorrow?"
+- "Will it rain in Delhi tomorrow?"
+- "Do I need an umbrella in Tokyo?"
+
+---
+
+### 🔧 Tools Used
+- Claude 3 Sonnet (Bedrock)
+- OpenWeatherMap API
+- Streamlit (frontend)
+    """)
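For quick local verification of the two OpenWeatherMap endpoints that the new `get_weather()` depends on, a minimal standalone sketch along the following lines can be run outside Streamlit. It assumes `OPENWEATHERMAP_API_KEY` is exported in the environment; `check_forecast` is an illustrative helper name, not part of this commit.

```python
# Standalone sanity check (illustrative only) for the OpenWeatherMap calls used in app.py.
# Assumes OPENWEATHERMAP_API_KEY is set in the environment.
import os
import requests

API_KEY = os.environ["OPENWEATHERMAP_API_KEY"]

def check_forecast(city: str):
    # Geocode the city name to lat/lon (same geo/1.0/direct endpoint as get_weather()).
    geo = requests.get(
        "http://api.openweathermap.org/geo/1.0/direct",
        params={"q": city, "limit": 1, "appid": API_KEY},
        timeout=10,
    ).json()
    if not geo:
        return f"Location '{city}' not found."
    lat, lon = geo[0]["lat"], geo[0]["lon"]

    # 3-hourly forecast; the first 8 entries cover roughly the next 24 hours.
    data = requests.get(
        "http://api.openweathermap.org/data/2.5/forecast",
        params={"lat": lat, "lon": lon, "appid": API_KEY, "units": "metric"},
        timeout=10,
    ).json()
    return [
        (f["dt_txt"], f["weather"][0]["description"], round(f.get("pop", 0) * 100, 1))
        for f in data.get("list", [])[:8]
    ]

if __name__ == "__main__":
    print(check_forecast("London"))
```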