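"""Streamlit chat app that advises whether to carry an umbrella tomorrow.

It runs a simple ReAct (Reasoning + Acting) loop: Claude 3 Sonnet on Amazon
Bedrock decides whether it needs weather data, the app calls the OpenWeatherMap
geocoding and 3-hourly forecast APIs, and Claude turns the forecast into
umbrella advice.

Configuration (read from the environment, optionally via a .env file):
    OPENWEATHERMAP_API_KEY  OpenWeatherMap API key (required)
    AWS_REGION              Bedrock region (defaults to us-east-1)
AWS credentials are resolved through boto3's standard credential chain.
"""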
import os
import json
import requests
import boto3
import streamlit as st
from dotenv import load_dotenv
 
load_dotenv()
 
# Environment config
OPENWEATHERMAP_API_KEY = os.getenv("OPENWEATHERMAP_API_KEY")
AWS_REGION = os.getenv("AWS_REGION", "us-east-1")
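 
# Fail fast with a clear message if the OpenWeatherMap key is missing; otherwise the
# forecast requests below would be sent with appid=None and fail with a confusing error.
if not OPENWEATHERMAP_API_KEY:
    st.error("OPENWEATHERMAP_API_KEY is not set. Add it to your environment or a .env file.")
    st.stop()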
 
# Bedrock Claude client
bedrock = boto3.client("bedrock-runtime", region_name=AWS_REGION)
 
# App UI
st.title("Weather Assistant - Umbrella Advisor")
st.markdown("Ask me if you should carry an umbrella tomorrow!")
 
# Session state for chat
if "messages" not in st.session_state:
    st.session_state.messages = []
 
# Chat history display
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
 
 
def get_weather(location):
    """Get weather forecast for a specific location"""
    print(f"Getting weather for: {location}")
   
    # Validate location input
    if not location or location.strip() == "":
        return {"error": "Please specify a valid location/city name."}
   
    location = location.strip()
    geo_url = f"http://api.openweathermap.org/geo/1.0/direct?q={location}&limit=1&appid={OPENWEATHERMAP_API_KEY}"
   
    try:
        geo_resp = requests.get(geo_url, timeout=10).json()
       
        if not geo_resp or len(geo_resp) == 0:
            return {"error": f"Location '{location}' not found. Please check the spelling and try again."}
       
        lat, lon = geo_resp[0]['lat'], geo_resp[0]['lon']
    except (KeyError, IndexError, requests.RequestException) as e:
        return {"error": f"Error getting location data for '{location}': {str(e)}"}
    try:
        weather_url = f"http://api.openweathermap.org/data/2.5/forecast?lat={lat}&lon={lon}&appid={OPENWEATHERMAP_API_KEY}&units=metric"
        weather_data = requests.get(weather_url, timeout=10).json()
       
        if 'list' not in weather_data:
            return {"error": f"Unable to get weather forecast for '{location}'."}
       
        forecast = []
        for f in weather_data['list'][:8]:  # Next 24 hours
            forecast.append({
                "time": f["dt_txt"],
                "description": f["weather"][0]["description"],
                "rain_probability": f.get("pop", 0) * 100,
                "temp": f["main"]["temp"],
                "humidity": f["main"]["humidity"]
            })
       
        return {
            "location": location,
            "forecast": forecast
        }
    except (KeyError, requests.RequestException) as e:
        return {"error": f"Error getting weather forecast for '{location}': {str(e)}"}
 
 
def generate_react_response(user_input, conversation_history=""):
    """Generate response using ReAct (Reasoning + Acting) approach"""
   
    system_prompt = """You are a helpful weather assistant that uses ReAct (Reasoning + Acting) methodology to help users decide about carrying umbrellas.
 
Follow this process:
1. **Think**: Analyze what the user is asking
2. **Act**: Use available tools if needed
3. **Observe**: Process the results
4. **Reason**: Draw conclusions and provide advice
 
Available tools:
- get_weather(location): Gets weather forecast for tomorrow
 
When you need to get weather data, respond with this JSON format:
{
  "thought": "I need to get weather data for [location] to advise about umbrella",
  "action": "get_weather",
  "action_input": {"location": "city_name"}
}
 
When you have all needed information, provide a conversational response that includes:
- The location
- Your reasoning based on weather conditions
- Clear umbrella advice
 
Example: "You do not need to carry an umbrella tomorrow as the weather in New York will be sunny with no chance of rain."
 
If the user doesn't specify a location, ask them to specify it conversationally."""
 
    # Build conversation context
    messages = [
        {"role": "user", "content": f"{system_prompt}\n\nConversation history: {conversation_history}\n\nUser: {user_input}"}
    ]
   
    claude_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 1000,
        "temperature": 0.7,
        "top_p": 0.9,
        "messages": messages
    }
 
    response = bedrock.invoke_model(
        modelId="anthropic.claude-3-sonnet-20240229-v1:0",
        contentType="application/json",
        accept="application/json",
        body=json.dumps(claude_body),
    )
 
    content = json.loads(response["body"].read())["content"][0]["text"].strip()
 
    # Try to parse as ReAct JSON
    try:
        react_response = json.loads(content)
       
        if react_response.get("action") == "get_weather":
            location = react_response.get("action_input", {}).get("location", "").strip()
            thought = react_response.get("thought", "")
           
            # Validate location before calling weather function
            if not location:
                return "I need to know which city or location you're asking about. Could you please specify the location?"
           
            # Get weather data
            weather_data = get_weather(location)
           
            if "error" in weather_data:
                return weather_data["error"]
           
            # Process weather data and generate final reasoning
            try:
                reasoning_prompt = f"""Based on this weather data for {location}, provide your final umbrella recommendation:
 
Weather forecast: {json.dumps(weather_data, indent=2)}
 
Your previous thought: {thought}
 
Provide a conversational response that includes:
1. The location
2. Your reasoning based on the weather conditions
3. Clear umbrella advice
 
Format like: "You [do/do not] need to carry an umbrella tomorrow as the weather in [location] will be [conditions and reasoning]."
"""
               
                final_messages = [{"role": "user", "content": reasoning_prompt}]
               
                final_body = {
                    "anthropic_version": "bedrock-2023-05-31",
                    "max_tokens": 500,
                    "temperature": 0.7,
                    "messages": final_messages
                }
               
                final_response = bedrock.invoke_model(
                    modelId="anthropic.claude-3-sonnet-20240229-v1:0",
                    contentType="application/json",
                    accept="application/json",
                    body=json.dumps(final_body),
                )
               
                final_content = json.loads(final_response["body"].read())["content"][0]["text"].strip()
                return final_content
            except Exception as e:
                return f"Error processing weather data: {str(e)}"
           
    except json.JSONDecodeError:
        # If not JSON, return the content as is (probably asking for location)
        pass
   
    return content
 
 
def build_conversation_history():
    """Build conversation history for context"""
    history = []
    for msg in st.session_state.messages[-4:]:  # Last 4 messages for context
        history.append(f"{msg['role'].capitalize()}: {msg['content']}")
    return "\n".join(history)
 
 
if prompt := st.chat_input("Type your question here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
 
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            conversation_history = build_conversation_history()
            reply = generate_react_response(prompt, conversation_history)
            st.markdown(reply)
    st.session_state.messages.append({"role": "assistant", "content": reply})
 
 
with st.sidebar:
    st.header("About")
    st.markdown("""
    - Powered by **Amazon Bedrock (Claude 3 Sonnet)**  
    - Uses **ReAct (Reasoning + Acting)** methodology
    - Retrieves real-time data from **OpenWeatherMap**  
    - Provides step-by-step reasoning for umbrella advice
    """)
   
    st.subheader("Sample Prompts")
    st.markdown("""
    - Should I bring an umbrella tomorrow?  
    - Will it rain in Delhi tomorrow?  
    - Do I need an umbrella in Tokyo?
    - Should I carry an umbrella tomorrow in London?
    """)
   
    st.subheader("ReAct Process")
    st.markdown("""
    1. **Think**: Analyze your question
    2. **Act**: Get weather data if needed
    3. **Observe**: Process weather information  
    4. **Reason**: Provide umbrella advice with explanation
    """)