import os
import json

import requests
import boto3
import streamlit as st
from dotenv import load_dotenv

# Load env (for local dev / Hugging Face Spaces secrets)
load_dotenv()

# Config
OPENWEATHERMAP_API_KEY = os.getenv("OPENWEATHERMAP_API_KEY")
AWS_REGION = os.getenv("AWS_REGION", "us-east-1")

# AWS Bedrock Runtime client
session = boto3.Session(
    aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
    aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
    region_name=AWS_REGION,
)
bedrock_runtime = session.client("bedrock-runtime")
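# Note: if AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY are not set, boto3 falls back
# to its default credential chain (shared config files, instance/role credentials, etc.).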

# Streamlit page config
st.set_page_config(page_title="🌤️ Weather Umbrella Advisor", page_icon="☔", layout="centered")

# --- Title Section ---
st.markdown("""
<div style="text-align: center;">
    <h1 style="color: #3c79f5;">☔ Weather Umbrella Advisor</h1>
    <p style="font-size: 18px;">Ask me if you need to carry an umbrella tomorrow, powered by <b>Claude + OpenWeatherMap</b>.</p>
</div>
""", unsafe_allow_html=True)

# Chat history state
if "messages" not in st.session_state:
    st.session_state.messages = []

# --- Display Past Messages ---
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])


# --- Weather API Call ---
def get_weather(location):
    """Fetch the 3-hourly forecast for the next 24 hours for a given city."""
    if not location.strip():
        return {"error": "Please specify a valid location."}
    try:
        # Geocode the city name to latitude/longitude
        geo_resp = requests.get(
            "http://api.openweathermap.org/geo/1.0/direct",
            params={"q": location, "limit": 1, "appid": OPENWEATHERMAP_API_KEY},
            timeout=10,
        ).json()
        if not geo_resp:
            return {"error": f"Location '{location}' not found."}
        lat, lon = geo_resp[0]["lat"], geo_resp[0]["lon"]

        # 5-day / 3-hour forecast; keep only the upcoming blocks
        weather_data = requests.get(
            "http://api.openweathermap.org/data/2.5/forecast",
            params={"lat": lat, "lon": lon, "appid": OPENWEATHERMAP_API_KEY, "units": "metric"},
            timeout=10,
        ).json()
        if "list" not in weather_data:
            return {"error": f"Unable to fetch weather forecast for '{location}'."}

        forecast = [{
            "time": f["dt_txt"],
            "description": f["weather"][0]["description"].capitalize(),
            "rain_probability": round(f.get("pop", 0) * 100, 1),
            "temp": f["main"]["temp"],
            "humidity": f["main"]["humidity"],
        } for f in weather_data["list"][:8]]  # 24 hrs = 8 x 3-hr blocks
        return {"location": location.title(), "forecast": forecast}
    except Exception as e:
        return {"error": str(e)}
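
# Illustrative shape of a successful get_weather() result (values are made up):
# {
#     "location": "London",
#     "forecast": [
#         {"time": "2025-06-01 09:00:00", "description": "Light rain",
#          "rain_probability": 62.0, "temp": 14.2, "humidity": 81},
#         ...
#     ]
# }
# On failure the function returns {"error": "<message>"} instead.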


# --- ReAct-Powered Response Generator ---
def generate_react_response(user_input, conversation_history=""):
    """Uses Claude with ReAct to give an umbrella recommendation."""
    system_prompt = """You are a helpful assistant using the ReAct (Reasoning + Acting) method to answer whether the user should carry an umbrella tomorrow.

Steps:
1. Think about the question.
2. Act using get_weather(location).
3. Observe the weather data.
4. Reason and give a clear answer.

When you need weather data, reply in this format:
{
  "thought": "Need weather info for [location]",
  "action": "get_weather",
  "action_input": {"location": "city_name"}
}

If no location is mentioned, ask the user to specify one.
If you already have the weather data, give a natural reply like:
"You do not need an umbrella tomorrow in London, as it's expected to be sunny and dry."
"""

    messages = [{
        "role": "user",
        "content": f"{system_prompt}\n\nChat history:\n{conversation_history}\n\nUser: {user_input}",
    }]
    claude_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 1000,
        "temperature": 0.7,
        "top_p": 0.9,
        "messages": messages,
    }
    response = bedrock_runtime.invoke_model(
        modelId="anthropic.claude-3-sonnet-20240229-v1:0",
        contentType="application/json",
        accept="application/json",
        body=json.dumps(claude_body),
    )
    content = json.loads(response["body"].read())["content"][0]["text"].strip()
    # Try parsing a ReAct action request (JSON); otherwise treat it as a direct reply
    try:
        parsed = json.loads(content)
        if parsed.get("action") == "get_weather":
            location = parsed.get("action_input", {}).get("location", "")
            if not location:
                return "Please tell me which city you're asking about 🙂."
            weather_data = get_weather(location)
            if "error" in weather_data:
                return weather_data["error"]

            forecast_str = json.dumps(weather_data, indent=2)
            reasoning_prompt = f"""Based on this weather forecast for {location}, give an umbrella recommendation:

{forecast_str}

Return a natural response that includes:
- Location
- Conditions (rain %, sky, etc.)
- Clear YES/NO umbrella advice
"""
            final_response = bedrock_runtime.invoke_model(
                modelId="anthropic.claude-3-sonnet-20240229-v1:0",
                contentType="application/json",
                accept="application/json",
                body=json.dumps({
                    "anthropic_version": "bedrock-2023-05-31",
                    "max_tokens": 500,
                    "temperature": 0.7,
                    "messages": [{"role": "user", "content": reasoning_prompt}],
                }),
            )
            return json.loads(final_response["body"].read())["content"][0]["text"].strip()
    except json.JSONDecodeError:
        pass  # Not JSON, so treat the content as a plain conversational reply

    return content
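
# Flow recap: the first Claude call either answers directly or returns a JSON
# "action" asking for get_weather(location). When an action comes back, the
# forecast is fetched and sent to Claude in a second invoke_model call, and that
# second response is what the user sees as the final recommendation.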

# --- Chat Input Handling ---
def build_convo_history():
    """Return the last few turns as plain text for Claude's context."""
    return "\n".join([f"{m['role'].capitalize()}: {m['content']}" for m in st.session_state.messages[-4:]])


if user_prompt := st.chat_input("Ask: Do I need an umbrella tomorrow?"):
    st.session_state.messages.append({"role": "user", "content": user_prompt})
    with st.chat_message("user"):
        st.markdown(user_prompt)

    with st.chat_message("assistant"):
        with st.spinner("Thinking... 🤔"):
            history = build_convo_history()
            response = generate_react_response(user_prompt, history)
            st.markdown(response)

    st.session_state.messages.append({"role": "assistant", "content": response})

# --- Sidebar ---
with st.sidebar:
    st.image("https://img.icons8.com/clouds/100/umbrella.png", width=100)
    st.markdown("## ☂️ About")
    st.markdown("""
**Weather Assistant** gives you umbrella advice using:
- 🌦️ Real-time weather via **OpenWeatherMap**
- 🧠 Smart reasoning via **Claude (Bedrock)**
- 🤖 ReAct method: Think • Act • Observe • Reason

---

### 💬 Try Saying
- "Should I bring an umbrella tomorrow?"
- "Will it rain in Delhi tomorrow?"
- "Do I need an umbrella in Tokyo?"

---

### 🔧 Tools Used
- Claude 3 Sonnet (Bedrock)
- OpenWeatherMap API
- Streamlit (frontend)
""")