# ─────────────────────────────────────────────────────────────────────────────
# app.py ─ Weather Umbrella Advisor (Streamlit + Claude 3 + OpenWeatherMap)
# ─────────────────────────────────────────────────────────────────────────────
import os
import json
import requests
import boto3
import streamlit as st
from dotenv import load_dotenv

# ─────────────────────────────────────────────────────────────────────────────
# 1) Load environment variables (for local .env / HF Secrets)
# ─────────────────────────────────────────────────────────────────────────────
load_dotenv()

OPENWEATHERMAP_API_KEY = os.getenv("OPENWEATHERMAP_API_KEY")
AWS_REGION             = os.getenv("AWS_REGION", "us-east-1")
AWS_ACCESS_KEY_ID      = os.getenv("AWS_ACCESS_KEY_ID")      # may be None
AWS_SECRET_ACCESS_KEY  = os.getenv("AWS_SECRET_ACCESS_KEY")  # may be None

# ─────────────────────────────────────────────────────────────────────────────
# 2) Helper to mask credentials (so we can print a hint in the UI)
# ─────────────────────────────────────────────────────────────────────────────
def _mask(val: str) -> str:
    """
    Returns a masked version of `val`, showing the first 4 and last 4 characters,
    e.g. "AKIA...5678".
    """
    if not val:
        return "None"
    if len(val) <= 8:
        return val
    return val[:4] + "..." + val[-4:]

# ─────────────────────────────────────────────────────────────────────────────
# 3) Detect if keys are reversed: we expect ACCESS_KEY_ID to start with AKIA/ASIA.
#    If not, but SECRET_ACCESS_KEY starts with AKIA/ASIA, swap them.
# ─────────────────────────────────────────────────────────────────────────────
def _looks_like_access_key(key: str) -> bool:
    """
    AWS access key IDs typically start with 'AKIA' or 'ASIA' and are
    20 characters long.
    """
    return bool(key) and (key.startswith("AKIA") or key.startswith("ASIA")) and len(key) == 20

# If ACCESS_KEY_ID doesn't look like an AKIA/ASIA key but SECRET_ACCESS_KEY does, swap them:
if AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
    if not _looks_like_access_key(AWS_ACCESS_KEY_ID) and _looks_like_access_key(AWS_SECRET_ACCESS_KEY):
        AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY = AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID

# ─────────────────────────────────────────────────────────────────────────────
# 4) Initialize boto3 Session / Bedrock client
#    – If both AWS_ACCESS_KEY_ID & AWS_SECRET_ACCESS_KEY exist, pass them explicitly
#    – Otherwise, fall back to the default credential chain (IAM role, container
#      credentials, etc.)
# ─────────────────────────────────────────────────────────────────────────────
if AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
    session = boto3.Session(
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        region_name=AWS_REGION,
    )
else:
    session = boto3.Session(region_name=AWS_REGION)

bedrock = session.client("bedrock-runtime")

# Quick sanity check: creating the client alone never validates credentials, so
# call STS GetCallerIdentity (allowed for any valid principal) to fail fast here
# instead of on the first invoke_model call.
try:
    session.client("sts").get_caller_identity()
except Exception as e:
    st.error(f"⚠️ Credential problem: {e}")
    st.stop()

# ─────────────────────────────────────────────────────────────────────────────
# 5) Streamlit Page Configuration & Header
# ─────────────────────────────────────────────────────────────────────────────
st.set_page_config(page_title="🌤️ Umbrella Advisor", page_icon="☔", layout="centered")

st.markdown(
    """

# ☔ Weather Umbrella Advisor

Ask if you need an umbrella tomorrow – powered by Claude 3 Sonnet (Bedrock) + OpenWeatherMap.

""", unsafe_allow_html=True, ) # ───────────────────────────────────────────────────────────────────────────── # 6) Show masked credentials for debugging (so you can see if the swap logic worked) # ───────────────────────────────────────────────────────────────────────────── st.markdown( f""" **Debug** (masked credentials): • AWS_ACCESS_KEY_ID = `{_mask(AWS_ACCESS_KEY_ID)}` • AWS_SECRET_ACCESS_KEY = `{_mask(AWS_SECRET_ACCESS_KEY)}` """, unsafe_allow_html=True, ) # ───────────────────────────────────────────────────────────────────────────── # 7) Conversation state # ───────────────────────────────────────────────────────────────────────────── if "messages" not in st.session_state: st.session_state.messages = [] for m in st.session_state.messages: with st.chat_message(m["role"]): st.markdown(m["content"]) # ───────────────────────────────────────────────────────────────────────────── # 8) Helper: get_weather(city) → calls OpenWeatherMap and returns JSON # ───────────────────────────────────────────────────────────────────────────── def get_weather(city: str): """ Fetches a 24‐hour forecast (8 x 3‐hour intervals) for `city`. Returns either: { "location": "CityName", "forecast": [ ... ] } or { "error": "Error message" } """ city = city.strip() if not city: return {"error": "Please provide a valid city name."} try: # 1) Get lat/lon geo_url = ( f"http://api.openweathermap.org/geo/1.0/direct" f"?q={city}&limit=1&appid={OPENWEATHERMAP_API_KEY}" ) geo_resp = requests.get(geo_url, timeout=10).json() if not geo_resp: return {"error": f"City '{city}' not found."} lat, lon = geo_resp[0]["lat"], geo_resp[0]["lon"] # 2) Get 5‐day / 3hr forecast weather_url = ( f"http://api.openweathermap.org/data/2.5/forecast" f"?lat={lat}&lon={lon}" f"&appid={OPENWEATHERMAP_API_KEY}&units=metric" ) weather_data = requests.get(weather_url, timeout=10).json() if "list" not in weather_data: return {"error": f"Unable to fetch forecast for '{city}'."} forecast = [] for f in weather_data["list"][:8]: # Next 24 hours ≈ 8 slots at 3 each forecast.append({ "time": f["dt_txt"], "description": f["weather"][0]["description"].capitalize(), "rain_probability": round(f.get("pop", 0) * 100, 1), "temp": f["main"]["temp"], "humidity": f["main"]["humidity"] }) return {"location": city.title(), "forecast": forecast} except Exception as ex: return {"error": str(ex)} # ───────────────────────────────────────────────────────────────────────────── # 9) ReAct System Prompt & Helper to ask Claude (Bedrock) # ───────────────────────────────────────────────────────────────────────────── SYSTEM_PROMPT = """ You are a helpful umbrella advisor using the ReAct (Reasoning + Acting) methodology. Steps: 1. Think about the user’s question. 2. Act by calling get_weather(location) if needed. 3. Observe the weather result. 4. Reason and respond. When you need weather data, respond _exactly_ in this JSON format (no extra text): { "thought": "…", "action": "get_weather", "action_input": {"location": "CityName"} } If no location is provided, ask the user to specify one. Once you have the forecast, give a final, friendly answer such as: "You do not need an umbrella tomorrow in London because it will be sunny with 0% chance of rain." """ def ask_claude(user_input: str, history: str = "") -> str: """ 1. Send the initial ReAct prompt to Claude, including user_input + history. 2. Parse Claude’s JSON: if action == "get_weather", call get_weather(…). 3. Feed the weather data back into Claude for final reasoning. 4. Return Claude’s final text reply. 
""" # Step 1: Initial ReAct call body1 = { "anthropic_version": "bedrock-2023-05-31", "max_tokens": 1000, "temperature": 0.7, "top_p": 0.9, "messages": [ {"role": "user", "content": f"{SYSTEM_PROMPT}\n\nHistory:\n{history}\n\nUser: {user_input}"} ] } resp1 = bedrock.invoke_model( modelId="anthropic.claude-3-sonnet-20240229-v1:0", contentType="application/json", accept="application/json", body=json.dumps(body1) ) text1 = json.loads(resp1["body"].read())["content"][0]["text"].strip() # Step 2: Try parsing as JSON try: parsed = json.loads(text1) if parsed.get("action") == "get_weather": city = parsed["action_input"].get("location", "").strip() if not city: return "🌍 I need a city name—could you please tell me which city you mean?" wx = get_weather(city) if "error" in wx: return wx["error"] # Step 3: Ask Claude to reason over the weather data weather_json = json.dumps(wx, indent=2) prompt2 = ( f"Here is the forecast for {wx['location']}:\n\n" f"{weather_json}\n\n" "Based on this data, answer whether the user should carry an umbrella tomorrow " "in a friendly, conversational way (YES/NO + reasoning)." ) body2 = { "anthropic_version": "bedrock-2023-05-31", "max_tokens": 500, "temperature": 0.7, "messages": [{"role": "user", "content": prompt2}] } resp2 = bedrock.invoke_model( modelId="anthropic.claude-3-sonnet-20240229-v1:0", contentType="application/json", accept="application/json", body=json.dumps(body2) ) return json.loads(resp2["body"].read())["content"][0]["text"].strip() except json.JSONDecodeError: # If it wasn’t valid JSON, just return whatever Claude replied pass return text1 # ───────────────────────────────────────────────────────────────────────────── # 10) Build conversation history helper # ───────────────────────────────────────────────────────────────────────────── def _build_history(n: int = 4) -> str: """ Returns the last n messages formatted as: User: ... Assistant: ... so that Claude sees recent turns. """ hist = st.session_state.messages[-n:] return "\n".join(f"{m['role'].capitalize()}: {m['content']}" for m in hist) # ───────────────────────────────────────────────────────────────────────────── # 11) Main Chat Input / Display Loop # ───────────────────────────────────────────────────────────────────────────── if user_query := st.chat_input("Ask: Do I need an umbrella tomorrow?"): # 1) Append user message locally st.session_state.messages.append({"role": "user", "content": user_query}) with st.chat_message("user"): st.markdown(user_query) # 2) Get assistant reply with st.chat_message("assistant"): with st.spinner("🤔 Thinking…"): history = _build_history() assistant_reply = ask_claude(user_query, history) st.markdown(assistant_reply) # 3) Append assistant reply to state st.session_state.messages.append({"role": "assistant", "content": assistant_reply}) # ───────────────────────────────────────────────────────────────────────────── # 12) Sidebar (Branding / Help) # ───────────────────────────────────────────────────────────────────────────── with st.sidebar: st.image("https://img.icons8.com/clouds/100/umbrella.png", width=100) st.markdown("## ☀️ About") st.markdown( """ **Weather Umbrella Advisor** - Uses **OpenWeatherMap** for real‐time forecast - Uses **Claude 3 Sonnet (AWS Bedrock)** to reason via ReAct - Provides clear YES/NO umbrella advice with reasoning **Try these:** - "Should I bring an umbrella tomorrow?" - "Will it rain in Delhi tomorrow?" - "Do I need an umbrella in Tokyo?" """ )