import gradio as gr
import os
import json
import time
from typing import Any, Dict, List
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()

HF_TOKEN = os.getenv("HF_TOKEN")

# Available models for selection
AVAILABLE_MODELS = [
    "openai/gpt-oss-120b:fireworks-ai",
    "openai/gpt-oss-20b:fireworks-ai"
]
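
# Note: AVAILABLE_MODELS lists the model ids this demo is written against; the
# Gradio UI below always uses DEFAULT_MODEL, though run_weather_agent accepts
# any of these via its `model` argument.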

# Default model
DEFAULT_MODEL = "openai/gpt-oss-120b:fireworks-ai"

BASE_URL = "https://router.huggingface.co/v1"

client = OpenAI(base_url=BASE_URL, api_key=HF_TOKEN)
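
# The Hugging Face Inference router exposes an OpenAI-compatible /v1 API, so the
# standard `openai` client works against it. HF_TOKEN must be available (e.g. as
# a Space secret or in a local .env file) or requests will be rejected.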

# OpenAI-style tool specs for function calling
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather information for a specified city",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "The name of the city to get weather information for"
                    }
                },
                "required": ["city"]
            }
        }
    }
]
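
# Additional tools can be added by appending entries with the same JSON-schema
# shape and registering a matching implementation in FUNCTION_MAP below.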

def get_weather(city: str):
    print(f"[debug] getting weather for {city}")
    return f"The weather in {city} is sunny."

FUNCTION_MAP = {
    "get_weather": get_weather,
}
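
# FUNCTION_MAP dispatches tool-call names returned by the model to local Python
# callables; names the model invents are reported back as an "unknown_tool" error.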

def call_model(messages: List[Dict[str, Any]], tools=None, temperature: float = 0.3, model: str = DEFAULT_MODEL):
    """Run one chat completion step with tool calling support."""
    if tools is None:
        tools = TOOLS
    try:
        return client.chat.completions.create(
            model=model,
            temperature=temperature,
            messages=messages,
            tools=tools,
            tool_choice="auto"
        )
    except Exception as e:
        print(f"Error calling model: {e}")
        raise
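
# Illustrative single-step usage (no agent loop):
#   resp = call_model([{"role": "user", "content": "What's the weather in Paris?"}])
#   print(resp.choices[0].message.tool_calls or resp.choices[0].message.content)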

def run_weather_agent(user_prompt: str, model: str = DEFAULT_MODEL) -> str:
    """
    Run a simple weather agent loop: the model may call the get_weather tool,
    the tool result is fed back, and the final assistant reply is returned.
    """
    system = {
        "role": "system",
        "content": (
            "You are a helpful weather agent. Follow these steps:\n"
            "1. When a user asks about weather in a city, use the get_weather tool\n"
            "2. Provide a friendly response with the weather information\n"
            "3. If no city is mentioned, ask the user to specify a city\n"
            "4. Be conversational and helpful\n"
        ),
    }
    messages: List[Dict[str, Any]] = [system, {"role": "user", "content": user_prompt}]

    for step in range(3):  # small safety cap
        try:
            resp = call_model(messages, tools=TOOLS, model=model)
            msg = resp.choices[0].message

            # If the model wants to call tools
            if getattr(msg, "tool_calls", None):
                # Add the assistant message with tool calls to the conversation
                assistant_message = {
                    "role": "assistant",
                    "content": msg.content or "",
                    "tool_calls": [
                        {
                            "id": tool_call.id,
                            "type": "function",
                            "function": {
                                "name": tool_call.function.name,
                                "arguments": tool_call.function.arguments
                            }
                        }
                        for tool_call in msg.tool_calls
                    ]
                }
                messages.append(assistant_message)

                # Process each tool call
                for tool_call in msg.tool_calls:
                    name = tool_call.function.name
                    args = {}
                    try:
                        args = json.loads(tool_call.function.arguments or "{}")
                    except json.JSONDecodeError:
                        args = {}

                    fn = FUNCTION_MAP.get(name)
                    if not fn:
                        messages.append({
                            "role": "tool",
                            "tool_call_id": tool_call.id,
                            "name": name,
                            "content": json.dumps({"ok": False, "error": "unknown_tool"})
                        })
                        continue

                    try:
                        result = fn(**args)
                    except TypeError as e:
                        result = {"ok": False, "error": f"bad_args: {e}"}
                    except Exception as e:
                        result = {"ok": False, "error": repr(e)}

                    tool_response = {
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": name,
                        "content": json.dumps(result),
                    }
                    messages.append(tool_response)

                # Continue the loop so the model can see the tool outputs
                continue

            # Final assistant message without tool calls
            if msg.content:
                return msg.content

            # Fallback: tiny sleep, then try again
            time.sleep(0.2)
        except Exception as e:
            # On error, return the message on the last step, otherwise retry
            if step == 2:  # last step
                return f"Error occurred during processing: {e}"
            time.sleep(0.5)
            continue

    return "I could not complete the task within the step limit. Try refining your query."

# Example usage of the weather agent
# if __name__ == "__main__":
#     # Test the weather agent with different queries
#     test_queries = [
#         "What's the weather like in New York?",
#         "How's the weather in London?",
#         "Tell me about the weather in Tokyo",
#         "What's the weather like?"  # This should prompt for a city
#     ]
#     print("=== Weather Agent Demo ===\n")
#     for query in test_queries:
#         print(f"User: {query}")
#         try:
#             response = run_weather_agent(query)
#             print(f"Agent: {response}\n")
#         except Exception as e:
#             print(f"Error: {e}\n")
#         print("-" * 50 + "\n")

### GRADIO

def weather_chat_with_agent(message, history, model):
    """Handle weather chat messages and return agent responses."""
    if not message.strip():
        return history
    try:
        response = run_weather_agent(message, model)
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": response})
        return history
    except Exception as e:
        error_msg = f"Sorry, I encountered an error: {str(e)}"
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": error_msg})
        return history
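
# History entries are plain {"role": ..., "content": ...} dicts, matching the
# gr.Chatbot(type="messages") format used in the interface below.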

def create_weather_interface():
    with gr.Blocks(title="Weather Agent") as demo:
        gr.Markdown("# 🌤️ Weather Agent")
        gr.Markdown("Ask me about the weather in any city!")

        chatbot = gr.Chatbot(height=400, type="messages")
        msg = gr.Textbox(label="Ask about weather", placeholder="e.g., What's the weather like in Paris?")
        clear = gr.Button("Clear")

        def respond(message, chat_history):
            updated = weather_chat_with_agent(message, chat_history, DEFAULT_MODEL)
            return "", updated  # clear the textbox and update the chat

        msg.submit(respond, [msg, chatbot], [msg, chatbot])
        clear.click(lambda: None, None, chatbot, queue=False)
    return demo

# To run the weather interface:
demo = create_weather_interface()
demo.launch()
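
# On a Hugging Face Space, demo.launch() with no arguments is sufficient; when
# running locally you can optionally pass e.g. share=True to get a public link.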