antoniomtz committed
Commit ce3bf23 · verified · 1 parent: 975d3c6

Update app.py

Files changed (1)
  1. app.py +27 -17
app.py CHANGED
@@ -1,22 +1,19 @@
 import os
 import json
 import gradio as gr
+import requests
 from dotenv import load_dotenv
-from huggingface_hub import login
-from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 from llama_index.core.agent import ReActAgent
 from llama_index.core.tools import FunctionTool
+from llama_index.llms.openai import OpenAI
 
 # Load environment variables
 load_dotenv()
 
-# Get Hugging Face token
-hf_token = os.getenv("HUGGINGFACE_TOKEN")
-if not hf_token:
-    raise ValueError("Hugging Face token not found. Configure HUGGINGFACE_TOKEN in your environment variables")
-
-# Authenticate with Hugging Face
-login(token=hf_token)
+# Get OpenRouter token
+openrouter_token = os.getenv("OPENROUTER_API_KEY")
+if not openrouter_token:
+    raise ValueError("OpenRouter token not found. Configure OPENROUTER_API_KEY in your environment variables")
 
 # Define weather function with static data
 def get_current_weather(location: str, unit: str = "fahrenheit") -> dict:
@@ -31,7 +28,6 @@ def get_current_weather(location: str, unit: str = "fahrenheit") -> dict:
         dict: Weather information including location, temperature and unit
     """
     location = location.lower()
-
     if "tokyo" in location:
         return {"location": "Tokyo", "temperature": "10", "unit": "celsius"}
     elif "san francisco" in location:
@@ -48,12 +44,28 @@ weather_tool = FunctionTool.from_defaults(
     description="Get the current weather in a given location"
 )
 
-# Configure the language model
-llm = HuggingFaceInferenceAPI(
-    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+# Custom OpenRouter implementation using OpenAI-compatible interface
+class OpenRouterLLM(OpenAI):
+    def __init__(self, model_name="qwen/qwen-2.5-coder-32b-instruct:free", temperature=0.7, max_tokens=512, api_key=None):
+        # Initialize with custom base URL and model name
+        super().__init__(
+            model=model_name,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            api_key=api_key,
+            api_base="https://openrouter.ai/api/v1",
+            additional_headers={
+                "HTTP-Referer": "weather-assistant-app",
+                "X-Title": "Weather Assistant"
+            }
+        )
+
+# Configure the language model with OpenRouter
+llm = OpenRouterLLM(
+    model_name="qwen/qwen-2.5-coder-32b-instruct:free",
     temperature=0.7,
     max_tokens=512,
-    token=hf_token,
+    api_key=openrouter_token
 )
 
 # Create the agent with an appropriate system prompt
@@ -72,7 +84,6 @@ def respond(message, history):
 with gr.Blocks(title="Weather Assistant") as demo:
     gr.Markdown("# 🌤️ Weather Assistant")
     gr.Markdown("### Ask about the weather in Tokyo, San Francisco, or Paris")
-
     chatbot = gr.ChatInterface(
         respond,
         examples=[
@@ -84,8 +95,7 @@ with gr.Blocks(title="Weather Assistant") as demo:
         ],
         title="Chat with Weather Assistant"
     )
-
-    gr.Markdown("### Built with LlamaIndex and Qwen2.5-Coder-32B-Instruct")
+    gr.Markdown("### Built with LlamaIndex and OpenRouter API")
 
 # Launch the application
 if __name__ == "__main__":
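
For reference, the new OpenRouterLLM simply points the OpenAI-compatible client at OpenRouter: the api_base, attribution headers, and model id in the diff map onto OpenRouter's standard chat-completions endpoint. Below is a minimal sketch of the equivalent raw call using the newly imported requests module; it is not part of this commit, and it assumes the conventional /chat/completions route and that OPENROUTER_API_KEY is set, as the updated app.py requires.

import os
import requests

# Assumption: OPENROUTER_API_KEY is configured, as the updated app.py expects
resp = requests.post(
    "https://openrouter.ai/api/v1/chat/completions",  # api_base from the diff plus the chat route
    headers={
        "Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}",
        "HTTP-Referer": "weather-assistant-app",  # optional attribution headers, same values as in OpenRouterLLM
        "X-Title": "Weather Assistant",
    },
    json={
        "model": "qwen/qwen-2.5-coder-32b-instruct:free",
        "messages": [{"role": "user", "content": "What's the weather in Tokyo?"}],
        "temperature": 0.7,
        "max_tokens": 512,
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])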