antoniomtz committed (verified)
Commit 3045cfe · Parent(s): 35b303e

Update app.py

Files changed (1): app.py (+121 -65)

app.py CHANGED
@@ -1,102 +1,158 @@
 import os
 import json
 import gradio as gr
-from qwen_agent.llm import get_chat_model
+from huggingface_hub import InferenceClient
+from typing import Dict, Any, List

 # Define a static weather tool function
 def get_current_weather(location, unit="fahrenheit"):
     """Get the current weather in a given location"""
     if "tokyo" in location.lower():
-        return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
+        return {"location": "Tokyo", "temperature": "10", "unit": "celsius"}
     elif "san francisco" in location.lower():
-        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
+        return {"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}
     elif "paris" in location.lower():
-        return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
+        return {"location": "Paris", "temperature": "22", "unit": "celsius"}
     else:
-        return json.dumps({"location": location, "temperature": "unknown"})
+        return {"location": location, "temperature": "unknown", "unit": unit}

-# Set up the weather function definition
+class HfApiModel:
+    def __init__(self, max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', token=None):
+        self.max_tokens = max_tokens
+        self.temperature = temperature
+        self.model_id = model_id
+        self.client = InferenceClient(model=model_id, token=token)
+
+    def generate_with_function_calling(self, messages, functions):
+        try:
+            # Format messages for the model
+            response = self.client.chat_completion(
+                messages=messages,
+                max_tokens=self.max_tokens,
+                temperature=self.temperature,
+                tools=functions,
+                tool_choice="auto"
+            )
+
+            return response
+        except Exception as e:
+            print(f"Error in generate: {str(e)}")
+            return {"error": str(e)}
+
+    def call_function(self, function_name, arguments):
+        if function_name == "get_current_weather":
+            location = arguments.get("location", "")
+            unit = arguments.get("unit", "fahrenheit")
+            return get_current_weather(location, unit)
+        return {"error": f"Function {function_name} not found"}
+
+# Initialize the model
+def init_model():
+    token = os.environ.get("HUGGINGFACE_TOKEN")
+    return HfApiModel(
+        max_tokens=2096,
+        temperature=0.5,
+        model_id='Qwen/Qwen2.5-Coder-32B-Instruct'
+    )
+
+# Define the weather function schema
 weather_function = {
-    'name': 'get_current_weather',
-    'description': 'Get the current weather in a given location',
-    'parameters': {
-        'type': 'object',
-        'properties': {
-            'location': {
-                'type': 'string',
-                'description': 'The city and state, e.g. San Francisco, CA',
-            },
-            'unit': {
-                'type': 'string',
-                'enum': ['celsius', 'fahrenheit'],
-                'description': 'The unit of temperature to use. Infer this from the user\'s location.'
+    "type": "function",
+    "function": {
+        "name": "get_current_weather",
+        "description": "Get the current weather in a given location",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The city and state, e.g. San Francisco, CA"
+                },
+                "unit": {
+                    "type": "string",
+                    "enum": ["celsius", "fahrenheit"],
+                    "description": "The unit of temperature to use. Infer this from the user's location."
+                }
             },
-        },
-        'required': ['location'],
-    },
+            "required": ["location"]
+        }
+    }
 }

-# Initialize the Qwen model
-def init_model():
-    llm = get_chat_model({
-        'model': 'Qwen/Qwen2.5-Coder-32B-Instruct',
-        'endpoint_type': 'huggingface_hub',
-        'token': os.environ.get("HUGGINGFACE_TOKEN"),
-    })
-    return llm
-
 # Processing function for Gradio
 def process_message(message, history):
     # Initialize model on first run
-    if not hasattr(process_message, "llm"):
-        process_message.llm = init_model()
+    if not hasattr(process_message, "model"):
+        process_message.model = init_model()

-    # Set up the conversation
-    messages = [{'role': 'user', 'content': message}]
-    functions = [weather_function]
+    # Format conversation history for the model
+    formatted_history = []
+    for human, assistant in history:
+        formatted_history.append({"role": "user", "content": human})
+        if assistant:  # Check if assistant response exists
+            formatted_history.append({"role": "assistant", "content": assistant})
+
+    # Add the current message
+    messages = formatted_history + [{"role": "user", "content": message}]

-    # Step 1: Get the initial response
     try:
-        *_, response = process_message.llm.chat(
+        # Get response from the model
+        response = process_message.model.generate_with_function_calling(
             messages=messages,
-            functions=functions,
-            stream=True,
+            functions=[weather_function]
         )

-        # Step 2: Check if the model wanted to call a function
-        if response.get('function_call', None):
-            # Step 3: Call the function
-            function_name = response['function_call']['name']
-            function_args = json.loads(response['function_call']['arguments'])
+        # Check if there's a function call
+        if hasattr(response, "choices") and response.choices:
+            message_content = response.choices[0].message

-            # Only process weather function calls
-            if function_name == 'get_current_weather':
-                function_response = get_current_weather(
-                    location=function_args.get('location'),
-                    unit=function_args.get('unit'),
-                )
+            # Check if the model wants to call a function
+            if hasattr(message_content, "tool_calls") and message_content.tool_calls:
+                tool_call = message_content.tool_calls[0]
+                function_name = tool_call.function.name
+                function_args = json.loads(tool_call.function.arguments)
+
+                # Call the function
+                function_result = process_message.model.call_function(function_name, function_args)
+
+                # Add the function result to messages
+                messages.append({
+                    "role": "assistant",
+                    "content": None,
+                    "tool_calls": [{
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": function_name,
+                            "arguments": tool_call.function.arguments
+                        }
+                    }]
+                })

-                # Step 4: Send the function result back to the model
-                messages.append(response)  # Add the model's response with function call
                 messages.append({
-                    'role': 'function',
-                    'name': function_name,
-                    'content': function_response,
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "content": json.dumps(function_result)
                 })

-                # Get final response from the model
-                *_, final_response = process_message.llm.chat(
+                # Get final response
+                final_response = process_message.model.generate_with_function_calling(
                     messages=messages,
-                    functions=functions,
-                    stream=False,
+                    functions=[weather_function]
                 )
-                return final_response['content']
-
-            # If no function was called, return the initial response
-            return response['content']
+
+                if hasattr(final_response, "choices") and final_response.choices:
+                    return final_response.choices[0].message.content
+                return "Error processing function result"
+
+            # If no function call, return the content directly
+            if hasattr(message_content, "content"):
+                return message_content.content
+
+            return "I couldn't process that request properly. Please try again."

     except Exception as e:
-        return f"Error: {str(e)}"
+        return f"Error: {str(e)}"

 # Set up the Gradio interface
 with gr.Blocks(title="Qwen Weather Assistant") as demo:
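
To exercise the new tool-calling path outside the Gradio UI, the sketch below runs one round trip by hand. It is not part of the commit: it assumes HUGGINGFACE_TOKEN is set in the environment and that the model actually elects to call the tool for this prompt. The token is passed to HfApiModel directly because init_model() in the diff reads the variable but does not forward it to the constructor.

import os
import json

# Minimal smoke test for the InferenceClient tool-calling round trip
# (hypothetical usage, not part of the commit).
model = HfApiModel(token=os.environ.get("HUGGINGFACE_TOKEN"))

response = model.generate_with_function_calling(
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    functions=[weather_function],
)

# Inspect the first tool call and execute it locally, mirroring the
# commit's own parsing of tool_call.function.arguments as a JSON string.
tool_call = response.choices[0].message.tool_calls[0]
args = json.loads(tool_call.function.arguments)
print(tool_call.function.name)  # expected: get_current_weather
print(model.call_function(tool_call.function.name, args))
# expected: {'location': 'Paris', 'temperature': '22', 'unit': 'celsius'}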