ceymox committed
Commit 3945efd · verified · 1 Parent(s): 23f910c

Update app.py

Files changed (1)
  1. app.py +33 -68
app.py CHANGED
@@ -1,11 +1,15 @@
 import json
+import os
 import gradio as gr
 from huggingface_hub import InferenceClient
 
 # Function to call the Llama 3.1 8B model through Hugging Face API
 def call_llama_model(user_query):
-    # Initialize the inference client
-    client = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct")
+    # Initialize the inference client with access token
+    client = InferenceClient(
+        "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        token=os.environ.get("HF_TOKEN")
+    )
 
     # Define the addition function schema
     function_schema = {
@@ -29,14 +33,18 @@ def call_llama_model(user_query):
 
     # Create the system prompt with function definition
     system_prompt = f"""You have access to the following function:
-    {json.dumps(function_schema, indent=2)}
-
-    Your task is to extract two numbers from the user's query and call the add_numbers function.
-    Format your response as JSON with the function name and parameters.
-    Only respond with valid JSON containing the function call, nothing else.
-    """
+    {json.dumps(function_schema, indent=2)}
+
+    When given a query about adding numbers, you must:
+    1. Extract the two numbers from the query
+    2. Call the add_numbers function with these numbers
+    3. Respond ONLY with a valid JSON object in this exact format:
+    {{"function_call": {{"name": "add_numbers", "parameters": {{"num1": [first number], "num2": [second number]}}}}}}
+
+    DO NOT include any explanation, just the JSON.
+    """
 
-    # Call the model
+    # Call the model with the appropriate format for Llama 3.1
     response = client.text_generation(
         prompt=f"<|system|>\n{system_prompt}\n<|user|>\n{user_query}\n<|assistant|>",
         max_new_tokens=256,
@@ -52,6 +60,9 @@ def process_addition(query):
     # Get model response
     model_response = call_llama_model(query)
 
+    # Create a debug output with the raw response
+    debug_info = f"Raw model response:\n{model_response}\n\n"
+
     # Try to parse the JSON response
     try:
         # Find the JSON part in the response (it might have additional text)
@@ -61,8 +72,9 @@ def process_addition(query):
         if json_start >= 0 and json_end > json_start:
             json_str = model_response[json_start:json_end]
             response_data = json.loads(json_str)
+            debug_info += f"Parsed JSON:\n{json.dumps(response_data, indent=2)}\n\n"
         else:
-            return f"Error: No valid JSON found in response: {model_response}"
+            return f"Error: No valid JSON found in response.\n\n{debug_info}"
 
         # Check if it has a function call
         if "function_call" in response_data:
@@ -70,68 +82,21 @@ def process_addition(query):
             params = response_data["function_call"]["parameters"]
 
             if function_name == "add_numbers":
-                num1 = params["num1"]
-                num2 = params["num2"]
+                # Extract the numbers
+                num1 = float(params["num1"])
+                num2 = float(params["num2"])
                 result = num1 + num2
 
                 # Return a formatted response
                 return f"""
-                Model parsed your query as:
-                - First number: {num1}
-                - Second number: {num2}
-
-                Function called: {function_name}
-                Result: {result}
-                """
-            else:
-                return f"Unknown function: {function_name}"
-        else:
-            return f"No function call found in response: {response_data}"
-
-    except json.JSONDecodeError as e:
-        return f"Error parsing JSON: {str(e)}\nRaw response: {model_response}"
-
-    except Exception as e:
-        return f"Error: {str(e)}"
+### Input Processed Successfully
 
-# Create Gradio interface
-def create_demo():
-    with gr.Blocks() as demo:
-        gr.Markdown("# Llama 3.1 Function Calling Demo: Addition")
-        gr.Markdown("Enter a query asking to add two numbers (e.g., 'Add 25 and 17' or 'What's 42 plus 58?')")
-
-        with gr.Row():
-            with gr.Column():
-                query_input = gr.Textbox(
-                    label="Your Query",
-                    placeholder="Add 25 and 17"
-                )
-                submit_btn = gr.Button("Calculate")
-
-            with gr.Column():
-                output = gr.Textbox(label="Result")
-
-        submit_btn.click(
-            fn=process_addition,
-            inputs=query_input,
-            outputs=output
-        )
-
-        gr.Examples(
-            examples=[
-                "Add 25 and 17",
-                "What is 42 plus 58?",
-                "Can you sum 123 and 456?",
-                "I need to add 7.5 and 2.25",
-                "What's the total of 1000 and 2000?"
-            ],
-            inputs=query_input
-        )
-
-    return demo
+**Numbers extracted:** {num1} and {num2}
+
+**Function called:** `{function_name}`
 
-# Create and launch the demo
-demo = create_demo()
+**Result:** {result}
 
-if __name__ == "__main__":
-    demo.launch()
+**JSON Function Call:**
+```json
+{json.dumps(response_data, indent=2)}
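
A quick way to sanity-check the response format the new system prompt asks for, without calling the model, is to run the updated parsing path on a canned string. The sketch below does that; the sample response and the first-brace/last-brace search are assumptions (the hunks show the `json_start`/`json_end` check but not how those indices are computed), while the `function_call` / `add_numbers` / `num1` / `num2` names come from the diff itself.

```python
import json

# Hypothetical model output: the exact JSON shape the new prompt demands, wrapped
# in extra text. "num1" is deliberately a string, since the updated code now
# coerces both parameters with float().
sample_response = 'Sure! {"function_call": {"name": "add_numbers", "parameters": {"num1": "25", "num2": 17}}}'

# Assumed extraction strategy: take the span from the first "{" to the last "}".
json_start = sample_response.find("{")
json_end = sample_response.rfind("}") + 1

if json_start >= 0 and json_end > json_start:
    response_data = json.loads(sample_response[json_start:json_end])
    params = response_data["function_call"]["parameters"]
    # float() mirrors the new num1/num2 handling, so string-typed numbers still add up.
    print(float(params["num1"]) + float(params["num2"]))  # 42.0
```

Note that the updated client initialization reads the token with `os.environ.get("HF_TOKEN")`, so the Space (or a local run) needs `HF_TOKEN` set to a token that has been granted access to the gated meta-llama/Meta-Llama-3.1-8B-Instruct repository.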