File size: 4,688 Bytes
34917ad
 
6b3240b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34917ad
 
6b3240b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34917ad
6b3240b
34917ad
6b3240b
 
34917ad
6b3240b
 
34917ad
6b3240b
 
 
 
 
34917ad
6b3240b
 
 
 
 
 
 
 
 
 
34917ad
6b3240b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34917ad
6b3240b
 
34917ad
 
6b3240b
34917ad
 
6b3240b
 
 
 
34917ad
6b3240b
 
 
 
 
 
 
 
 
 
34917ad
6b3240b
 
 
 
 
34917ad
6b3240b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34917ad
 
96e7af4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import json
import gradio as gr
from huggingface_hub import InferenceClient

# Function to call the Llama 3.1 8B model through Hugging Face API
def call_llama_model(user_query):
    """Ask Llama 3.1 to turn *user_query* into an ``add_numbers`` call.

    Args:
        user_query: Natural-language text containing two numbers to add.

    Returns:
        The raw model completion, expected to be a JSON object of the
        form ``{"function_call": {"name": ..., "parameters": {...}}}``.
    """
    # Initialize the inference client for the hosted instruct model.
    client = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct")

    # Define the addition function schema (mirrors what process_addition parses)
    function_schema = {
        "name": "add_numbers",
        "description": "Add two numbers together",
        "parameters": {
            "type": "object",
            "properties": {
                "num1": {
                    "type": "number",
                    "description": "First number to add"
                },
                "num2": {
                    "type": "number",
                    "description": "Second number to add"
                }
            },
            "required": ["num1", "num2"]
        }
    }

    # Spell out the exact JSON envelope the downstream parser expects
    # ({"function_call": ...}); the previous prompt never mentioned it,
    # so the model had no reason to produce that key.
    system_prompt = (
        "You have access to the following function:\n"
        f"{json.dumps(function_schema, indent=2)}\n\n"
        "Your task is to extract two numbers from the user's query and call "
        "the add_numbers function.\n"
        "Respond ONLY with valid JSON of the form:\n"
        '{"function_call": {"name": "add_numbers", '
        '"parameters": {"num1": <number>, "num2": <number>}}}\n'
        "Do not include any other text."
    )

    # Build the prompt with the official Llama 3.1 chat-template tokens;
    # the earlier "<|system|>"/"<|user|>" markers are not part of this
    # model's prompt format and degrade instruction following.
    prompt = (
        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
        f"{system_prompt}<|eot_id|>"
        "<|start_header_id|>user<|end_header_id|>\n\n"
        f"{user_query}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    )

    # Low temperature keeps the JSON output deterministic-ish.
    response = client.text_generation(
        prompt=prompt,
        max_new_tokens=256,
        temperature=0.1,
        return_full_text=False
    )

    return response

def _extract_json_payload(model_response):
    """Return the outermost '{...}' span in *model_response*, or None.

    The model may wrap its JSON in extra prose, so we slice from the
    first '{' to the last '}' rather than parsing the whole string.
    """
    json_start = model_response.find('{')
    json_end = model_response.rfind('}') + 1
    if json_start >= 0 and json_end > json_start:
        return model_response[json_start:json_end]
    return None


def _execute_function_call(response_data):
    """Dispatch a parsed function-call payload and format the result string."""
    if "function_call" not in response_data:
        return f"No function call found in response: {response_data}"

    call = response_data["function_call"]
    function_name = call["name"]
    # Some models emit OpenAI-style "arguments" instead of "parameters";
    # accept either spelling.
    params = call.get("parameters", call.get("arguments", {}))

    if function_name != "add_numbers":
        return f"Unknown function: {function_name}"

    # Explicit check gives a clear message instead of a bare KeyError
    # surfacing as "Error: 'num1'".
    try:
        num1 = params["num1"]
        num2 = params["num2"]
    except KeyError as e:
        return f"Error: missing parameter {e} in function call: {call}"

    result = num1 + num2

    # Return a formatted response
    return f"""
    Model parsed your query as:
    - First number: {num1}
    - Second number: {num2}
    
    Function called: {function_name}
    Result: {result}
    """


# Function to parse the model response and calculate the result
def process_addition(query):
    """Send *query* to the model and evaluate the add_numbers call it returns.

    Args:
        query: Natural-language request such as "Add 25 and 17".

    Returns:
        A human-readable result or error string. This function never
        raises; every failure mode is reported as text for the UI.
    """
    try:
        # Get model response
        model_response = call_llama_model(query)

        # The model may surround the JSON with extra text; isolate it first.
        try:
            json_str = _extract_json_payload(model_response)
            if json_str is None:
                return f"Error: No valid JSON found in response: {model_response}"
            response_data = json.loads(json_str)
            return _execute_function_call(response_data)
        except json.JSONDecodeError as e:
            return f"Error parsing JSON: {str(e)}\nRaw response: {model_response}"

    except Exception as e:
        # Catch-all boundary: surface network/model errors to the UI as text.
        return f"Error: {str(e)}"

# Create Gradio interface
def create_demo():
    """Build and return the Gradio Blocks UI for the addition demo."""
    # Sample queries shown below the input box.
    sample_queries = [
        "Add 25 and 17",
        "What is 42 plus 58?",
        "Can you sum 123 and 456?",
        "I need to add 7.5 and 2.25",
        "What's the total of 1000 and 2000?",
    ]

    with gr.Blocks() as demo:
        gr.Markdown("# Llama 3.1 Function Calling Demo: Addition")
        gr.Markdown("Enter a query asking to add two numbers (e.g., 'Add 25 and 17' or 'What's 42 plus 58?')")

        # Two-column layout: input + button on the left, result on the right.
        with gr.Row():
            with gr.Column():
                query_input = gr.Textbox(label="Your Query", placeholder="Add 25 and 17")
                calc_button = gr.Button("Calculate")
            with gr.Column():
                result_box = gr.Textbox(label="Result")

        # Wire the button to the model-backed addition handler.
        calc_button.click(fn=process_addition, inputs=query_input, outputs=result_box)

        gr.Examples(examples=sample_queries, inputs=query_input)

    return demo

# Create and launch the demo
# NOTE(review): `demo` is built at import time, outside the __main__ guard —
# presumably so a hosting platform can import the module-level object;
# confirm before moving this inside the guard.
demo = create_demo()

if __name__ == "__main__":
    # Start the Gradio server only when run as a script.
    demo.launch()