ceymox committed on
Commit
6b3240b
·
verified ·
1 Parent(s): 82b6196

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -276
app.py CHANGED
@@ -1,297 +1,137 @@
1
- import os
2
  import json
3
- import re
4
- import datetime
5
- from google.oauth2 import service_account
6
- from googleapiclient.discovery import build
7
  import gradio as gr
8
- import torch
9
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
10
- from huggingface_hub import login
11
-
12
- # Login to Hugging Face if token is provided (for accessing gated models)
13
- if os.getenv("HF_TOKEN"):
14
- login(os.getenv("HF_TOKEN"))
15
-
16
- # Google Calendar API setup with Service Account
17
- SCOPES = ['https://www.googleapis.com/auth/calendar']
18
- # Calendar ID - use your calendar ID here
19
- CALENDAR_ID = os.getenv('CALENDAR_ID', '26f5856049fab3d6648a2f1dea57c70370de6bc1629a5182be1511b0e75d11d3@group.calendar.google.com')
20
-
21
- # Load Llama 3.1 model
22
- MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"
23
-
24
- def get_calendar_service():
25
- """Set up Google Calendar service using service account"""
26
- # Load service account info from environment
27
- service_account_info = json.loads(os.getenv('SERVICE_ACCOUNT_INFO', '{}'))
28
- credentials = service_account.Credentials.from_service_account_info(
29
- service_account_info, scopes=SCOPES)
30
-
31
- service = build('calendar', 'v3', credentials=credentials)
32
- return service
33
-
34
- def format_time(time_str):
35
- """Format time input to ensure 24-hour format"""
36
- # Handle AM/PM format
37
- time_str = time_str.strip().upper()
38
- is_pm = 'PM' in time_str
39
-
40
- # Remove AM/PM
41
- time_str = time_str.replace('AM', '').replace('PM', '').strip()
42
-
43
- # Parse hours and minutes
44
- if ':' in time_str:
45
- parts = time_str.split(':')
46
- hours = int(parts[0])
47
- minutes = int(parts[1]) if len(parts) > 1 else 0
48
- else:
49
- hours = int(time_str)
50
- minutes = 0
51
-
52
- # Convert to 24-hour format if needed
53
- if is_pm and hours < 12:
54
- hours += 12
55
- elif not is_pm and hours == 12:
56
- hours = 0
57
-
58
- # Return formatted time
59
- return f"{hours:02d}:{minutes:02d}"
60
-
61
- def add_event_to_calendar(name, date, time_str, duration_minutes=60):
62
- """Add an event to Google Calendar using Indian time zone"""
63
- service = get_calendar_service()
64
-
65
- # Format time properly
66
- formatted_time = format_time(time_str)
67
- print(f"Input time: {time_str}, Formatted time: {formatted_time}")
68
-
69
- # For debugging - show the date and time being used
70
- print(f"Using date: {date}, time: {formatted_time}")
71
-
72
- # Create event
73
- event = {
74
- 'summary': f"Appointment with {name}",
75
- 'description': f"Meeting with {name}",
76
- 'start': {
77
- 'dateTime': f"{date}T{formatted_time}:00",
78
- 'timeZone': 'Asia/Kolkata', # Indian Standard Time
79
- },
80
- 'end': {
81
- 'dateTime': f"{date}T{formatted_time}:00", # Will add duration below
82
- 'timeZone': 'Asia/Kolkata', # Indian Standard Time
83
- },
84
  }
85
 
86
- # Calculate end time properly in the same time zone
87
- start_dt = datetime.datetime.fromisoformat(f"{date}T{formatted_time}:00")
88
- end_dt = start_dt + datetime.timedelta(minutes=duration_minutes)
89
- event['end']['dateTime'] = end_dt.isoformat()
90
-
91
- print(f"Event start: {event['start']['dateTime']} {event['start']['timeZone']}")
92
- print(f"Event end: {event['end']['dateTime']} {event['end']['timeZone']}")
93
-
94
- try:
95
- # Add to calendar with detailed error handling
96
- event = service.events().insert(calendarId=CALENDAR_ID, body=event).execute()
97
- print(f"Event created successfully: {event.get('htmlLink')}")
98
- # Return True instead of the link to indicate success
99
- return True
100
- except Exception as e:
101
- print(f"Error creating event: {str(e)}")
102
- print(f"Calendar ID: {CALENDAR_ID}")
103
- print(f"Event details: {json.dumps(event, indent=2)}")
104
- raise
105
-
106
- def extract_function_call(text):
107
- """Extract function call parameters from Llama's response text"""
108
- # Look for JSON-like structure in the response
109
- json_pattern = r'```json\s*({.*?})\s*```'
110
- matches = re.findall(json_pattern, text, re.DOTALL)
111
-
112
- if matches:
113
- try:
114
- return json.loads(matches[0])
115
- except json.JSONDecodeError:
116
- pass
117
-
118
- # Try to find a pattern like {"name": "John", "date": "2025-05-10", "time": "14:30"}
119
- json_pattern = r'{.*?"name".*?:.*?"(.*?)".*?"date".*?:.*?"(.*?)".*?"time".*?:.*?"(.*?)".*?}'
120
- matches = re.findall(json_pattern, text, re.DOTALL)
121
-
122
- if matches and len(matches[0]) == 3:
123
- name, date, time = matches[0]
124
- return {"name": name, "date": date, "time": time}
125
-
126
- # If no JSON structure is found, try to extract individual fields
127
- name_match = re.search(r'name["\s:]+([^",]+)', text, re.IGNORECASE)
128
- date_match = re.search(r'date["\s:]+([^",]+)', text, re.IGNORECASE)
129
- time_match = re.search(r'time["\s:]+([^",]+)', text, re.IGNORECASE)
130
-
131
- result = {}
132
- if name_match:
133
- result["name"] = name_match.group(1).strip()
134
- if date_match:
135
- result["date"] = date_match.group(1).strip()
136
- if time_match:
137
- result["time"] = time_match.group(1).strip()
138
 
139
- return result if result else None
140
 
141
- def process_with_llama(user_input, conversation_history, llm_pipeline):
142
- """Process user input with Llama 3.1 model, handling function calling"""
143
  try:
144
- # Build conversation context with function calling instructions
145
- function_description = """
146
- You have access to the following function:
147
-
148
- book_appointment
149
- Description: Book an appointment in Google Calendar
150
- Parameters:
151
- - name: string, Name of the person for the appointment
152
- - date: string, Date of appointment in YYYY-MM-DD format
153
- - time: string, Time of appointment (e.g., '2:30 PM', '14:30')
154
-
155
- When you need to book an appointment, output the function call in JSON format like this:
156
- ```json
157
- {"name": "John Doe", "date": "2025-05-10", "time": "14:30"}
158
- ```
159
- """
160
-
161
- # Create a prompt that includes conversation history and function description
162
- prompt = "You are an appointment booking assistant for Indian users. "
163
- prompt += "You help book appointments in Google Calendar using Indian Standard Time. "
164
- prompt += function_description
165
-
166
- # Add conversation history to the prompt
167
- for message in conversation_history:
168
- if message["role"] == "user":
169
- prompt += f"\n\nUser: {message['content']}"
170
- elif message["role"] == "assistant":
171
- prompt += f"\n\nAssistant: {message['content']}"
172
 
173
- # Add the current user message
174
- prompt += f"\n\nUser: {user_input}\n\nAssistant:"
175
-
176
- # Generate response from Llama
177
- response = llm_pipeline(prompt, max_new_tokens=1024, do_sample=True, temperature=0.1)
178
- llama_response = response[0]['generated_text'][len(prompt):].strip()
179
-
180
- # Check if Llama wants to call a function
181
- function_args = extract_function_call(llama_response)
182
-
183
- if function_args and "name" in function_args and "date" in function_args and "time" in function_args:
184
- print(f"Function arguments from Llama: {json.dumps(function_args, indent=2)}")
185
 
186
- # Add to calendar
187
- try:
188
- # Call the function but ignore the return value (we don't need the link)
189
- add_event_to_calendar(
190
- function_args["name"],
191
- function_args["date"],
192
- function_args["time"]
193
- )
194
-
195
- # Construct a response that confirms booking but doesn't include a link
196
- final_response = f"Great! I've booked an appointment for {function_args['name']} on {function_args['date']} at {function_args['time']} (Indian Standard Time). The appointment has been added to your calendar."
197
 
198
- except Exception as e:
199
- final_response = f"I attempted to book an appointment, but encountered an error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
 
201
- # Update conversation history
202
- conversation_history.append({"role": "user", "content": user_input})
203
- conversation_history.append({"role": "assistant", "content": final_response})
204
-
205
- return final_response, conversation_history
206
- else:
207
- # No function call detected, just return Llama's response
208
- conversation_history.append({"role": "user", "content": user_input})
209
- conversation_history.append({"role": "assistant", "content": llama_response})
210
 
211
- return llama_response, conversation_history
212
-
213
  except Exception as e:
214
- print(f"Error in process_with_llama: {str(e)}")
215
- return f"Error: {str(e)}", conversation_history
216
-
217
- # System prompt for conversation
218
- system_prompt = """You are an appointment booking assistant for Indian users.
219
- When someone asks to book an appointment, collect:
220
-
221
- 1. Their name
222
- 2. The date (in YYYY-MM-DD format)
223
- 3. The time (in either 12-hour format like '2:30 PM' or 24-hour format like '14:30')
224
-
225
- All appointments are in Indian Standard Time (IST).
226
-
227
- If any information is missing, ask for it politely. Once you have all details, use the
228
- book_appointment function to add it to the calendar.
229
-
230
- IMPORTANT: After booking an appointment, simply confirm the details. Do not include
231
- any links or mention viewing the appointment details. The user does not need to click
232
- any links to view their appointment.
233
-
234
- IMPORTANT: Make sure to interpret times correctly. If a user says '2 PM' or just '2',
235
- this likely means 2:00 PM (14:00) in 24-hour format."""
236
-
237
- # Initialize model and pipeline
238
- def load_model_and_pipeline():
239
- model = AutoModelForCausalLM.from_pretrained(
240
- MODEL_ID,
241
- torch_dtype=torch.bfloat16,
242
- device_map="auto",
243
- low_cpu_mem_usage=True
244
- )
245
-
246
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
247
-
248
- # Create text generation pipeline
249
- llm_pipeline = pipeline(
250
- "text-generation",
251
- model=model,
252
- tokenizer=tokenizer,
253
- return_full_text=True,
254
- max_new_tokens=1024
255
- )
256
-
257
- return llm_pipeline
258
-
259
- # Initialize conversation history with system prompt
260
- conversation_history = [{"role": "system", "content": system_prompt}]
261
-
262
- # Load model and pipeline at startup
263
- llm_pipe = load_model_and_pipeline()
264
 
265
  # Create Gradio interface
266
- with gr.Blocks(title="Calendar Booking Assistant") as demo:
267
- gr.Markdown("# Indian Time Zone Appointment Booking with Llama 3.1")
268
- gr.Markdown("Say something like 'Book an appointment for John on May 10th at 2pm'")
269
-
270
- # Chat interface
271
- chatbot = gr.Chatbot()
272
- msg = gr.Textbox(placeholder="Type your message here...", label="Message")
273
- clear = gr.Button("Clear Chat")
274
-
275
- # State for conversation history
276
- state = gr.State(conversation_history)
277
-
278
- # Handle user input
279
- def user_input(message, history, conv_history):
280
- if message.strip() == "":
281
- return "", history, conv_history
282
 
283
- # Get response from Llama
284
- response, updated_conv_history = process_with_llama(message, conv_history, llm_pipe)
 
 
 
 
 
 
 
 
285
 
286
- # Update chat display
287
- history.append((message, response))
 
 
 
288
 
289
- return "", history, updated_conv_history
290
-
291
- # Connect components
292
- msg.submit(user_input, [msg, chatbot, state], [msg, chatbot, state])
293
- clear.click(lambda: ([], [{"role": "system", "content": system_prompt}]), None, [chatbot, state])
 
 
 
 
 
 
 
 
 
 
294
 
295
- # Launch the app
296
  if __name__ == "__main__":
297
  demo.launch()
 
 
1
  import json
 
 
 
 
2
  import gradio as gr
3
+ from huggingface_hub import InferenceClient
4
+
5
+ # Function to call the Llama 3.1 8B model through Hugging Face API
6
+ def call_llama_model(user_query):
7
+ # Initialize the inference client
8
+ client = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct")
9
+
10
+ # Define the addition function schema
11
+ function_schema = {
12
+ "name": "add_numbers",
13
+ "description": "Add two numbers together",
14
+ "parameters": {
15
+ "type": "object",
16
+ "properties": {
17
+ "num1": {
18
+ "type": "number",
19
+ "description": "First number to add"
20
+ },
21
+ "num2": {
22
+ "type": "number",
23
+ "description": "Second number to add"
24
+ }
25
+ },
26
+ "required": ["num1", "num2"]
27
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  }
29
 
30
+ # Create the system prompt with function definition
31
+ system_prompt = f"""You have access to the following function:
32
+ {json.dumps(function_schema, indent=2)}
33
+
34
+ Your task is to extract two numbers from the user's query and call the add_numbers function.
35
+ Format your response as JSON with the function name and parameters.
36
+ Only respond with valid JSON containing the function call, nothing else.
37
+ """
38
+
39
+ # Call the model
40
+ response = client.text_generation(
41
+ prompt=f"<|system|>\n{system_prompt}\n<|user|>\n{user_query}\n<|assistant|>",
42
+ max_new_tokens=256,
43
+ temperature=0.1,
44
+ return_full_text=False
45
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
+ return response
48
 
49
+ # Function to parse the model response and calculate the result
50
+ def process_addition(query):
51
  try:
52
+ # Get model response
53
+ model_response = call_llama_model(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
+ # Try to parse the JSON response
56
+ try:
57
+ # Find the JSON part in the response (it might have additional text)
58
+ json_start = model_response.find('{')
59
+ json_end = model_response.rfind('}') + 1
 
 
 
 
 
 
 
60
 
61
+ if json_start >= 0 and json_end > json_start:
62
+ json_str = model_response[json_start:json_end]
63
+ response_data = json.loads(json_str)
64
+ else:
65
+ return f"Error: No valid JSON found in response: {model_response}"
66
+
67
+ # Check if it has a function call
68
+ if "function_call" in response_data:
69
+ function_name = response_data["function_call"]["name"]
70
+ params = response_data["function_call"]["parameters"]
 
71
 
72
+ if function_name == "add_numbers":
73
+ num1 = params["num1"]
74
+ num2 = params["num2"]
75
+ result = num1 + num2
76
+
77
+ # Return a formatted response
78
+ return f"""
79
+ Model parsed your query as:
80
+ - First number: {num1}
81
+ - Second number: {num2}
82
+
83
+ Function called: {function_name}
84
+ Result: {result}
85
+ """
86
+ else:
87
+ return f"Unknown function: {function_name}"
88
+ else:
89
+ return f"No function call found in response: {response_data}"
90
 
91
+ except json.JSONDecodeError as e:
92
+ return f"Error parsing JSON: {str(e)}\nRaw response: {model_response}"
 
 
 
 
 
 
 
93
 
 
 
94
  except Exception as e:
95
+ return f"Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
  # Create Gradio interface
98
+ def create_demo():
99
+ with gr.Blocks() as demo:
100
+ gr.Markdown("# Llama 3.1 Function Calling Demo: Addition")
101
+ gr.Markdown("Enter a query asking to add two numbers (e.g., 'Add 25 and 17' or 'What's 42 plus 58?')")
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
+ with gr.Row():
104
+ with gr.Column():
105
+ query_input = gr.Textbox(
106
+ label="Your Query",
107
+ placeholder="Add 25 and 17"
108
+ )
109
+ submit_btn = gr.Button("Calculate")
110
+
111
+ with gr.Column():
112
+ output = gr.Textbox(label="Result")
113
 
114
+ submit_btn.click(
115
+ fn=process_addition,
116
+ inputs=query_input,
117
+ outputs=output
118
+ )
119
 
120
+ gr.Examples(
121
+ examples=[
122
+ "Add 25 and 17",
123
+ "What is 42 plus 58?",
124
+ "Can you sum 123 and 456?",
125
+ "I need to add 7.5 and 2.25",
126
+ "What's the total of 1000 and 2000?"
127
+ ],
128
+ inputs=query_input
129
+ )
130
+
131
+ return demo
132
+
133
+ # Create and launch the demo
134
+ demo = create_demo()
135
 
 
136
  if __name__ == "__main__":
137
  demo.launch()