ceymox committed on
Commit
34917ad
·
verified ·
1 Parent(s): dab29a1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +312 -0
app.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import re
4
+ import datetime
5
+ from google.oauth2 import service_account
6
+ from googleapiclient.discovery import build
7
+ import gradio as gr
8
+ import torch
9
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
10
+
11
# Google Calendar API setup with Service Account.
# Full calendar read/write scope is needed to insert events.
SCOPES = ['https://www.googleapis.com/auth/calendar']
# Target calendar; override with the CALENDAR_ID environment variable.
CALENDAR_ID = os.getenv('CALENDAR_ID', '26f5856049fab3d6648a2f1dea57c70370de6bc1629a5182be1511b0e75d11d3@group.calendar.google.com')
# Path to the service account key file — local-development fallback;
# hosted deployments supply SERVICE_ACCOUNT_INFO instead (see get_calendar_service).
SERVICE_ACCOUNT_FILE = os.getenv('SERVICE_ACCOUNT_FILE', 'service-account-key.json')

# Hugging Face model ID loaded by load_llama_model().
MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"
20
+
21
def get_calendar_service():
    """Build an authenticated Google Calendar v3 client.

    Credentials come from the SERVICE_ACCOUNT_INFO environment variable
    (a JSON blob, used on hosted/Spaces deployments) when it is set and
    non-empty; otherwise from the local SERVICE_ACCOUNT_FILE key file.

    Returns:
        A googleapiclient service object for the Calendar v3 API.
    """
    inline_key = os.getenv('SERVICE_ACCOUNT_INFO')
    if inline_key:
        # Hosted deployment: parse the key material straight from the env
        creds = service_account.Credentials.from_service_account_info(
            json.loads(inline_key), scopes=SCOPES)
    else:
        # Local development: read the key file from disk
        creds = service_account.Credentials.from_service_account_file(
            SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    return build('calendar', 'v3', credentials=creds)
36
+
37
def format_time(time_str):
    """Normalize a user-supplied clock time to 24-hour "HH:MM".

    Accepts 12-hour input with an AM/PM suffix (any case) or bare
    24-hour input; an hour with no colon (e.g. "2") gets ":00" minutes.

    Args:
        time_str: Raw time text such as "2:30 PM", "14:30", or "2".

    Returns:
        Zero-padded 24-hour time string "HH:MM".
    """
    cleaned = time_str.strip().upper()
    pm_flag = 'PM' in cleaned

    # Strip the meridiem markers; only the flag above is kept
    for marker in ('AM', 'PM'):
        cleaned = cleaned.replace(marker, '')
    cleaned = cleaned.strip()

    # "H", "H:MM", or "H:MM:SS" — extra fields beyond minutes are ignored
    pieces = cleaned.split(':')
    hours = int(pieces[0])
    minutes = int(pieces[1]) if len(pieces) > 1 else 0

    # 12-hour → 24-hour: "1 PM".."11 PM" add 12; "12 AM" is midnight
    if pm_flag and hours < 12:
        hours += 12
    if not pm_flag and hours == 12:
        hours = 0

    return f"{hours:02d}:{minutes:02d}"
63
+
64
def add_event_to_calendar(name, date, time_str, duration_minutes=60):
    """Insert an appointment into Google Calendar in Asia/Kolkata time.

    Args:
        name: Attendee name, used in the event summary and description.
        date: Appointment date in YYYY-MM-DD format.
        time_str: Start time in 12- or 24-hour form (normalized via format_time).
        duration_minutes: Event length in minutes; defaults to one hour.

    Returns:
        True when the event was created successfully.

    Raises:
        Re-raises any Google API error after logging the event payload.
    """
    service = get_calendar_service()

    # Normalize the user's time to 24-hour HH:MM before building timestamps
    formatted_time = format_time(time_str)
    print(f"Input time: {time_str}, Formatted time: {formatted_time}")
    print(f"Using date: {date}, time: {formatted_time}")

    # FIX: compute start/end up front so the event body is built once;
    # the original inserted a placeholder end equal to the start and
    # patched it after construction, which was easy to misread.
    start_dt = datetime.datetime.fromisoformat(f"{date}T{formatted_time}:00")
    end_dt = start_dt + datetime.timedelta(minutes=duration_minutes)

    event = {
        'summary': f"Appointment with {name}",
        'description': f"Meeting with {name}",
        'start': {
            'dateTime': start_dt.isoformat(),
            'timeZone': 'Asia/Kolkata',  # Indian Standard Time
        },
        'end': {
            'dateTime': end_dt.isoformat(),
            'timeZone': 'Asia/Kolkata',  # Indian Standard Time
        },
    }

    print(f"Event start: {event['start']['dateTime']} {event['start']['timeZone']}")
    print(f"Event end: {event['end']['dateTime']} {event['end']['timeZone']}")

    try:
        # Insert into the configured calendar; keep detailed failure logging
        event = service.events().insert(calendarId=CALENDAR_ID, body=event).execute()
        print(f"Event created successfully: {event.get('htmlLink')}")
        # Return True (not the link) so callers never surface the URL to users
        return True
    except Exception as e:
        print(f"Error creating event: {str(e)}")
        print(f"Calendar ID: {CALENDAR_ID}")
        print(f"Event details: {json.dumps(event, indent=2)}")
        raise
108
+
109
# Load model on startup to avoid loading it for each request.
# FIX: the original decorated this with @gr.utils.memoize(utils=["torch"]),
# which is not part of Gradio's public API and fails at import time.
# The function is called exactly once at module startup, so the single
# module-level call below already provides the intended "load once" behavior.
def load_llama_model():
    """Load the Llama 3.1 model and tokenizer.

    Returns:
        (model, tokenizer): a transformers causal-LM and its tokenizer,
        ready to be wrapped in a text-generation pipeline.
    """
    print("Loading Llama 3.1 model...")

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.bfloat16,  # halves memory vs fp32; needs bf16-capable hardware
        device_map="auto",           # let accelerate place weights on available devices
        low_cpu_mem_usage=True,
        use_cache=True,
    )

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

    return model, tokenizer
128
+
129
def extract_function_call(text):
    """Pull book_appointment arguments out of the model's reply text.

    Tries three strategies in order:
      1. A fenced ```json ... ``` block parsed with json.loads.
      2. A bare JSON-like object containing "name", "date", and "time".
      3. Loose key/value regexes for each field individually.

    Args:
        text: The raw generated reply from the model.

    Returns:
        A dict with whichever of "name"/"date"/"time" were found,
        or None when nothing could be extracted.
    """
    # Strategy 1: fenced JSON code block
    fenced = re.findall(r'```json\s*({.*?})\s*```', text, re.DOTALL)
    if fenced:
        try:
            return json.loads(fenced[0])
        except json.JSONDecodeError:
            pass  # fall through to the looser patterns

    # Strategy 2: raw object like {"name": ..., "date": ..., "time": ...}
    triple = re.findall(
        r'{.*?"name".*?:.*?"(.*?)".*?"date".*?:.*?"(.*?)".*?"time".*?:.*?"(.*?)".*?}',
        text, re.DOTALL)
    if triple and len(triple[0]) == 3:
        return dict(zip(("name", "date", "time"), triple[0]))

    # Strategy 3: scrape each field independently, case-insensitively
    field_patterns = {
        "name": r'name["\s:]+([^",]+)',
        "date": r'date["\s:]+([^",]+)',
        "time": r'time["\s:]+([^",]+)',
    }
    extracted = {}
    for key, pattern in field_patterns.items():
        hit = re.search(pattern, text, re.IGNORECASE)
        if hit:
            extracted[key] = hit.group(1).strip()

    return extracted or None
163
+
164
def process_with_llama(user_input, conversation_history, llm_pipeline):
    """Run one chat turn through Llama 3.1 and execute any booking it requests.

    Args:
        user_input: The user's latest message.
        conversation_history: List of {"role": ..., "content": ...} dicts;
            appended to in place with the new user/assistant turns.
        llm_pipeline: A transformers text-generation pipeline over the Llama
            model. Assumes return_full_text=True — the prompt is sliced off
            the front of the generated text below. TODO confirm at call site.

    Returns:
        (response_text, conversation_history); on any exception the response
        is an "Error: ..." string and the history is returned as-is.
    """
    try:
        # Tool description taught to the model: one "function" plus the exact
        # JSON shape extract_function_call() knows how to parse back out.
        function_description = """
        You have access to the following function:

        book_appointment
        Description: Book an appointment in Google Calendar
        Parameters:
        - name: string, Name of the person for the appointment
        - date: string, Date of appointment in YYYY-MM-DD format
        - time: string, Time of appointment (e.g., '2:30 PM', '14:30')

        When you need to book an appointment, output the function call in JSON format like this:
        ```json
        {"name": "John Doe", "date": "2025-05-10", "time": "14:30"}
        ```
        """

        # Flat prompt: persona + tool description + replayed transcript
        prompt = "You are an appointment booking assistant for Indian users. "
        prompt += "You help book appointments in Google Calendar using Indian Standard Time. "
        prompt += function_description

        # Replay prior turns. NOTE(review): only "user"/"assistant" roles are
        # included — the "system" entry seeded by create_interface() is
        # silently skipped here; confirm that is intended.
        for message in conversation_history:
            if message["role"] == "user":
                prompt += f"\n\nUser: {message['content']}"
            elif message["role"] == "assistant":
                prompt += f"\n\nAssistant: {message['content']}"

        # Append the new message and cue the model to answer
        prompt += f"\n\nUser: {user_input}\n\nAssistant:"

        # Low temperature keeps the JSON tool-call output stable
        response = llm_pipeline(prompt, max_new_tokens=1024, do_sample=True, temperature=0.1)
        # Full text includes the echoed prompt; keep only the continuation
        llama_response = response[0]['generated_text'][len(prompt):].strip()

        # Did the model emit a book_appointment call?
        function_args = extract_function_call(llama_response)

        if function_args and "name" in function_args and "date" in function_args and "time" in function_args:
            print(f"Function arguments from Llama: {json.dumps(function_args, indent=2)}")

            # Add to calendar
            try:
                # Return value (True) intentionally ignored — no link is shown
                add_event_to_calendar(
                    function_args["name"],
                    function_args["date"],
                    function_args["time"]
                )

                # Confirmation deliberately contains no calendar link
                final_response = f"Great! I've booked an appointment for {function_args['name']} on {function_args['date']} at {function_args['time']} (Indian Standard Time). The appointment has been added to your calendar."

            except Exception as e:
                final_response = f"I attempted to book an appointment, but encountered an error: {str(e)}"

            # Record both turns before returning
            conversation_history.append({"role": "user", "content": user_input})
            conversation_history.append({"role": "assistant", "content": final_response})

            return final_response, conversation_history
        else:
            # No tool call detected: pass the model's reply straight through
            conversation_history.append({"role": "user", "content": user_input})
            conversation_history.append({"role": "assistant", "content": llama_response})

            return llama_response, conversation_history

    except Exception as e:
        print(f"Error in process_with_llama: {str(e)}")
        return f"Error: {str(e)}", conversation_history
239
+
240
# System prompt seeded into each session's conversation history.
# NOTE(review): process_with_llama replays only "user"/"assistant" roles when
# building the model prompt, so this "system" entry never actually reaches
# the model — confirm whether that is intended.
system_prompt = """You are an appointment booking assistant for Indian users.
When someone asks to book an appointment, collect:

1. Their name
2. The date (in YYYY-MM-DD format)
3. The time (in either 12-hour format like '2:30 PM' or 24-hour format like '14:30')

All appointments are in Indian Standard Time (IST).

If any information is missing, ask for it politely. Once you have all details, use the
book_appointment function to add it to the calendar.

IMPORTANT: After booking an appointment, simply confirm the details. Do not include
any links or mention viewing the appointment details. The user does not need to click
any links to view their appointment.

IMPORTANT: Make sure to interpret times correctly. If a user says '2 PM' or just '2',
this likely means 2:00 PM (14:00) in 24-hour format."""
259
+
260
# Initialize model and tokenizer once at import time so every request
# reuses the same loaded weights.
model, tokenizer = load_llama_model()

# Text-generation pipeline. return_full_text=True means the output includes
# the prompt, which process_with_llama slices off by prompt length.
llm_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    return_full_text=True
)
270
+
271
# Create Gradio interface
def create_interface():
    """Build the Gradio Blocks chat UI wired to process_with_llama.

    Returns:
        The constructed gr.Blocks app (not yet launched).
    """
    # Seed the transcript with the system prompt.
    # NOTE(review): this list is mutated in place by process_with_llama —
    # confirm Gradio copies the gr.State initial value per session so
    # concurrent users do not share one history.
    conversation_history = [{"role": "system", "content": system_prompt}]

    with gr.Blocks() as app:
        gr.Markdown("# Indian Time Zone Appointment Booking with Llama 3.1")
        gr.Markdown("Say something like 'Book an appointment for John on May 10th at 2pm'")

        # Chat interface widgets
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...", label="Message")
        clear = gr.Button("Clear Chat")

        # Hidden per-session state carrying the role/content transcript
        state = gr.State(conversation_history)

        # Handle one submitted message; returns (textbox, chat display, state)
        def user_input(message, history, conv_history):
            # Ignore empty submissions — leave the chat untouched
            if message.strip() == "":
                return "", history, conv_history

            # Get the assistant's reply (may also book the appointment)
            response, updated_conv_history = process_with_llama(message, conv_history, llm_pipeline)

            # Append the (user, bot) pair for the Chatbot display
            history.append((message, response))

            # Empty string clears the input textbox
            return "", history, updated_conv_history

        # Submit sends a message; Clear resets both the display and the state
        msg.submit(user_input, [msg, chatbot, state], [msg, chatbot, state])
        clear.click(lambda: ([], [{"role": "system", "content": system_prompt}]), None, [chatbot, state])

    return app
306
+
307
# Create and launch the app — the UI is built at import time so hosting
# platforms that import this module get the `app` object.
app = create_interface()

# Launch for Spaces: start the Gradio server when executed as a script
if __name__ == "__main__":
    app.launch()