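"""
AI Interview Team Assistant (Gradio Space).

A chat assistant that pulls interviewer records from Airtable, builds a
Chain-of-Thought prompt to pick available employees for a given interview
time slot, and streams responses from gpt-3.5-turbo or gpt-4 (switchable
from the chat itself).
"""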
from openai import OpenAI
from datetime import datetime, timedelta
import gradio as gr
import os
import re
import requests

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
model = "gpt-3.5-turbo"
def repo_get_all_employees_from_database():
    url = "https://api.airtable.com/v0/appopGmlHujYnd6Vw/Interviewers?maxRecords=100&view=Grid%20view"
    headers = {
        "Authorization": os.getenv("DB_AUTH_TOKEN")
    }
    response = requests.get(url, headers=headers)
    records = response.json()
    records_list = records['records']
    employees_list = []
    for record in records_list:
        employee = record["fields"]
        employees_list.append(employee)
    return employees_list
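
# predict() is the Gradio chat handler: it rebuilds the OpenAI message history,
# wraps the user message (plus the Airtable employee data) into a prompt, and,
# when the message matches the pre-configured "For conducting an interview ..."
# pattern, replaces it with a step-by-step Chain-of-Thought scheduling prompt.
# "Switch to gpt-3.5" / "Switch to gpt-4" messages toggle the global model.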
def predict(message, history):
    history_openai_format = []
    system_content = """
    You are a computing system that strictly and directly follows all instructions from the user.
    If the user asks to switch to gpt-3.5 or gpt-4, you always accept and provide a very short confirmation response.
    """
    history_openai_format.append({"role": "system", "content": system_content})

    pattern = r"For conducting an interview I need (\d+) employee.*start time is (.*), duration (\d+) hour"
    data = repo_get_all_employees_from_database()
    prompt = '''
    {data}
    ###
    Above is the employees data in JSON format.
    {message}
    '''.format(data=data, message=message)
    match = re.search(pattern, message)
    if match:
        num_employees = int(match.group(1))
        duration = int(match.group(3))
        start_time = datetime.strptime(match.group(2), "%B %d %Y %I %p")
        end_time = start_time + timedelta(hours=duration)
        date_time = '''
        "start_date_time": "{start_time}", "end_date_time": "{end_time}"
        '''.format(start_time=start_time, end_time=end_time)
        prompt = '''
        {data}
        ###
        Above is the employees data in JSON format.
        Please choose {num_employees} employee(s) with the lowest "interviews_conducted" value whose "busy_date_time_slots" does NOT contain the "given_date_time_slot", which is: {date_time}.
        You should NOT output any Python code.
        Let's think step-by-step:
        1. Remove the employees whose "busy_date_time_slots" CONTAINS the "given_date_time_slot" specified above. Provide a list of names of the remaining employees.
        2. Double-check your filtering. It is very important NOT to include in the remaining list any employee whose "busy_date_time_slots" CONTAINS the "given_date_time_slot". Print the "given_date_time_slot" value and verify that none of the remaining employees has it in their "busy_date_time_slots". If someone does, replace them.
        3. Provide a list of names of the remaining employees along with their "interviews_conducted" values and choose the {num_employees} employee(s) with the lowest "interviews_conducted" value.
        4. Check the previous step: make sure you really chose the employee(s) with the lowest "interviews_conducted" value.
        5. At the end, print the ids and names of the finally selected employees in JSON format. Remember that your output should contain at most {num_employees} employee(s).
        '''.format(data=data, date_time=date_time, num_employees=num_employees)
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": prompt})

    global model
    if "switch to gpt-3.5" in message.lower():
        model = "gpt-3.5-turbo"
        print("Switched to: {model}".format(model=model))
    if "switch to gpt-4" in message.lower():
        model = "gpt-4"
        print("Switched to: {model}".format(model=model))

    response = client.chat.completions.create(
        model=model,
        messages=history_openai_format,
        temperature=0,
        stream=True)

    partial_message = "🤖 {model}:\n\n".format(model=model)
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message
pre_configured_prompt = "For conducting an interview I need 1 employee in given time slot: start time is March 11 2024 2 pm, duration 1 hour"
switch_to_gpt3 = "Switch to gpt-3.5"
switch_to_gpt4 = "Switch to gpt-4"

description = '''
# AI Interview Team Assistant | Empowered by Godel Technologies AI \n
\n
This is an AI Interview Team Assistant. You can ask it any questions about recruiting a team for an interview.\n
\n
You can send any regular prompts you wish or pre-configured Chain-of-Thought prompts.\n
To trigger the pre-configured prompt, craft a prompt with the following structure:
- "{pre_configured_prompt}"
\n
You can switch between gpt-3.5 and gpt-4 with the {switch_to_gpt3} or {switch_to_gpt4} prompts.
'''.format(pre_configured_prompt=pre_configured_prompt, switch_to_gpt3=switch_to_gpt3, switch_to_gpt4=switch_to_gpt4)

examples = [pre_configured_prompt, switch_to_gpt3, switch_to_gpt4]

gr.ChatInterface(predict, examples=examples, description=description).launch()
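
# To run outside a hosted Space (a sketch; the filename app.py is an assumption):
# set the OPENAI_API_KEY and DB_AUTH_TOKEN environment variables, then run `python app.py`.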