Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -5,15 +5,13 @@ import os
 import re
 import requests
 
-
-
-
-auth_token = os.getenv("DB_AUTH_TOKEN")
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+model = "gpt-3.5-turbo"
 
 def repo_get_all_employees_from_database():
     url = "https://api.airtable.com/v0/appopGmlHujYnd6Vw/Interviewers?maxRecords=100&view=Grid%20view"
     headers = {
-        "Authorization":
+        "Authorization": os.getenv("DB_AUTH_TOKEN")
     }
     response = requests.get(url, headers=headers)
     records = response.json()
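Note on this hunk: the module-level `auth_token` variable is dropped in favour of reading `DB_AUTH_TOKEN` directly inside the request headers, and a module-level OpenAI client plus a default `model` are introduced (the `from openai import OpenAI` import is assumed to live in the unshown lines above `import os`). Below is a minimal sketch of the resulting Airtable call; `fetch_interviewers` and `AIRTABLE_URL` are illustrative names that are not in the commit, and Airtable's REST API expects an `Authorization: Bearer <token>` header, so the `DB_AUTH_TOKEN` secret is assumed to already hold the full header value.

```python
import os
import requests

# Illustrative constant; the URL itself is taken verbatim from the diff.
AIRTABLE_URL = (
    "https://api.airtable.com/v0/appopGmlHujYnd6Vw/Interviewers"
    "?maxRecords=100&view=Grid%20view"
)

def fetch_interviewers():
    # Airtable expects "Authorization: Bearer <token>"; DB_AUTH_TOKEN is assumed
    # to already contain the complete header value, as passed in the diff.
    headers = {"Authorization": os.getenv("DB_AUTH_TOKEN")}
    response = requests.get(AIRTABLE_URL, headers=headers)
    response.raise_for_status()  # surface a 401/403 immediately instead of failing later
    return response.json().get("records", [])
```

Reading the token inside the function means each call picks up the current value of the secret, whereas the removed `auth_token` variable captured it once at import time.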
@@ -96,22 +94,18 @@ def predict(message, history):
     history_openai_format.append({"role": "assistant", "content": assistant})
     history_openai_format.append({"role": "user", "content": prompt})
 
-    model
+    global model
+
     if ("switch to gpt-3.5" in message.lower()):
         model = "gpt-3.5-turbo"
-        print("Switched to:")
-        print(model)
+        print("Switched to: {model}".format(model=model))
 
     if ("switch to gpt-4" in message.lower()):
         model = "gpt-4"
-        print("Switched to:")
-        print(model)
-
-    print("Actual model:")
-    print(model)
+        print("Switched to: {model}".format(model=model))
 
     response = client.chat.completions.create(
-        model=
+        model=model,
         messages= history_openai_format,
         temperature=0,
         stream=True)
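Note on this hunk: `model` now lives at module level (see the first hunk), so `global model` is required here; without it, the `model = "gpt-3.5-turbo"` and `model = "gpt-4"` assignments inside `predict` would only rebind a function-local name and the switch would not persist across messages. The completion call then passes the shared `model` explicitly. A sketch of the pattern is below, assuming the generator style that `gr.ChatInterface` expects for streaming; `answer` is an illustrative stand-in, `history_openai_format` and the `client.chat.completions.create` arguments are reused from the diff.

```python
import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
model = "gpt-3.5-turbo"  # module-level default, as added in the first hunk

def answer(history_openai_format, message):
    # Without `global model`, the assignments below would create a local
    # variable and the chosen model would be forgotten after this call.
    global model
    if "switch to gpt-3.5" in message.lower():
        model = "gpt-3.5-turbo"
    if "switch to gpt-4" in message.lower():
        model = "gpt-4"

    stream = client.chat.completions.create(
        model=model,
        messages=history_openai_format,
        temperature=0,
        stream=True)

    partial = ""
    for chunk in stream:
        # Each streamed chunk carries an incremental delta; accumulate and
        # yield so the chat UI can render the reply progressively.
        if chunk.choices and chunk.choices[0].delta.content is not None:
            partial += chunk.choices[0].delta.content
            yield partial
```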
@@ -132,6 +126,9 @@ This is an AI Interview Team Assistant. You can ask him any questions about recr
 You can send any regular prompts you wish or pre-configured Chain-of-Thought prompts.\n
 To trigger pre-configured prompt you have to craft a prompt with next structure:
 - "{pre_configured_promt}"
+\n
+You can switch between gpt-3.5 and gpt-4 with "Switch to gpt-3.5" or "Switch to gpt-4" prompts.\n
+Language Model currently under the hood: {model}
 '''.format(pre_configured_promt=pre_configured_promt)
 
 examples = [pre_configured_promt]
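Note on this hunk: the description template gains a `{model}` placeholder, but the `.format(...)` call on the closing line still supplies only `pre_configured_promt`. `str.format` raises `KeyError: 'model'` for an unfilled named placeholder, which would stop app.py while the Space starts and may be what the "Runtime error" status reflects. A minimal sketch of one way to close the gap is below, reusing names from the diff; the fix itself is not part of this commit and the `"..."` value only stands in for the prompt defined earlier in app.py.

```python
# Hypothetical adjustment (not in this commit): every named placeholder in the
# template must be supplied, otherwise str.format raises KeyError at startup.
pre_configured_promt = "..."   # stand-in for the prompt defined earlier in app.py
model = "gpt-3.5-turbo"        # module-level default from the first hunk

description = '''
You can send any regular prompts you wish or pre-configured Chain-of-Thought prompts.\n
To trigger pre-configured prompt you have to craft a prompt with next structure:
- "{pre_configured_promt}"
\n
You can switch between gpt-3.5 and gpt-4 with "Switch to gpt-3.5" or "Switch to gpt-4" prompts.\n
Language Model currently under the hood: {model}
'''.format(pre_configured_promt=pre_configured_promt, model=model)
```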