Update app.py
app.py CHANGED
@@ -46,6 +46,14 @@ def is_image(file_path):
     except IOError:
         return False
 
+def supbase_insert(user_message,response_content,messages,response):
+    from supabase import create_client, Client
+    url = os.environ.get('supabase_url')
+    key = os.environ.get('supabase_key')
+    supabase = create_client(url, key)
+    data, count = supabase.table('messages').insert({"user_message": user_message, "response_content": response_content,"messages":messages,"response":response}).execute()
+
+
 # def respond(
 #     message,
 #     history: list[tuple[str, str]],
@@ -86,7 +94,7 @@ def get_completion(message,history):
         raise gr.Error("Please input a text query along the image(s).")
 
     text = message['text']
-
+    user_message = [
         {"type": "text", "text": text},
     ]
     if message['files']:
@@ -98,7 +106,7 @@ def get_completion(message,history):
                 "image_url": {
                     "url": public_url,
                 },}
-
+            user_message.append(content_image)
         else:
             raise gr.Error("Only support image files now.")
 
@@ -109,7 +117,7 @@ def get_completion(message,history):
            continue
        history_openai_format.append({"role": "user", "content": human })
        history_openai_format.append({"role": "assistant", "content":assistant})
-    history_openai_format.append({"role": "user", "content":
+    history_openai_format.append({"role": "user", "content": user_message})
    # print(history_openai_format)
 
    system_message = '''You are GPT-4o("o" for omni), OpenAI's new flagship model that can reason across audio, vision, and text in real time.
@@ -150,6 +158,8 @@ def get_completion(message,history):
    else:
        response_content = response_data['choices'][0]['message']['content']
        usage = response_data['usage']
+
+        supbase_insert(user_message,response_content,messages,response_data)
 
    return response_content
 
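The committed helper (spelled supbase_insert in the diff) builds a fresh Supabase client on every call and runs the insert unguarded, so bad credentials or a Supabase outage would raise inside get_completion and surface as a chat error. Below is a minimal hardened sketch, assuming supabase-py v2 (where execute() returns a response object with a .data attribute) and the same 'messages' table and supabase_url / supabase_key environment variables as the commit; the client caching, the try/except, and the supabase_insert_safe name are illustrative additions, not part of app.py:

import os
from functools import lru_cache

from supabase import create_client, Client

@lru_cache(maxsize=1)
def get_supabase() -> Client:
    # Build the client once per process instead of on every logged message.
    return create_client(os.environ['supabase_url'], os.environ['supabase_key'])

def supabase_insert_safe(user_message, response_content, messages, response):
    # Best-effort logging: a failed insert should not break the chat reply.
    try:
        res = get_supabase().table('messages').insert({
            "user_message": user_message,
            "response_content": response_content,
            "messages": messages,
            "response": response,
        }).execute()
        return res.data
    except Exception as exc:
        print(f"Supabase logging failed: {exc}")
        return None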
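For context, the user_message value this commit threads through get_completion and supbase_insert follows the OpenAI chat-completions multimodal content format: one text part plus one image_url part per uploaded file, appended to the history as the final user turn. A minimal sketch of that structure (build_user_message and image_urls are illustrative names, not identifiers from app.py):

def build_user_message(text, image_urls):
    # One text part, then one image_url part per image, mirroring how
    # get_completion assembles user_message before appending
    # {"role": "user", "content": user_message} to history_openai_format.
    parts = [{"type": "text", "text": text}]
    for url in image_urls:
        parts.append({"type": "image_url", "image_url": {"url": url}})
    return {"role": "user", "content": parts}

# Example payload for one uploaded image:
# build_user_message("Describe this photo.", ["https://example.com/cat.png"])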