import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


from google.cloud import storage
from google.oauth2 import service_account
import json
import os
import requests

# upload a file to Google Cloud Storage and return its public URL
def upload_file_to_gcs_blob(file):
    google_creds = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")

    creds_json = json.loads(google_creds)
    credentials = service_account.Credentials.from_service_account_info(creds_json)

    # build the storage client from the service-account credentials
    storage_client = storage.Client(credentials=credentials, project=creds_json['project_id'])

    bucket_name = os.environ.get('bucket_name')
    bucket = storage_client.bucket(bucket_name)

    # use the file's basename as the object name in the bucket
    destination_blob_name = os.path.basename(file)
    blob = bucket.blob(destination_blob_name)

    blob.upload_from_filename(file)

    return blob.public_url
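
# Usage sketch (assumes the GOOGLE_APPLICATION_CREDENTIALS_JSON and bucket_name
# environment variables are set, and that the bucket allows public reads):
#   url = upload_file_to_gcs_blob("/tmp/example.png")
#   # url ≈ "https://storage.googleapis.com/<bucket_name>/example.png"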


from PIL import Image

def is_image(file_path):
    # PIL raises OSError (including UnidentifiedImageError) for non-image files;
    # the context manager closes the file handle either way
    try:
        with Image.open(file_path):
            return True
    except OSError:
        return False

from supabase import create_client, Client

def get_supabase_client():
    url = os.environ.get('supabase_url')
    key = os.environ.get('supbase_key')  # note: the secret is stored under this (misspelled) name
    supabase = create_client(url, key)
    return supabase

# log one chat turn (user message, model reply, and full payloads) to Supabase
def supabase_insert(user_message, response_content, messages, response, user_name, user_oauth_token):
    supabase = get_supabase_client()
    data, count = supabase.table('messages').insert({"user_message": user_message, "response_content": response_content, "messages": messages, "response": response, "user_name": user_name, "user_oauth_token": user_oauth_token}).execute()

# register a first-time user
def supabase_insert_user(name, user_name, profile, picture, oauth_token):
    supabase = get_supabase_client()
    data, count = supabase.table('users').insert({"name": name, "user_name": user_name, "profile": profile, "picture": picture, "oauth_token": oauth_token}).execute()


def supabase_fetch_user(user_name):
    supabase = get_supabase_client()
    data, count = supabase.table('users').select("*").eq('user_name', user_name).execute()
    return data
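
# Return-shape sketch (assumes supabase-py v1, where the APIResponse unpacks
# into ('data', rows) and ('count', n) pairs):
#   rows = supabase_fetch_user("some_user")
#   # rows ≈ ('data', [{"name": ..., "user_name": ..., ...}])
#   # rows[1] is the list of matching rows; an empty list means the user is new.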
        


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content

#         response += token
#         yield response

def get_completion(message, history, profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None):
    # require a Hugging Face login
    if profile is None:
        raise gr.Error('Click "Sign in with Hugging Face" to continue')

    user_name = profile.username
    user_oauth_token = oauth_token.token

    # register the user on the first visit; data[1] holds the row list (see sketch above)
    user_data = supabase_fetch_user(user_name)
    if not user_data[1]:
        supabase_insert_user(profile.name, user_name, profile.profile, profile.picture, user_oauth_token)

    
    # validate input: a text query is required, images are optional
    if message["text"].strip() == "" and not message["files"]:
        raise gr.Error("Please input a query and optionally image(s).")

    if message["text"].strip() == "" and message["files"]:
        raise gr.Error("Please input a text query along with the image(s).")
    
    text = message['text']
    user_message = [
        {"type": "text", "text": text},
    ]
    if message['files']:
        # only the first attached file is used
        file = message['files'][0]
        public_url = upload_file_to_gcs_blob(file)
        if is_image(file):  # only image files are supported for now
            content_image = {
                "type": "image_url",
                "image_url": {
                    "url": public_url,
                },
            }
            user_message.append(content_image)
        else:
            raise gr.Error("Only image files are supported for now.")

    history_openai_format = []
    for human, assistant in history:
        # skip image-only entries (tuples), empty user turns, and turns without a reply
        if isinstance(human, tuple) or human == "" or assistant is None:
            continue

        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": user_message})
    # print(history_openai_format)
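
    # Shape of the payload built above (a sketch; values are illustrative):
    # [
    #     {"role": "user", "content": "earlier question"},
    #     {"role": "assistant", "content": "earlier answer"},
    #     {"role": "user", "content": [
    #         {"type": "text", "text": "current question"},
    #         {"type": "image_url", "image_url": {"url": "https://storage.googleapis.com/..."}},
    #     ]},
    # ]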
    
    system_message = '''You are GPT-4o ("o" for omni), OpenAI's new flagship model that can reason across audio, vision, and text in real time.
    GPT-4o matches GPT-4 Turbo performance on text in English and code, with significant improvement on text in non-English languages, while also being much faster.
    GPT-4o is especially better at vision and audio understanding compared to existing models.
    GPT-4o's text and image capabilities are available to users now. More capabilities like audio and video will be rolled out iteratively in the future.
    '''

    
    # request headers
    openai_api_key = os.environ.get('openai_api_key')
    headers = {
        'Authorization': f'Bearer {openai_api_key}'
    }

    temperature = 0.7
    max_tokens = 2048

    init_message = [{"role": "system", "content": system_message}]
    messages = init_message + history_openai_format[-5:]  # system message + latest two dialogue rounds + current user input
    print(messages)
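
    # Windowing example (illustrative): with four completed rounds in history,
    # history_openai_format == [u1, a1, u2, a2, u3, a3, u4, a4, u_current] and
    # [-5:] keeps [u3, a3, u4, a4, u_current], i.e. the latest two rounds plus
    # the current input; shorter histories are kept whole.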
    # request body
    data = {
        'model': 'gpt-4o',
        'messages': messages,
        'temperature': temperature,
        'max_tokens': max_tokens,
        # 'stream': True,
    }

    # get response
    response = requests.post('https://burn.hair/v1/chat/completions', headers=headers, json=data)
    response_data = response.json()
    print(response_data)
    print('-----------------------------------\n')
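
    # Expected response shape (a sketch of the OpenAI-compatible JSON; fields abridged):
    # {
    #     "choices": [{"message": {"role": "assistant", "content": "..."}}],
    #     "usage": {"prompt_tokens": ..., "completion_tokens": ..., "total_tokens": ...}
    # }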
    if 'error' in response_data:
        response_content = response_data['error']['message']
    else:
        response_content = response_data['choices'][0]['message']['content']
        usage = response_data['usage']  # token counts; persisted as part of response_data below

    supabase_insert(user_message, response_content, messages, response_data, user_name, user_oauth_token)
    
    return response_content



"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""

title = "ChatGPT-4o"
description = "This is GPT-4o. You can use the text and image capabilities now; more capabilities like audio and video will be rolled out iteratively in the future. Stay tuned."


with gr.Blocks(fill_height=True) as demo:
    gr.Markdown(
        f"# {title}"
        f"\n\n{description}"
    )
    gr.LoginButton()

    gr.ChatInterface(
        get_completion,
        multimodal=True,
        # title = title,
        # description = description
        # additional_inputs=[
        #     gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        #     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        #     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        # ],
    )

demo.queue(default_concurrency_limit=5)


if __name__ == "__main__":
    demo.launch()