import base64
import os

from gradio_client.utils import get_mimetype
from openai import OpenAI
import gradio as gr

api_key = os.environ.get('OPENAI_API_KEY')
client = OpenAI(api_key=api_key)

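# Models offered in the dropdown; image attachments only work with vision-capable models such as gpt-4o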
MODELS = [
    'gpt-4o',
    'gpt-4o-mini',
    'gpt-4',
    'gpt-4-turbo',
    'gpt-3.5-turbo',
]


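# Read an image file from disk and return it as a base64 data URL, the inline format the OpenAI vision API expects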
def process_image(data):
    with open(data['path'], "rb") as image_file:
        b64image = base64.b64encode(image_file.read()).decode('utf-8')

    return "data:" + data['mime_type'] + ";base64," + b64image


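# Gradio ChatInterface callback: convert the Gradio message/history into OpenAI chat messages and stream the reply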
def generate(message, history, model, system_prompt,
             temperature=1.0, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
    # rebuild the conversation history in the OpenAI chat format, starting with the system prompt
    history_openai_format = [{"role": "system", "content": system_prompt}]
    for user, assistant in history:
        if isinstance(user, tuple):  # the user turn contained file attachments
            content = []
            for filepath in user:
                mime_type = get_mimetype(filepath)
                if mime_type is None or 'image' not in mime_type:
                    raise gr.Error("Currently only images are supported as attachments 💥!", duration=5)
                # history entries are plain file paths, unlike the dicts Gradio passes for the new message
                content.append(
                    {"type": "image_url",
                     "image_url": {"url": process_image({'path': filepath,
                                                         'mime_type': mime_type})}})

            history_openai_format.append(
                {"role": "user", "content": content})
        else:  # there was just text
            history_openai_format.append({"role": "user", "content": user})

        if assistant is not None:
            history_openai_format.append({"role": "assistant", "content": assistant})

    # the new message: its text plus any attached image files
    content = [{"type": "text",
                "text": message['text']}]

    for file in message['files']:
        content.append({"type": "image_url",
                        "image_url": {"url": process_image(file)}})

    history_openai_format.append(
        {"role": "user", "content": content})

    response = client.chat.completions.create(model=model,
                                              messages=history_openai_format,
                                              temperature=temperature,
                                              top_p=top_p,
                                              frequency_penalty=frequency_penalty,
                                              presence_penalty=presence_penalty,
                                              stream=True)

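    # accumulate the streamed chunks and yield the growing reply so Gradio updates the chat incrementally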
    partial_message = ""
    for chunk in response:
        if chunk.choices and chunk.choices[0].delta.content is not None:
            partial_message += chunk.choices[0].delta.content
            yield partial_message


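# Multimodal chat UI; model choice, system prompt and sampling parameters live in a collapsible "additional inputs" accordion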
chat_interface = gr.ChatInterface(
    multimodal=True,
    title='💬 Private ChatGPT',
    description='Chat with OpenAI models using their official API. OpenAI <a href="https://platform.openai.com/docs/concepts">promises</a> not to train on input or output of API calls.',
    fn=generate,
    analytics_enabled=False,
    chatbot=gr.Chatbot(
        show_label=False,
        show_copy_button=True,
        scale=1),
    additional_inputs=[
        gr.Dropdown(label="Model",
                    choices=MODELS,
                    value=MODELS[0],
                    allow_custom_value=False),
        gr.Textbox(label="System prompt",
                   value="Je bent een slimme, behulpzame assistent van Edwin Rijgersberg"),
        gr.Slider(label="Temperature",
                  minimum=0.,
                  maximum=2.0,
                  step=0.05,
                  value=1.0),
        gr.Slider(label="Top P",
                  minimum=0.,
                  maximum=1.0,
                  step=0.05,
                  value=1.0),
        gr.Slider(label="Frequency penalty",
                  minimum=0.,
                  maximum=1.0,
                  step=0.05,
                  value=0.),
        gr.Slider(label="Presence penalty",
                  minimum=0.,
                  maximum=1.0,
                  step=0.05,
                  value=0.),
    ],
    textbox=gr.MultimodalTextbox(
                            file_types=['image'],
                            show_label=False,
                            label="Message",
                            placeholder="Type een bericht...",
                            scale=7,
                        ),
    additional_inputs_accordion=gr.Accordion(label="Settings", open=False),
    show_progress="full",
    submit_btn=None,
    stop_btn="Stop",
    retry_btn="🔄 Retry",
    undo_btn="↩️ Undo",
    clear_btn="🗑️ Clear",
)
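# share=True additionally serves the app on a temporary public gradio.live URL; drop it to keep the app local-only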
chat_interface.launch(share=True)