Update app.py
app.py CHANGED
@@ -15,46 +15,79 @@ MODELS = [
 ]
 
 
-def generate(message, history, model,
-
-
-
+def generate(message, history, model, system_prompt,
+             temperature=1.0, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0):
+
+    history_openai_format = [{"role": "system", "content": system_prompt}]
+    for user, assistant in history:
+        history_openai_format.append({"role": "user", "content": user})
         history_openai_format.append({"role": "assistant", "content": assistant})
     history_openai_format.append({"role": "user", "content": message})
 
     response = client.chat.completions.create(model=model,
                                               messages=history_openai_format,
                                               temperature=temperature,
+                                              top_p=top_p,
+                                              frequency_penalty=frequency_penalty,
+                                              presence_penalty=presence_penalty,
                                               stream=True)
 
     partial_message = ""
     for chunk in response:
-        if chunk.choices[0].delta.content is not None:
-            partial_message
+        if chunk.choices and chunk.choices[0].delta.content is not None:
+            partial_message += chunk.choices[0].delta.content
             yield partial_message
 
 
 chat_interface = gr.ChatInterface(
-
+    multimodal=True,
+    title='💬 Private ChatGPT',
     description='Chat with OpenAI models using their official API. OpenAI <a href="https://platform.openai.com/docs/concepts">promises</a> not to train on input or output of API calls.',
     fn=generate,
+    analytics_enabled=False,
     chatbot=gr.Chatbot(
         show_label=False,
         show_copy_button=True,
-
-    ),
+        scale=1),
     additional_inputs=[
-        gr.Dropdown(label=
+        gr.Dropdown(label="Model",
                     choices=MODELS,
                     value=MODELS[0],
-                    allow_custom_value=
+                    allow_custom_value=False),
+        gr.Textbox(label="System prompt",
+                   value="Je bent een slimme, behulpzame assistent van Edwin Rijgersberg"),
         gr.Slider(label="Temperature",
                   minimum=0.,
-                  maximum=
+                  maximum=2.0,
                   step=0.05,
                   value=1.0),
+        gr.Slider(label="Top P",
+                  minimum=0.,
+                  maximum=1.0,
+                  step=0.05,
+                  value=1.0),
+        gr.Slider(label="Frequency penalty",
+                  minimum=0.,
+                  maximum=1.0,
+                  step=0.05,
+                  value=0.),
+        gr.Slider(label="Presence penalty",
+                  minimum=0.,
+                  maximum=1.0,
+                  step=0.05,
+                  value=0.),
     ],
-
-
+    textbox=gr.Textbox(container=False,
+                       show_label=False,
+                       label="Message",
+                       placeholder="Type een bericht...",
+                       scale=7),
+    additional_inputs_accordion=gr.Accordion(label="Instellingen", open=False),
+    show_progress="full",
+    submit_btn="Genereer",
+    stop_btn="Stop",
+    retry_btn="🔄 Opnieuw",
+    undo_btn="↩️ Ongedaan maken",
+    clear_btn="🗑️ Wissen",
 )
 chat_interface.launch(share=True)
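
The hunk starts inside the MODELS list, so app.py must already import gradio, create the OpenAI client, and define MODELS above line 15; none of that appears in this diff. Below is a minimal sketch of such a preamble under stated assumptions: the model names are placeholders, and the API key is assumed to come from the standard OPENAI_API_KEY environment variable (for example via a Space secret). The smoke test at the end is likewise hypothetical and would sit at the bottom of the file.

# Assumed preamble above the hunk -- illustrative sketch, not taken from the diff.
import os

import gradio as gr
from openai import OpenAI

# Client used by generate(); the key is read from the environment here.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

# Placeholder entries; the Space's real list sits just above the changed lines.
MODELS = [
    "gpt-4o",
    "gpt-4o-mini",
]

# Hypothetical smoke test of the streaming generator outside Gradio,
# placed at the bottom of the file after generate() is defined.
if __name__ == "__main__":
    for partial in generate("Hallo!", history=[], model=MODELS[0],
                            system_prompt="Je bent een behulpzame assistent."):
        print(partial)

Because generate yields the accumulated partial_message on every chunk, gr.ChatInterface replaces the displayed reply with each yielded value, which is what produces the token-by-token streaming effect in the chat window.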