Update chat_demo.py
chat_demo.py CHANGED (+6 -5)
@@ -93,15 +93,16 @@ with gr.Blocks() as demo:
     #download=gr.DownloadButton(label="Download Conversation", value=None)
     conv_state = gr.State()
     orig_path = gr.State()
-    chatbot = gr.Chatbot(placeholder="Have fun with the AI!")
+    chatbot = gr.Chatbot(placeholder="Have fun with the AI!", editable='all', show_copy_button=True)
+    additional_inputs=[
+        gr.Textbox("You are a helpful AI assistant.", label="System Prompt"),
+        gr.Slider(30, 8192, value=2048, label="Max new tokens"),
+    ]
     chat = gr.ChatInterface(
         openai_call,
         type="messages",
         chatbot=chatbot,
-        additional_inputs=
-            gr.Textbox("You are a helpful AI assistant.", label="System Prompt"),
-            gr.Slider(30, 8192, value=2048, label="Max new tokens"),
-        ],
+        additional_inputs=additional_inputs,
         additional_outputs=[conv_state],
         title="Edge level LLM Chat demo",
         description="In this demo, you can chat with sub-1B param range LLM - they are small enough to run with reasonable speed on most end user device. **Warning:** Do not input sensitive info - assume everything is public!"
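For context, a minimal runnable sketch of how the changed block fits together after this commit. The real openai_call handler lives elsewhere in chat_demo.py and is not shown in this hunk, so the echo stub below is a stand-in, and the (reply, state) return tuple reflects my reading of ChatInterface's additional_outputs contract rather than the Space's actual code:

import gradio as gr

def openai_call(message, history, system_prompt, max_new_tokens):
    # Stand-in for the Space's real handler: ChatInterface passes the
    # additional_inputs after (message, history); because
    # additional_outputs=[conv_state] is set, return (reply, new_state).
    reply = f"(system={system_prompt!r}, max_new_tokens={max_new_tokens}) You said: {message}"
    conv = history + [{"role": "user", "content": message},
                      {"role": "assistant", "content": reply}]
    return reply, conv

with gr.Blocks() as demo:
    conv_state = gr.State()
    orig_path = gr.State()
    # editable='all' lets users edit any turn; show_copy_button adds a
    # per-message copy control.
    chatbot = gr.Chatbot(placeholder="Have fun with the AI!", editable='all',
                         show_copy_button=True)
    additional_inputs = [
        gr.Textbox("You are a helpful AI assistant.", label="System Prompt"),
        gr.Slider(30, 8192, value=2048, label="Max new tokens"),
    ]
    chat = gr.ChatInterface(
        openai_call,
        type="messages",
        chatbot=chatbot,
        additional_inputs=additional_inputs,
        additional_outputs=[conv_state],
        title="Edge level LLM Chat demo",
    )

demo.launch()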