Hugging Face Space (status: Sleeping)
Commit: Update app.py
File changed: app.py
@@ -43,7 +43,7 @@ def chat(message, history, temperature, max_new_tokens):
 
     # Ensure the model is loaded before proceeding
     if not check_model_status():
-        yield "Model is not ready. Please try again later."
+        yield "Model is not ready. Please try again later.", ""
         return
 
     prompt = f"Human: {message}\n\nAssistant:"
@@ -126,10 +126,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         bot_message = ""
         for partial_response, token_status_value in chat(message, chat_history, temperature, max_new_tokens):
             bot_message = partial_response
-
-            yield "", chat_history + [(message, bot_message)]
+            yield "", chat_history + [(message, bot_message)], gr.update(value=token_status_value)
 
-    send_button.click(respond, inputs=[textbox, chatbot, temperature_slider, max_tokens_slider], outputs=[textbox, chatbot])
+    send_button.click(respond, inputs=[textbox, chatbot, temperature_slider, max_tokens_slider], outputs=[textbox, chatbot, token_status])
     clear_button.click(lambda: [], None, chatbot)
     reload_button.click(reload_model_button, None, status_text)
 