Reality123b committed on
Commit
f34df8a
·
verified ·
1 Parent(s): f944272

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -11
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import os
3
  from huggingface_hub import InferenceClient
 
4
 
5
  hf_token = os.getenv("hf_token")
6
 
@@ -24,11 +25,20 @@ def get_response(user_input):
24
  response = ""
25
  for chunk in stream:
26
  response += chunk.choices[0].delta.content
27
- return response
 
28
 
29
  def chat_interface():
30
  with gr.Blocks() as demo:
31
  with gr.Row():
 
 
 
 
 
 
 
 
32
  with gr.Column(scale=0.8):
33
  input_textbox = gr.Textbox(
34
  label="Type your message",
@@ -42,20 +52,38 @@ def chat_interface():
42
  with gr.Column(scale=0.2):
43
  send_button = gr.Button("Send", elem_id="send-btn")
44
 
45
- chat_output = gr.Chatbot(
46
- elem_id="chat-box",
47
- label="Xylaria 1.4 Senoa Chatbot",
48
- show_label=False
49
- )
50
-
51
  def submit_input(user_input, chat_history):
52
- response = get_response(user_input)
53
- chat_history.append((user_input, response))
54
- return "", chat_history
55
 
56
  input_textbox.submit(submit_input, [input_textbox, chat_output], [input_textbox, chat_output])
57
  send_button.click(submit_input, [input_textbox, chat_output], [input_textbox, chat_output])
58
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  return demo
60
 
61
  demo = chat_interface()
 
1
  import gradio as gr
2
  import os
3
  from huggingface_hub import InferenceClient
4
+ import time
5
 
6
  hf_token = os.getenv("hf_token")
7
 
 
25
  response = ""
26
  for chunk in stream:
27
  response += chunk.choices[0].delta.content
28
+ yield response # Yielding progressively as the model generates output
29
+ time.sleep(0.05) # Optional: Adjust speed of the stream (in seconds)
30
 
31
  def chat_interface():
32
  with gr.Blocks() as demo:
33
  with gr.Row():
34
+ with gr.Column(scale=1):
35
+ chat_output = gr.Chatbot(
36
+ elem_id="chat-box",
37
+ label="Xylaria 1.4 Senoa Chatbot",
38
+ show_label=False
39
+ )
40
+
41
+ with gr.Row(elem_id="input-row", scale=0.2):
42
  with gr.Column(scale=0.8):
43
  input_textbox = gr.Textbox(
44
  label="Type your message",
 
52
  with gr.Column(scale=0.2):
53
  send_button = gr.Button("Send", elem_id="send-btn")
54
 
 
 
 
 
 
 
55
def submit_input(user_input, chat_history):
    """Record the user's message as a new chat turn with an empty reply.

    Returns an empty string (which clears the textbox) together with the
    mutated history so Gradio re-renders the chat.
    """
    pending_turn = (user_input, "")
    chat_history.append(pending_turn)
    return "", chat_history
 
58
 
59
  input_textbox.submit(submit_input, [input_textbox, chat_output], [input_textbox, chat_output])
60
  send_button.click(submit_input, [input_textbox, chat_output], [input_textbox, chat_output])
61
+
62
def handle_response(user_input, chat_history):
    """Stream the model's reply into the most recent chat turn.

    Yields ("", chat_history) on every partial response so Gradio keeps the
    textbox cleared and re-renders the chat as the reply grows.
    """
    # Bug fix: the original did `chat_history[-1] = (user_input, "")`
    # unconditionally, which raises IndexError when the history is empty
    # (it assumed the separately-bound submit_input handler ran first and
    # appended a turn). Only append when that pending turn is not present.
    if not chat_history or chat_history[-1] != (user_input, ""):
        chat_history.append((user_input, ""))
    for partial_response in get_response(user_input):
        # Overwrite the pending turn with the progressively longer reply.
        chat_history[-1] = (user_input, partial_response)
        yield "", chat_history
68
+
69
+ input_textbox.submit(handle_response, [input_textbox, chat_output], [input_textbox, chat_output])
70
+ send_button.click(handle_response, [input_textbox, chat_output], [input_textbox, chat_output])
71
+
72
+ demo.css = """
73
+ #input-row {
74
+ position: absolute;
75
+ bottom: 10px;
76
+ width: 100%;
77
+ padding: 10px;
78
+ background-color: #f5f5f5;
79
+ border-top: 1px solid #ddd;
80
+ }
81
+ #chat-box {
82
+ height: calc(100vh - 100px); /* Adjust the height of chat history */
83
+ overflow-y: scroll;
84
+ }
85
+ """
86
+
87
  return demo
88
 
89
  demo = chat_interface()