import gradio as gr
import os
from huggingface_hub import InferenceClient
import time

# Read the API token from the environment (e.g. a Space secret named "hf_token").
hf_token = os.getenv("hf_token")

# Client for the Hugging Face serverless Inference API.
client = InferenceClient(api_key=hf_token)

def get_response(user_input):
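    # Stream a chat completion from the model, yielding the accumulated
    # response text after each chunk so the UI can update progressively.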
    messages = [
        { "role": "system", "content": "you are xylaria 1.4 senoa, developed by sk md saad amin" },
        { "role": "user", "content": user_input }
    ]
    
    stream = client.chat.completions.create(
        model="Qwen/QwQ-32B-Preview", 
        messages=messages, 
        temperature=0.5,
        max_tokens=10240,
        top_p=0.7,
        stream=True
    )
    
    response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # some chunks carry no content (e.g. role-only or final chunks)
            response += delta
            yield response  # yield progressively as the model generates output
        time.sleep(0.05)  # optional: adjust the pacing of the stream (in seconds)

def chat_interface():
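    # Build the Gradio Blocks UI: a chat window plus an input row pinned
    # to the bottom of the page by the CSS below.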
    # Pass the CSS at construction time (the documented route); assigning
    # demo.css after the Blocks context is closed is not reliable.
    css = """
    #input-row {
        position: absolute;
        bottom: 10px;
        width: 100%;
        padding: 10px;
        background-color: #f5f5f5;
        border-top: 1px solid #ddd;
    }
    #chat-box {
        height: calc(100vh - 100px); /* fill the viewport above the input row */
        overflow-y: scroll;
    }
    """

    with gr.Blocks(css=css) as demo:
        with gr.Row():
            with gr.Column(scale=1):
                chat_output = gr.Chatbot(
                    elem_id="chat-box",
                    label="Xylaria 1.4 Senoa Chatbot",
                    show_label=False
                )

        # Column scales are relative integer weights (4:1 replaces 0.8:0.2);
        # gr.Row does not take a fractional scale.
        with gr.Row(elem_id="input-row"):
            with gr.Column(scale=4):
                input_textbox = gr.Textbox(
                    label="Type your message", 
                    placeholder="Ask me anything...",
                    lines=1,
                    max_lines=3,
                    interactive=True,
                    elem_id="user-input",
                    show_label=False
                )
            with gr.Column(scale=1):
                send_button = gr.Button("Send", elem_id="send-btn")

        def handle_response(user_input, chat_history):
            # Append the new user turn and clear the textbox right away,
            # then stream the assistant's reply into the last history entry.
            chat_history = chat_history + [(user_input, "")]
            yield "", chat_history
            for partial_response in get_response(user_input):
                chat_history[-1] = (user_input, partial_response)
                yield "", chat_history  # update the chat progressively

        # One generator handler per event replaces the original pair of
        # handlers (submit_input + handle_response) bound to the same events,
        # which raced over the shared textbox and history state.
        input_textbox.submit(handle_response, [input_textbox, chat_output], [input_textbox, chat_output])
        send_button.click(handle_response, [input_textbox, chat_output], [input_textbox, chat_output])


    return demo

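# Instantiate the UI and start the Gradio server.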
demo = chat_interface()
demo.launch()