abdullahalioo commited on
Commit
d8bcefa
·
verified ·
1 Parent(s): 1e66054

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +40 -0
main.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+
4
# Hugging Face model id for the chat model served by this app.
MODEL_NAME = "Qwen/Qwen1.5-0.5B"

# Build the text-generation pipeline once at import time; it is reused
# for every incoming chat request.
pipe = pipeline("text-generation", model=MODEL_NAME)
6
+
7
def chatbot(user_input, history=None):
    """Generate a model reply and return the updated conversation.

    Args:
        user_input: The latest message typed by the user.
        history: Prior conversation as a list of ``{"role", "content"}``
            dicts, or ``None`` for a fresh conversation.

    Returns:
        A tuple ``(chat_display, history)`` where ``chat_display`` is a
        Markdown transcript of the whole conversation and ``history`` is
        the updated message list (including the new assistant turn).
    """
    # Bug fix: the original signature used a mutable default (history=[]),
    # which is shared across calls — separate sessions would silently leak
    # into each other. Use the None-sentinel idiom instead.
    if history is None:
        history = []

    # Append the new user turn to the running conversation.
    messages = history + [{"role": "user", "content": user_input}]

    # Generate a response from the model.
    response = pipe(messages, max_new_tokens=150, do_sample=True, temperature=0.7)[0]['generated_text']

    # The chat pipeline may return the full conversation as a list of
    # message dicts; in that case the assistant's reply is the last entry.
    # Otherwise assume a plain string was returned.
    assistant_response = response[-1]["content"] if isinstance(response, list) and len(response) > 0 else response

    # Record the assistant turn so the caller can feed history back in.
    history = messages + [{"role": "assistant", "content": assistant_response}]

    # Render the transcript as Markdown for display in the UI.
    chat_display = ""
    for msg in history:
        role = "You" if msg["role"] == "user" else "Assistant"
        chat_display += f"**{role}**: {msg['content']}\n\n"

    return chat_display, history
27
+
28
# --- Gradio interface -------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# AI Chatbot with Qwen1.5-0.5B")
    # Rendered Markdown transcript of the whole conversation.
    chatbot_output = gr.Markdown()
    user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
    # Per-session conversation history (list of role/content dicts).
    state = gr.State(value=[])

    # Pressing Enter submits the message; the handler returns the new
    # transcript plus updated history, which is written back into `state`.
    user_input.submit(
        fn=chatbot,
        inputs=[user_input, state],
        outputs=[chatbot_output, state],
    )

# Bug fix: the original script never called launch(), so running the file
# directly started no server. Guarded under __main__ so that environments
# which auto-launch a module-level `demo` (e.g. HF Spaces' Gradio SDK)
# do not start a second instance on import.
if __name__ == "__main__":
    demo.launch()