Spaces:
Sleeping
Sleeping
File size: 2,594 Bytes
ef37daa 3d08dbc f69c6af e1ff28f f69c6af 691f69e 8afc2a3 3d08dbc f69c6af 8afc2a3 3d08dbc f69c6af 3d08dbc f69c6af 83e20b0 f69c6af 8afc2a3 f69c6af e1ff28f f69c6af e1ff28f f69c6af e1ff28f f69c6af 8afc2a3 f69c6af e0b816f 8afc2a3 f69c6af 8afc2a3 e1ff28f f69c6af 8afc2a3 f69c6af fe44201 f69c6af |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 |
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Deterministic runs: fix the global PyTorch RNG seed.
torch.random.manual_seed(0)

MODEL_NAME = "microsoft/Phi-3.5-mini-instruct"

# Load the model weights onto the CPU; dtype is picked automatically
# from the checkpoint.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="cpu",  # Use CPU
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Text-generation pipeline pinned to the CPU (device=-1).
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1,
)

# System prompt prepended to every request; never shown in the UI.
SYSTEM_MESSAGE = {"role": "system", "content": "You are a helpful AI assistant."}
# Function to process the user input and generate output
def chatbot_response(conversation_history):
    """Generate the assistant's reply for the latest user turn.

    Args:
        conversation_history: list of per-turn dicts, each holding
            "user_input" and, for completed turns, "assistant_reply".

    Returns:
        The same list, mutated in place so the last turn carries
        "assistant_reply".
    """
    # Nothing to answer if no turn has been submitted yet.
    if not conversation_history:
        return conversation_history

    # Rebuild the full chat transcript. BUG FIX: the original sent only the
    # user messages, silently dropping every previous assistant reply, so
    # the model never saw its own answers and lost multi-turn context.
    messages = [SYSTEM_MESSAGE]
    for turn in conversation_history:
        messages.append({"role": "user", "content": turn["user_input"]})
        if "assistant_reply" in turn:
            messages.append({"role": "assistant", "content": turn["assistant_reply"]})

    generation_args = {
        "max_new_tokens": 500,
        "return_full_text": False,
        # Greedy decoding. "temperature" is omitted: with do_sample=False it
        # is ignored by transformers and only emits a warning.
        "do_sample": False,
    }
    output = pipe(messages, **generation_args)
    assistant_reply = output[0]["generated_text"]

    # Attach the reply to the turn that was just submitted.
    conversation_history[-1]["assistant_reply"] = assistant_reply
    return conversation_history
# Define Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# AI Chatbot with System Message")
    with gr.Row():
        with gr.Column():
            chatbox = gr.Chatbot(label="Conversation")
            input_box = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            submit_btn = gr.Button("Submit")
    conversation_state = gr.State([])  # Maintain conversation history

    def _format_history(history):
        # Render the history as (user, assistant) pairs for gr.Chatbot;
        # the pending turn has no reply yet, so default to "".
        return [
            (turn["user_input"], turn.get("assistant_reply", ""))
            for turn in history
        ]

    # Function to update the conversation
    def update_conversation(user_input, history):
        """Handle a submit click: append the user's message, generate the
        assistant's reply, and refresh the chat display.

        Returns (chatbox pairs, updated state, cleared textbox value).
        """
        if user_input.strip():
            history.append({"user_input": user_input})
            history = chatbot_response(history)
        # BUG FIX: the original returned [] for the chatbox on blank input,
        # which wiped the displayed conversation even though the state still
        # held it. Always re-render whatever history exists.
        return _format_history(history), history, ""

    submit_btn.click(
        update_conversation,
        inputs=[input_box, conversation_state],
        outputs=[chatbox, conversation_state, input_box],
    )

# Launch the interface
demo.launch()
|