import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
model = AutoModelForCausalLM.from_pretrained("Smilyai-labs/Smily-ultra-1")
tokenizer = AutoTokenizer.from_pretrained("Smilyai-labs/Smily-ultra-1")
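# Note: inference runs on CPU by default. If the Space has a GPU (an
# assumption, not checked here), the model and the input ids built below
# would both need a .to("cuda") to use it.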
# Generate a response from the model, given the new message and the
# (user, bot) message pairs held by the Chatbot component
def chatbot(input_text, chat_history):
    chat_history = chat_history or []
    # Encode the new user input, with an EOS token marking the end of the turn
    new_user_input_ids = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
    # Re-encode the earlier turns, in order, and prepend them as context
    history_ids = [
        tokenizer.encode(user_msg + tokenizer.eos_token + bot_msg + tokenizer.eos_token, return_tensors="pt")
        for user_msg, bot_msg in chat_history
    ]
    bot_input_ids = torch.cat(history_ids + [new_user_input_ids], dim=-1)
    # Generate a response from the model
    chat_history_ids = model.generate(
        bot_input_ids,
        max_length=1000,  # budget for prompt plus newly generated tokens
        pad_token_id=tokenizer.eos_token_id,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens (everything after the prompt)
    bot_response = tokenizer.decode(
        chat_history_ids[0, bot_input_ids.shape[-1]:], skip_special_tokens=True
    )
    # Append the new turn to the history and clear the input textbox
    chat_history.append((input_text, bot_response))
    return "", chat_history
# Gradio interface with a more chatbot-like layout
with gr.Blocks() as demo:
    with gr.Column():
        # Conversation history display
        chatbot_output = gr.Chatbot(label="Chatbot")
        # Text box for user input at the bottom
        user_input = gr.Textbox(placeholder="Type your message here...", show_label=False)
        # Submit button to trigger the chatbot function
        submit_button = gr.Button("Send")
        # Wire the button to the chatbot function: it reads the textbox and the
        # current history, then writes back a cleared textbox and the new history
        submit_button.click(chatbot, inputs=[user_input, chatbot_output], outputs=[user_input, chatbot_output])

# Launch the Gradio interface
demo.launch()
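# Optional: on a shared Space it may help to call demo.queue() before
# launch(), so concurrent requests to model.generate() are handled one
# at a time instead of contending for the same model.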