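"""A minimal Streamlit chatbot wrapping microsoft/DialoGPT-medium for local text generation.

Run with `streamlit run app.py` (assuming this file is saved as app.py).
"""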
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the pre-trained model and tokenizer
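# st.cache_resource loads the model once and reuses it across Streamlit reruns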
@st.cache_resource
def load_model():
    model_name = "microsoft/DialoGPT-medium"  # Replace with your preferred model
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer
model, tokenizer = load_model()
# Chat history
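# st.session_state persists values across reruns, so the conversation survives each interaction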
if "messages" not in st.session_state:
    st.session_state["messages"] = []
# Sidebar configuration
st.sidebar.title("Chatbot Settings")
st.sidebar.write("Customize your chatbot:")
max_length = st.sidebar.slider("Max Response Length", 10, 200, 50)
temperature = st.sidebar.slider("Response Creativity (Temperature)", 0.1, 1.0, 0.7)
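# Both settings are passed to model.generate() below; lower temperature gives more deterministic replies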
# App title
st.title("🤖 Open Source Text-to-Text Chatbot")
# Chat Interface
st.write("### Chat with the bot:")
user_input = st.text_input("You:", key="user_input", placeholder="Type your message here...")
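# st.text_input returns the current box contents on every rerun; a non-empty value triggers generation below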
if user_input:
    # Encode the input and add chat history for context
    inputs = tokenizer.encode(
        " ".join(st.session_state["messages"] + [user_input]),
        return_tensors="pt",
        truncation=True,
    )
    # Generate a response; do_sample=True is required for temperature to take effect,
    # and max_new_tokens bounds the reply length independently of the prompt length
    response = model.generate(
        inputs,
        max_new_tokens=max_length,
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    bot_response = tokenizer.decode(response[:, inputs.shape[-1]:][0], skip_special_tokens=True)
    # Append to chat history
    st.session_state["messages"].append(f"You: {user_input}")
    st.session_state["messages"].append(f"Bot: {bot_response}")
# Display the chat
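# Re-render the full conversation on every run: user turns in bold, bot turns as block quotes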
for message in st.session_state["messages"]:
    if message.startswith("You:"):
        st.markdown(f"**{message}**")
    else:
        st.markdown(f"> {message}")
# Clear chat history button
if st.button("Clear Chat"):
    st.session_state["messages"] = []
    st.rerun()  # st.experimental_rerun() is deprecated in current Streamlit releases