# Streamlit chat app: Mistral-7B-Instruct served through the Hugging Face
# Inference API (OpenAI-compatible endpoint).
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

# Pull HUGGINGFACEHUB_API_TOKEN (and any other settings) from a local .env file.
load_dotenv()

# OpenAI-compatible client pointed at the Hugging Face Inference API.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # set in .env or the environment
)

# Hub model id used for every chat-completion request below.
model_link = "mistralai/Mistral-7B-Instruct-v0.2"
def reset_conversation():
    '''
    Reset the conversation: clear the stored chat history so the
    next Streamlit rerun starts with an empty transcript.
    '''
    st.session_state.messages = []
    return None
# Sampling temperature, fixed in code (0 = deterministic, higher = more varied).
temperature = 0.5

# Button that wipes the conversation via reset_conversation on click.
st.button('Reset Chat', on_click=reset_conversation)  # Reset button

# Initialize chat history on the first run of the session.
if "messages" not in st.session_state:
    st.session_state.messages = []

st.title("Mistral-7B Chatbot")
# Function to get streamed response | |
def get_streamed_response(message, history):
    """Build the message list for a chat-completion API call.

    Parameters
    ----------
    message : str
        The user's current prompt; sent as the final user turn.
    history : list[tuple[str, str]]
        Completed (user, assistant) exchanges, oldest first.

    Returns
    -------
    list[dict]
        System prompt, alternating history turns, then the current message.
    """
    all_message = [{
        "role": "system",
        "content": "From now on, you are an AI assistant knowledgeable in general topics. You can respond with relevant information and provide concise, friendly replies. Always maintain a helpful and neutral tone. Ensure to be concise to keep the conversation flowing smoothly."
    }]
    for human, assistant in history:
        all_message.append({"role": "user", "content": human})
        all_message.append({"role": "assistant", "content": assistant})
    # Bug fix: the current prompt was never appended, so the model only ever
    # saw past history. Add it as the final user turn.
    all_message.append({"role": "user", "content": message})
    return all_message
# Replay the stored conversation so it survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("Type your message here..."):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Build (user, assistant) pairs from *completed* exchanges only.
    # Bug fix: the original paired every stored message with the first
    # assistant reply, duplicating and mis-attributing turns.
    history = []
    pending_user = None
    for msg in st.session_state.messages:
        if msg["role"] == "user":
            pending_user = msg["content"]
        elif msg["role"] == "assistant" and pending_user is not None:
            history.append((pending_user, msg["content"]))
            pending_user = None

    # Record the new user turn after building the history so the current
    # prompt is not also replayed as a past exchange.
    st.session_state.messages.append({"role": "user", "content": prompt})

    api_messages = get_streamed_response(prompt, history)
    # Defensive: ensure the current prompt is the final user turn even if the
    # helper did not append it.
    if not api_messages or api_messages[-1] != {"role": "user", "content": prompt}:
        api_messages.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            response = client.chat.completions.create(
                model=model_link,
                messages=api_messages,
                temperature=temperature,
                max_tokens=150  # Adjust the token limit as needed
            )
            # Bug fix: openai>=1.0 returns objects, not dicts — use attribute
            # access instead of response['choices'][0]['message']['content'].
            reply = response.choices[0].message.content
            st.markdown(reply)
            # Bug fix: append inside the try — previously this ran even after
            # a failed request, raising NameError on the unbound `response`.
            st.session_state.messages.append({"role": "assistant", "content": reply})
        except Exception as e:
            st.markdown("An error occurred. Please try again later.")
            st.markdown("Error details:")
            st.markdown(str(e))