Spaces:
Sleeping
Sleeping
File size: 2,303 Bytes
9f54a3b 9892e5a 9f54a3b 9892e5a 9f54a3b 7986268 fe5f88e 9f54a3b ed8bd19 7986268 002b092 142827c 7986268 9f54a3b ed8bd19 9f54a3b d36f2e1 9f54a3b 091c4e8 ed8bd19 9f54a3b d36f2e1 9f54a3b d36f2e1 fe5f88e 7d011fb fe5f88e d36f2e1 fe5f88e d36f2e1 ed8bd19 fe5f88e 7d011fb d36f2e1 fe5f88e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Initialize the OpenAI-compatible API client with the key from the environment.
# NOTE(review): model_link below names a Hugging Face Mistral model, but no
# base_url is set on the client — presumably this targets an OpenAI-compatible
# inference endpoint; confirm the deployment configuration.
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

# Model identifier for Mistral-7B-Instruct v0.2.
model_link = "mistralai/Mistral-7B-Instruct-v0.2"
def reset_conversation():
    """Clear the stored chat history so the conversation starts over."""
    st.session_state.messages = []
# Sampling temperature, fixed in code rather than exposed as a UI control.
temperature = 0.5

# Button that wipes the conversation history when clicked.
st.button('Reset Chat', on_click=reset_conversation)

# Make sure the chat-history container exists before it is read below.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Replay the stored chat history so the transcript survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input; chat_input returns None until the user submits text.
if prompt := st.chat_input("Type your message here..."):
    # Display user message in chat message container.
    with st.chat_message("user"):
        st.markdown(prompt)
    # Persist it so the next rerun (and the model request) sees the full history.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Interact with the model.
    try:
        # Construct the conversation context: system prompt + full history.
        conversation_context = [{"role": "system", "content": "You are a helpful assistant."}]
        conversation_context.extend(st.session_state.messages)

        # Send the conversation context to the API.
        response = client.chat.completions.create(
            model=model_link,
            messages=conversation_context,
            temperature=temperature,
            max_tokens=150,  # Adjust the max tokens according to your needs
        )
        # BUG FIX: with openai>=1.0, `message` is a ChatCompletionMessage
        # object, not a dict — subscripting (message["content"]) raises
        # TypeError; attribute access is required.
        assistant_response = response.choices[0].message.content

        # Display assistant response in chat message container.
        with st.chat_message("assistant"):
            st.markdown(assistant_response)
        # Append the assistant's response to the chat history.
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
    except Exception as e:
        # Show a friendly fallback plus the underlying error for debugging.
        with st.chat_message("assistant"):
            st.markdown("Sorry, I couldn't process your request. Please try again later.")
            st.error(f"An error occurred: {e}")
|