File size: 2,893 Bytes
9f54a3b
 
 
9892e5a
9f54a3b
9892e5a
9f54a3b
7986268
9f54a3b
9892e5a
 
 
9f54a3b
7986268
 
002b092
142827c
 
 
 
 
 
 
7986268
 
9f54a3b
7986268
 
9f54a3b
 
 
 
 
7986268
091c4e8
 
 
 
 
 
 
 
 
 
 
 
 
7986268
9f54a3b
 
 
 
 
 
091c4e8
9f54a3b
 
 
 
 
 
 
091c4e8
 
 
9f54a3b
 
002b092
091c4e8
7986268
091c4e8
7986268
091c4e8
002b092
9892e5a
091c4e8
002b092
 
091c4e8
 
 
002b092
091c4e8
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

# Load environment variables (e.g. HUGGINGFACEHUB_API_TOKEN) from a local .env file.
load_dotenv()

# Initialize the client
# OpenAI-compatible client pointed at the Hugging Face Inference API router.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
)

# Model link
# Hugging Face model repo id, passed as the `model` argument on each request.
model_link = "mistralai/Mistral-7B-Instruct-v0.2"

def reset_conversation():
    """Clear the stored chat history so the UI starts a fresh conversation."""
    st.session_state.messages = []

# Set the temperature value directly in the code
temperature = 0.5

# Add a button to clear conversation
st.button('Reset Chat', on_click=reset_conversation)  # Reset button

# Initialize chat history
# Streamlit re-executes this script on every interaction; session_state is
# the only storage that persists across those reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

st.title("Mistral-7B Chatbot")

# Function to build the message payload for a chat-completion request
def get_streamed_response(message, history):
    """Build the chat-completion message list for one request.

    Args:
        message: The user's current prompt.
        history: Iterable of (user_text, assistant_text) pairs from earlier turns.

    Returns:
        A list of {"role", "content"} dicts: a system primer, the prior
        turns in order, and the current user message as the final entry.
    """
    all_message = [{
        "role": "system",
        "content": "From now on, you are an AI assistant knowledgeable in general topics. You can respond with relevant information and provide concise, friendly replies. Always maintain a helpful and neutral tone. Ensure to be concise to keep the conversation flowing smoothly."
    }]

    for human, assistant in history:
        all_message.append({"role": "user", "content": human})
        all_message.append({"role": "assistant", "content": assistant})

    # Bug fix: `message` was previously ignored, so the current prompt was
    # never added here. Skip the append only if the caller already placed
    # the prompt as the final user turn, to avoid sending it twice.
    if all_message[-1] != {"role": "user", "content": message}:
        all_message.append({"role": "user", "content": message})

    return all_message

# Replay the stored conversation so it survives Streamlit's script reruns.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])

# Accept user input
if prompt := st.chat_input("Type your message here..."):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Rebuild prior turns as (user, assistant) pairs, excluding the prompt
    # we just appended. The old one-liner paired *every* stored message with
    # the first assistant reply, sending a garbled context to the model.
    history = []
    pending_user = None
    for msg in st.session_state.messages[:-1]:
        if msg["role"] == "user":
            pending_user = msg["content"]
        elif msg["role"] == "assistant" and pending_user is not None:
            history.append((pending_user, msg["content"]))
            pending_user = None

    payload = get_streamed_response(prompt, history)
    # Defensive: guarantee the current prompt is the final user turn even if
    # get_streamed_response did not append it.
    if payload[-1] != {"role": "user", "content": prompt}:
        payload.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            response = client.chat.completions.create(
                model=model_link,
                messages=payload,
                temperature=temperature,
                max_tokens=150  # Adjust the token limit as needed
            )

            # openai>=1.0 returns a ChatCompletion object, not a dict:
            # subscripting it (response['choices']...) raises TypeError.
            reply = response.choices[0].message.content
            st.markdown(reply)

            # Record the reply only on success; the old code appended
            # `response[...]` outside the try block, which raised NameError
            # whenever the API call had failed.
            st.session_state.messages.append({"role": "assistant", "content": reply})

        except Exception as e:
            st.markdown("An error occurred. Please try again later.")
            st.markdown("Error details:")
            st.markdown(str(e))