ogegadavis254 committed
Commit ce84433 · verified · 1 Parent(s): 554fe45

Update app.py

Files changed (1)
  1. app.py +16 -13
app.py CHANGED
@@ -8,7 +8,7 @@ load_dotenv()
 # Initialize the client
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
+    api_key=os.getenv('HUGGINGFACEHUB_API_TOKEN')  # Ensure the environment variable is set correctly
 )
 
 # Model link
@@ -35,15 +35,18 @@ st.title("Mistral-7B Chatbot")
 
 # Function to get streamed response
 def get_streamed_response(message, history):
-    all_message = [{
+    all_messages = []
+    # Include system prompt
+    all_messages.append({
         "role": "system",
-        "content": "From now on, you are an AI assistant knowledgeable in general topics. You can respond with relevant information and provide concise, friendly replies. Always maintain a helpful and neutral tone. Ensure to be concise to keep the conversation flowing smoothly."
-    }]
-
+        "content": "You are a helpful assistant knowledgeable in general topics. Respond concisely and maintain a friendly and neutral tone."
+    })
+    # Add previous messages to history
     for msg in history:
-        all_message.append(msg)
-    all_message.append({"role": "user", "content": message})
-    return all_message
+        all_messages.append(msg)
+    # Add the latest user message
+    all_messages.append({"role": "user", "content": message})
+    return all_messages
 
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
@@ -52,7 +55,6 @@ for message in st.session_state.messages:
 
 # Accept user input
 if prompt := st.chat_input("Type your message here..."):
-
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
@@ -65,17 +67,18 @@ if prompt := st.chat_input("Type your message here..."):
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         try:
+            all_messages = get_streamed_response(prompt, history)
             response = client.chat.completions.create(
                 model=model_link,
-                messages=get_streamed_response(prompt, history),
+                messages=all_messages,
                 temperature=temperature,
                 max_tokens=150  # Adjust the token limit as needed
             )
 
-            # Ensure response handling is safe
             if 'choices' in response and response['choices']:
-                st.markdown(response['choices'][0]['message']['content'])
-                st.session_state.messages.append({"role": "assistant", "content": response['choices'][0]['message']['content']})
+                content = response['choices'][0]['message']['content']
+                st.markdown(content)
+                st.session_state.messages.append({"role": "assistant", "content": content})
             else:
                 st.markdown("No response received from the assistant.")
 
84