# Import necessary libraries
import streamlit as st
import openai
import os
# Set the title for the Streamlit app
st.title("Simple Chatbot")
# Load the OpenAI API key from Hugging Face's environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
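# (On Hugging Face Spaces, OPENAI_API_KEY can be added as a secret in the Space settings;
# it is then exposed to the app as an environment variable.)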
# Check if the API key is loaded
if openai_api_key is None:
    st.error("API key not found. Please set the OpenAI API key in the environment.")
    st.stop()
# Set the API key for OpenAI
openai.api_key = openai_api_key
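# Note: openai.api_key and openai.ChatCompletion (used below) belong to the pre-1.0
# openai SDK (e.g. openai==0.28); openai>=1.0 replaces them with a client object
# (OpenAI().chat.completions.create).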
# Define the template for the chatbot prompt
prompt_template = """
You are a helpful Assistant who answers users' questions
based on your general knowledge. Keep your answers short
and to the point.
"""
# Get the current prompt from the session state or set default
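# Streamlit reruns the whole script on every interaction, so the conversation
# is kept in st.session_state to survive reruns.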
if "prompt" not in st.session_state:
st.session_state["prompt"] = [{"role": "system",
"content": prompt_template}]
prompt = st.session_state["prompt"]
# Display previous chat messages
for message in prompt:
    if message["role"] != "system":
        with st.chat_message(message["role"]):
            st.write(message["content"])
# Get the user's question using Streamlit's chat input
question = st.chat_input("Ask anything")
# Handle the user's question
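# st.chat_input returns None until the user submits a message, so the block
# below only runs after a submission.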
if question:
    # Add the user's question to the prompt and display it
    prompt.append({"role": "user", "content": question})
    with st.chat_message("user"):
        st.write(question)

    # Display an empty assistant message while waiting for the response
    with st.chat_message("assistant"):
        botmsg = st.empty()

    # Define a function to interact with the OpenAI API
    def chat_gpt(messages):
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages
        )
        return response.choices[0].message["content"].strip()

    # Call the chat_gpt function
    result = chat_gpt(prompt)

    # Display the assistant's response
    botmsg.write(result)

    # Add the assistant's response to the prompt
    prompt.append({"role": "assistant", "content": result})

    # Store the updated prompt in the session state
    st.session_state["prompt"] = prompt