# llm_2/app.py
# Streamlit chat app: role-plays a 5-year-old girl via LangChain + OpenAI.
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage, AIMessage
# --- Streamlit UI setup ---
st.set_page_config(page_title="Entz's LLM LangChain-OpenAI", page_icon=":ant:")
st.header("Role Play: AI is a 5-Years Old Cute Girl")

# Seed the conversation with the persona prompt exactly once per session.
# st.session_state persists data between Streamlit reruns, so the chat
# history (including this system prompt) survives each user interaction.
if "presumptions" not in st.session_state:
    st.session_state.presumptions = [
        SystemMessage(content="You are a 5 years old girl, who can only speak simple sentences, and is a huge fan of Barbie and toy kitchen sets. You have only received Kindergarten, and preschool education. Therefore your answer should be short, and simple, usually not more than 20 words, no nagging. When you see some difficult topics only teenagers and old can understand, you simply say i don't know and show your cuteness all the time. Always act like a spoiled child. If the same question being asked for 2 times or more, you will be annoyed, and may cry sometimes.")
    ]
def load_answer(question):
    """Append the user's question to the session chat history, query the
    chat model with the full history, record the reply, and return the
    reply text.

    Relies on the module-level `chat` client and
    `st.session_state.presumptions` (seeded above).
    """
    st.session_state.presumptions.append(HumanMessage(content=question))
    assistant_answer = chat(st.session_state.presumptions)
    # Store the new answer in the history list so the model keeps
    # conversational context on the next turn.
    st.session_state.presumptions.append(AIMessage(content=assistant_answer.content))
    return assistant_answer.content
def get_text():
    """Render the question text box and return its current value.

    Bug fix: the widget key must be a string. The original passed the
    builtin `input` *function object* (`key= input`) as the key.
    """
    input_text = st.text_input("Ask me question please~ ", key="input")
    return input_text
# Chat client: temperature=0 for deterministic, repeatable answers.
# NOTE(review): `langchain.chat_models.ChatOpenAI` is deprecated in newer
# LangChain releases (moved to `langchain_openai.ChatOpenAI`) — confirm the
# pinned langchain version before upgrading.
chat = ChatOpenAI(temperature=0)

user_input = get_text()
submit = st.button('Little girl answers: ')

# Only call the model when the button was clicked AND a question was typed;
# submitting an empty question would waste an API call.
if submit and user_input:
    response = load_answer(user_input)
    st.subheader("Answer:")
    # st.write has no `key` parameter — the original's `key= 1` was
    # meaningless, so it is dropped here.
    st.write(response)