# https://towardsai.net/p/machine-learning/deploying-a-langchain-large-language-model-llm-with-streamlit-pinecone?amp=1
import os
import pinecone
import streamlit as st
from streamlit_chat import message
from langchain.llms import OpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains import RetrievalQA
from langchain.chains.query_constructor.base import AttributeInfo
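# Pinecone credentials come from the Space's environment secrets;
# init() must run before any index is opened.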
pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"],
    environment=os.environ["PINECONE_ENV"],
)
llm = OpenAI(temperature=0)
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone.from_existing_index("impromptu", embeddings)
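# Assumes an existing Pinecone index named "impromptu" that was already
# populated with OpenAI-embedded excerpts; this app only queries it and
# never writes to it.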
document_content_description = (
    "Excerpts from the book Impromptu, jointly authored by "
    "Reid Hoffman, GPT-3, and GPT-4."
)
metadata_field_info = [
    AttributeInfo(
        name="author",
        description="The author of the excerpt",
        type="string or list[string]",
    ),
    AttributeInfo(
        name="chapter_number",
        description="The chapter number of the excerpt",
        type="integer",
    ),
    AttributeInfo(
        name="chapter_name",
        description="The chapter name of the excerpt",
        type="string",
    ),
]
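# Given the fields above, a question like "What are the risks in chapter
# one?" should be split by the LLM into a semantic query ("risks") plus a
# structured filter roughly equivalent to eq("chapter_number", 1).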
def load_chain():
    # The self-query retriever uses the temperature-0 LLM to translate the
    # user's question into a semantic query plus a metadata filter.
    retriever = SelfQueryRetriever.from_llm(
        llm,
        vectorstore,
        document_content_description,
        metadata_field_info,
        verbose=True,
    )
    # A second, default-temperature OpenAI LLM answers over the retrieved
    # excerpts, which the "stuff" chain packs into a single prompt.
    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(),
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=False,
    )
    return qa
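# Streamlit re-executes this script on every interaction, so the chain is
# rebuilt on each run; on newer Streamlit releases a caching decorator
# such as @st.cache_resource could keep a single instance alive instead.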
chain = load_chain()
# From here down is all the Streamlit UI.
st.set_page_config(page_title="Impromptu GPT", page_icon=":robot:")
st.header("Impromptu GPT")
'''
> A secondary UI testing LangChain's Python-only
[`SelfQueryRetriever`](https://python.langchain.com/en/latest/modules/indexes/retrievers/examples/self_query_retriever.html)
interface, which uses an LLM to structure vectorstore queries.
Below, conversational requests for specific chapters and
speakers are extracted into relevant metadata filters.
'''
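# streamlit_chat's message() renders one bubble at a time, so the chat
# history is kept as two parallel lists in session state: "past" for user
# messages and "generated" for model replies.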
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
def get_text():
    input_text = st.text_input(
        "You: ",
        "What are some of the risks mentioned in chapter one?",
        key="input",
    )
    return input_text
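# Note: text_input ships with a default question, so user_input is
# non-empty on first load and the example query is answered immediately.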
user_input = get_text()
if user_input:
    output = chain.run(user_input)
    st.session_state.past.append(user_input)
    st.session_state.generated.append(output)
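# Walk the history newest-first; each reply is drawn above the user
# message that produced it, and unique keys keep widget identities stable.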
if st.session_state["generated"]:
    for i in range(len(st.session_state["generated"]) - 1, -1, -1):
        message(st.session_state["generated"][i], key=str(i), avatar_style="bottts")
        message(st.session_state["past"][i], is_user=True, key=str(i) + "_user", avatar_style="shapes")