# "Spaces: Runtime error" — Hugging Face Spaces status banner captured when
# this file was scraped; kept as a comment so the module parses.
# https://towardsai.net/p/machine-learning/deploying-a-langchain-large-language-model-llm-with-streamlit-pinecone?amp=1
"""Python file to serve as the frontend"""
import os
import pinecone
import streamlit as st
from streamlit_chat import message
from langchain.llms import OpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.chains import ConversationChain
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo

# Initialise external services once at import time: the Pinecone client,
# the completion LLM, the embedding model, and the vector store backed by
# the existing "impromptu" index.
pinecone.init(
    api_key=str(os.environ['PINECONE_API_KEY']),
    environment=str(os.environ['PINECONE_ENV']))
llm = OpenAI(temperature=0)
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone.from_existing_index("impromptu", embeddings)
# Description of the corpus plus the metadata schema the self-query
# retriever may filter on (author, chapter number, chapter name).
document_content_description = ("Excerpts from the book Impromptu, "
                                "jointly authored by Reid Hoffman, "
                                "GPT-3, and GPT 4.")
metadata_field_info = [
    AttributeInfo(
        name="author",
        description="The author of the excerpt",
        type="string or list[string]",
    ),
    AttributeInfo(
        name="chapter_number",
        description="The chapter number of excerpt",
        type="integer",
    ),
    AttributeInfo(
        name="chapter_name",
        description="The chapter name of the excerpt",
        type="string",
    ),
]
def load_chain():
    """Build the self-querying retriever over the Pinecone vector store.

    Returns:
        SelfQueryRetriever: a retriever that uses the LLM to turn a
        natural-language question into a vector query plus metadata
        filters over ``metadata_field_info``.
    """
    retriever = SelfQueryRetriever.from_llm(
        llm,
        vectorstore,
        document_content_description,
        metadata_field_info,
        verbose=True)
    return retriever
chain = load_chain()

# From here down is all the StreamLit UI.
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("LangChain Demo")

# Chat history survives Streamlit reruns via session_state:
# "generated" holds bot replies, "past" holds user inputs.
if "generated" not in st.session_state:
    st.session_state["generated"] = []
if "past" not in st.session_state:
    st.session_state["past"] = []
def get_text():
    """Render the input box and return the user's current query string."""
    input_text = st.text_input("You: ", "What are the four types of hallucinations?", key="input")
    return input_text
user_input = get_text()

if user_input:
    docs = chain.get_relevant_documents(user_input)
    # Surface the top-ranked excerpt as the reply; guard against an empty
    # retrieval so docs[0] cannot raise IndexError.
    if docs:
        output = docs[0].page_content
    else:
        output = "No relevant documents found."
    st.session_state.past.append(user_input)
    st.session_state.generated.append(output)

if st.session_state["generated"]:
    # Render newest exchange first; each message needs a unique key.
    for i in range(len(st.session_state["generated"]) - 1, -1, -1):
        message(st.session_state["generated"][i], key=str(i))
        message(st.session_state["past"][i], is_user=True, key=str(i) + "_user")