# NOTE(review): the original paste carried Hugging Face Spaces page metadata
# here (status banner "Runtime error", commit hashes, gutter line numbers).
# It was scrape residue, not Python, and has been replaced by this comment.
# https://towardsai.net/p/machine-learning/deploying-a-langchain-large-language-model-llm-with-streamlit-pinecone?amp=1
"""Python file to serve as the frontend"""
import os
import pinecone
import streamlit as st
from streamlit_chat import message
from langchain.llms import OpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.chains import ConversationChain
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains import RetrievalQA
from langchain.chains.query_constructor.base import AttributeInfo
# Initialise the Pinecone client from environment-supplied credentials.
pinecone.init(
    api_key=str(os.environ['PINECONE_API_KEY']),
    environment=str(os.environ['PINECONE_ENV']),
)

# Deterministic completion model, passed to the retriever in load_chain().
llm = OpenAI(temperature=0)

# Embeddings must match those used when the "impromptu" index was populated.
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone.from_existing_index("impromptu", embeddings)

# Plain-language summary of the corpus; the self-query retriever uses this
# when translating user questions into metadata-filtered vector queries.
document_content_description = ("Excerpts from the book Impromptu, "
                                "jointly authored by Reid Hoffman, "
                                "GPT-3, and GPT 4.")
# Metadata fields the self-query retriever is allowed to filter on.  Each
# AttributeInfo tells the query-constructor LLM a field's name, meaning,
# and type so it can build structured filters from natural language.
metadata_field_info = [
    AttributeInfo(
        name="author",
        description="The author of the excerpt",
        type="string or list[string]",
    ),
    AttributeInfo(
        name="chapter_number",
        description="The chapter number of excerpt",
        type="integer",
    ),
    AttributeInfo(
        name="chapter_name",
        description="The chapter name of the excerpt",
        type="string",
    ),
]
def load_chain():
    """Build the question-answering chain over the "impromptu" index.

    Wires the module-level ``llm``, ``vectorstore``, corpus description and
    metadata schema into a self-query retriever, then wraps that retriever
    in a "stuff" RetrievalQA chain.

    Returns:
        RetrievalQA: chain whose ``run(query)`` returns the answer string
        (``return_source_documents=False``).
    """
    retriever = SelfQueryRetriever.from_llm(
        llm,
        vectorstore,
        document_content_description,
        metadata_field_info,
        verbose=True)
    # Reuse the module-level deterministic model (temperature=0).  The
    # original instantiated a second OpenAI() here with the default
    # temperature, giving an inconsistent, non-deterministic answerer.
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=False)
    return qa
# Build the chain once per script run.  Note Streamlit re-executes this
# whole file on every user interaction, so the chain is rebuilt each rerun.
chain = load_chain()

# From here down is all the StreamLit UI.
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("LangChain Demo")

# Conversation history kept across reruns in session state:
# "generated" holds model answers, "past" holds the matching user inputs.
# (The if-bodies below had lost their indentation in the pasted source —
# a syntax error — and have been restored.)
if "generated" not in st.session_state:
    st.session_state["generated"] = []
if "past" not in st.session_state:
    st.session_state["past"] = []
def get_text() -> str:
    """Render the query input box and return its current contents.

    The default question pre-fills the box on first render; the widget is
    keyed as "input" so its value persists across Streamlit reruns.
    (The function body had lost its indentation in the pasted source — a
    syntax error — and has been restored.)
    """
    input_text = st.text_input("You: ",
                               "What are the four types of hallucinations?",
                               key="input")
    return input_text
user_input = get_text()

if user_input:
    # chain.run returns the answer string directly, because load_chain sets
    # return_source_documents=False.  (Restored the indentation lost in the
    # pasted source, dropped the stray trailing "|" table artifact, and
    # removed the commented-out debugging scaffold and bare print calls.)
    output = chain.run(user_input)
    st.session_state.past.append(user_input)
    st.session_state.generated.append(output)

if st.session_state["generated"]:
    # Render newest exchange first; each message widget needs a unique key.
    for i in range(len(st.session_state["generated"]) - 1, -1, -1):
        message(st.session_state["generated"][i], key=str(i),
                avatar_style="shapes")
        message(st.session_state["past"][i], is_user=True,
                key=str(i) + "_user", avatar_style="croodles")