# STLA-BABY / app.py
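"""STLA BABY: a Gradio chat app that answers questions with LangChain
RetrievalQA over an Azure OpenAI chat model, backed by either a local Chroma
store or a remote Pinecone index."""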
import openai
import os
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chat_models import AzureChatOpenAI
from langchain.document_loaders import DirectoryLoader
from langchain.chains import RetrievalQA
from langchain.vectorstores import Pinecone
import pinecone
from pinecone.core.client.configuration import Configuration as OpenApiConfiguration
import gradio as gr
import time
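# Azure OpenAI configuration; OPENAI_API_KEY and OPENAI_API_BASE are expected
# to be provided by the hosting environment (e.g. Space secrets).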
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
os.environ["OPENAI_API_BASE"] = os.getenv("OPENAI_API_BASE")
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
chat = AzureChatOpenAI(
    deployment_name="Chattester",
    temperature=0,
)
embeddings = OpenAIEmbeddings(deployment="model_embedding")
pinecone.init(
    api_key=os.getenv("pinecone_api_key"),
    environment='asia-southeast1-gcp-free',
    # openapi_config=openapi_config
)
index_name = 'stla-baby'
index = pinecone.Index(index_name)
# index.delete(delete_all=True, namespace='')
# print(pinecone.whoami())
# print(index.describe_index_stats())
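# Two retriever backends are kept side by side: a local Chroma store persisted
# under ./db and the remote Pinecone index connected above.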
vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
vectordb_p = Pinecone.from_existing_index(index_name, embeddings)
# loader = DirectoryLoader('./documents', glob='**/*.txt')
# documents = loader.load()
# text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
# split_docs = text_splitter.split_documents(documents)
# print(split_docs)
# vectordb = Chroma.from_documents(split_docs, embeddings, persist_directory='db')
# question = "what is LCDV ?"
# rr = vectordb.similarity_search(query=question, k=4)
# vectordb.similarity_search(question)
# print(type(rr))
# print(rr)
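# Gradio ChatInterface handlers: each receives (message, history) and yields
# the reply; using a generator lets Gradio stream the answer into the chat box.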
def chathmi(message, history):
    print(message)
    response = QAQuery_p(message)
    time.sleep(0.3)
    print(history)
    yield response
    # yield history
def chathmi2(message, history):
    response = QAQuery(message)
    time.sleep(0.3)
    print(history)
    yield response
    # yield history
# chatbot = gr.Chatbot().style(color_map =("blue", "pink"))
# chatbot = gr.Chatbot(color_map =("blue", "pink"))
demo = gr.ChatInterface(
    chathmi2,
    title="STLA BABY - YOUR FRIENDLY GUIDE",
)
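# The interface itself is launched from the __main__ block below.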
# demo = gr.Interface(
# chathmi,
# ["text", "state"],
# [chatbot, "state"],
# allow_flagging="never",
# )
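# Rebuild the Pinecone index from ./documents: load, split, wipe the old
# vectors, then upload the new chunks.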
def CreatDb_P():
    global vectordb_p
    index_name = 'stla-baby'
    loader = DirectoryLoader('./documents', glob='**/*.txt')
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
    split_docs = text_splitter.split_documents(documents)
    print(split_docs)
    # Clear the existing index before re-uploading the freshly split documents.
    pinecone.Index(index_name).delete(delete_all=True, namespace='')
    vectordb_p = Pinecone.from_documents(split_docs, embeddings, index_name=index_name)
    print("Pinecone Updated Done")
    print(index.describe_index_stats())
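# Retrieval-augmented QA against the Pinecone index: the top-3 chunks are
# "stuffed" into the prompt of the Azure chat model.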
def QAQuery_p(question: str):
    global vectordb_p
    # vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
    retriever = vectordb_p.as_retriever()
    retriever.search_kwargs['k'] = 3
    # retriever.search_kwargs['fetch_k'] = 100
    qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", retriever=retriever, return_source_documents=True)
    # qa = VectorDBQA.from_chain_type(llm=chat, chain_type="stuff", vectorstore=vectordb, return_source_documents=True)
    # res = qa.run(question)
    res = qa({"query": question})
    print("-" * 20)
    print("Question:", question)
    # print("Answer:", res)
    print("Answer:", res['result'])
    print("-" * 20)
    print("Source:", res['source_documents'])
    response = res['result']
    # response = res['source_documents']
    return response
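# Same build step as CreatDb_P, but for the local Chroma store persisted in ./db.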
def CreatDb():
    global vectordb
    loader = DirectoryLoader('./documents', glob='**/*.txt')
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
    split_docs = text_splitter.split_documents(documents)
    print(split_docs)
    vectordb = Chroma.from_documents(split_docs, embeddings, persist_directory='db')
    vectordb.persist()
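# Retrieval-augmented QA against the local Chroma store; mirrors QAQuery_p.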
def QAQuery(question: str):
    global vectordb
    # vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
    retriever = vectordb.as_retriever()
    retriever.search_kwargs['k'] = 3
    # retriever.search_kwargs['fetch_k'] = 100
    qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", retriever=retriever, return_source_documents=True)
    # qa = VectorDBQA.from_chain_type(llm=chat, chain_type="stuff", vectorstore=vectordb, return_source_documents=True)
    # res = qa.run(question)
    res = qa({"query": question})
    print("-" * 20)
    print("Question:", question)
    # print("Answer:", res)
    print("Answer:", res['result'])
    print("-" * 20)
    print("Source:", res['source_documents'])
    # The chat handler expects the answer back; without this return, chathmi2
    # would yield None.
    return res['result']
# Helper: plain text completion against the Azure deployment.
def completeText(Text):
    deployment_id = "Chattester"
    prompt = Text
    completion = openai.Completion.create(deployment_id=deployment_id,
                                          prompt=prompt, temperature=0)
    print(f"{prompt}{completion['choices'][0]['text']}.")
# Helper: one-shot chat completion against the Azure deployment.
def chatText(Text):
    deployment_id = "Chattester"
    conversation = [{"role": "system", "content": "You are a helpful assistant."}]
    user_input = Text
    conversation.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(messages=conversation,
                                            deployment_id=deployment_id)
    print("\n" + response["choices"][0]["message"]["content"] + "\n")
if __name__ == '__main__':
    # chatText("what is AI?")
    # CreatDb()
    # QAQuery("what is COFOR ?")
    # CreatDb_P()
    # QAQuery_p("what is GST ?")
    demo.queue().launch()