Update app.py
app.py CHANGED

@@ -5,7 +5,7 @@ from langchain_community.vectorstores import Chroma
 from langchain.retrievers import MultiQueryRetriever
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferWindowMemory
-from langchain_community.llms import llamacpp,
+from langchain_community.llms import llamacpp, huggingface_hub
 from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 from langchain.chains.question_answering import load_qa_chain
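Note: huggingface_hub here is the langchain_community.llms submodule (which provides the HuggingFaceHub wrapper used below), not the standalone huggingface_hub client package.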

@@ -22,6 +22,15 @@ system_prompt = """You are a helpful assistant, you will use the provided contex
 Read the given context before answering questions and think step by step. If you can not answer a user question based on the provided context, inform the user.
 Do not use any other information for answering the user. Provide a detailed answer to the question."""
 
+def load_llmware_model():
+    return huggingface_hub.HuggingFaceHub(
+        repo_id = "",
+        verbose=True,
+        model_kwargs={
+            'temperature':0.03,
+            'n_batch':128,
+        }
+    )
 def load_quantized_model(model_id=None):
     MODEL_ID, MODEL_BASENAME = "TheBloke/zephyr-7B-beta-GGUF","zephyr-7b-beta.Q5_K_S.gguf"
     try:
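The new load_llmware_model() moves query generation off the local GGUF model and onto the hosted Hugging Face Inference API. As committed it is not yet usable: repo_id is left empty, and 'n_batch' is a llama.cpp batching option rather than an Inference API parameter, so the endpoint may ignore or reject it. A minimal filled-in sketch, where the zephyr repo id and the max_new_tokens swap are assumptions, not part of this commit:

import os
from langchain_community.llms import huggingface_hub

def load_llmware_model():
    # HuggingFaceHub wraps the hosted Inference API; it needs a real model repo id
    # and reads HUGGINGFACEHUB_API_TOKEN from the environment.
    assert os.environ.get("HUGGINGFACEHUB_API_TOKEN"), "set HUGGINGFACEHUB_API_TOKEN first"
    return huggingface_hub.HuggingFaceHub(
        repo_id="HuggingFaceH4/zephyr-7b-beta",  # assumption: the commit leaves repo_id=""
        verbose=True,
        model_kwargs={
            "temperature": 0.03,
            "max_new_tokens": 256,  # assumption: stands in for 'n_batch', a llama.cpp option
        },
    )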

@@ -70,6 +79,7 @@ with gr.Blocks() as demo:
     # llm = load_quantized_model(model_id=model_id) #type:ignore
     # ---------------------------------------------------------------------------------------------------
     llm = load_quantized_model()
+    llm_sm = load_llmware_model()
     # ---------------------------------------------------------------------------------------------------
     condense_question_prompt_template = PromptTemplate.from_template(_template)
     prompt_template = system_prompt + """

@@ -80,11 +90,11 @@ with gr.Blocks() as demo:
     memory = ConversationBufferWindowMemory(memory_key='chat_history', k=1, return_messages=True)
     retriever_from_llm = MultiQueryRetriever.from_llm(
         retriever=db2.as_retriever(search_kwargs={'k':5}),
-        llm =
+        llm = llm_sm,
     )
     qa2 = ConversationalRetrievalChain(
         retriever=retriever_from_llm,
-        question_generator= LLMChain(llm=
+        question_generator= LLMChain(llm=llm_sm, prompt=condense_question_prompt_template, memory=memory, verbose=True), #type:ignore
         combine_docs_chain=load_qa_chain(llm=llm, chain_type="stuff", prompt=qa_prompt, verbose=True), #type:ignore
         memory=memory,
         verbose=True,
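The effect of this hunk is a two-model split: the remote llm_sm both generates the alternative queries for MultiQueryRetriever and condenses follow-up questions, while the local quantized llm answers over the retrieved documents. A sketch of the wiring as a standalone function; db2, the prompt templates, and the two models are parameters here because they are built elsewhere in app.py:

from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.memory import ConversationBufferWindowMemory
from langchain.retrievers import MultiQueryRetriever

def build_chain(db2, llm, llm_sm, condense_question_prompt_template, qa_prompt):
    # k=1 keeps only the most recent exchange in the rolling chat history
    memory = ConversationBufferWindowMemory(memory_key='chat_history', k=1, return_messages=True)
    retriever_from_llm = MultiQueryRetriever.from_llm(
        retriever=db2.as_retriever(search_kwargs={'k': 5}),
        llm=llm_sm,  # the small remote model rewrites each query into variants
    )
    return ConversationalRetrievalChain(
        retriever=retriever_from_llm,
        # condense follow-ups into standalone questions with the remote model...
        question_generator=LLMChain(llm=llm_sm, prompt=condense_question_prompt_template),
        # ...then answer over the stuffed documents with the local quantized model
        combine_docs_chain=load_qa_chain(llm=llm, chain_type="stuff", prompt=qa_prompt),
        memory=memory,
    )

Unlike the committed version, this sketch attaches memory only to the outer chain; passing the same memory object to the inner LLMChain as well, as the diff does, risks polluting chat_history with the question-generation step's output.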

@@ -134,4 +144,4 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     demo.queue()
-    demo.launch(max_threads=
+    demo.launch(max_threads=8, debug=True, show_error=True)
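demo.queue() plus the completed launch call is the usual Gradio pattern for a shared Space: queueing serializes concurrent requests, max_threads=8 caps the worker pool, and debug/show_error surface tracebacks instead of failing silently. A minimal self-contained reproduction, assuming nothing from app.py beyond a Blocks app named demo:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # the real app builds the chat interface here

if __name__ == "__main__":
    demo.queue()  # enable request queueing for concurrent users
    demo.launch(max_threads=8, debug=True, show_error=True)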