Update app.py
app.py CHANGED
@@ -9,14 +9,14 @@ from auditqa.sample_questions import QUESTIONS
 from auditqa.engine.prompts import audience_prompts
 from auditqa.reports import files, report_list
 from auditqa.doc_process import process_pdf, get_local_qdrant
-from
+from langchain.schema import (
     HumanMessage,
     SystemMessage,
 )
-from langchain_huggingface import ChatHuggingFace
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from
+from langchain_community.llms import HuggingFaceEndpoint
+from langchain_community.chat_models.huggingface import ChatHuggingFace
 from qdrant_client.http import models as rest
 #from qdrant_client import QdrantClient
 from dotenv import load_dotenv

@@ -193,11 +193,12 @@ async def chat(query,history,sources,reports,subtype,year):

     # create rag chain
     chat_model = ChatHuggingFace(llm=llm_qa)
+
     ###-------------------------- get answers ---------------------------------------
     answer_lst = []
     for question, context in zip(question_lst , context_retrieved_lst):
         answer = chat_model.invoke(messages)
-        answer_lst.append(answer)
+        answer_lst.append(answer.content)
     docs_html = []
     for i, d in enumerate(context_retrieved, 1):
         docs_html.append(make_html_source(d, i))