Hugging Face Space — commit "Update app.py" (file changed: app.py).
Reconstructed unified diff (the original page rendered the two hunks twice, old side then new side, with gutter noise; the value after `max_new_tokens=` and the three removed `template_base` lines were truncated by the extraction and are marked as unreadable):

@@ -63,9 +63,10 @@ def get_llm():
         "text-generation",
         model=model,
         tokenizer=tokenizer,
-        max_new_tokens=<value unreadable in extraction>
+        max_new_tokens=512,
         temperature=0.3,
-        eos_token_id=tokenizer.eos_token_id
+        eos_token_id=tokenizer.eos_token_id,
+        return_full_text=False
     )

     return HuggingFacePipeline(pipeline=text_pipeline)

@@ -85,10 +86,11 @@ def load_vector_store():
     return FAISS.load_local(VS_BASE, embedding_model, allow_dangerous_deserialization=True)

 def build_specialist_agents(vectorstore, llm):
+
     template_base = (
-        <removed line, content unreadable in extraction>
-        <removed line, content unreadable in extraction>
-        <removed line, content unreadable in extraction>
+        "Você é um especialista da InfinityPay. Use o contexto abaixo para responder à pergunta de forma clara e direta.\n\n"
+        "Contexto: {context}\n\nPergunta: {question}\n\nResposta:")
+
     prompt_template = PromptTemplate(template=template_base, input_variables=["context", "question"])

     def make_agent():
|