Update app.py
app.py CHANGED
@@ -16,31 +16,31 @@ from llama_index.core.node_parser import SentenceSplitter
 
 max_seq_length = 512 # Choose any! We auto support RoPE Scaling internally!
 
-
-
-# -------------------------
-# {}
-# -------------------------
-# ### Pregunta:
-# {}
-# ### Respuesta:
-# {}"""
-
-prompt = """Responde a preguntas de forma clara, amable, concisa y solamente en el lenguaje español.
-
--------------------------
-Contexto:
+prompt = """Responde a preguntas de forma clara, amable, concisa y solamente en el lenguaje español, sobre el libro Ñande Ypykuéra.
+Contexto
+-------------------------
 {}
 -------------------------
-
 ### Pregunta:
 {}
-
-- Debes utilizar el contexto para responder la pregunta.
-
 ### Respuesta:
 {}"""
 
+# prompt = """Responde a preguntas de forma clara, amable, concisa y solamente en el lenguaje español.
+
+# -------------------------
+# Contexto:
+# {}
+# -------------------------
+
+# ### Pregunta:
+# {}
+
+# - Debes utilizar el contexto para responder la pregunta.
+
+# ### Respuesta:
+# {}"""
+
 # Initialize the LLM
 llm = Llama(model_path="model.gguf",
             n_ctx=512,
@@ -91,7 +91,7 @@ def reformat_rag(results_rag):
 
 def chat_stream_completion(message, history):
 
-    context =
+    context = reformat_rag(RAG.search(message, None, k=2))
     context = " \n ".join(context)
 
     full_prompt = prompt.format(context,message,"")
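
The Llama initialization itself is untouched by the commit. Since chat_stream_completion takes (message, history), the signature a Gradio-style chat callback expects, the function presumably streams tokens back to the UI once full_prompt is built. A sketch of that streaming step with llama-cpp-python; max_tokens=256 and the "###" stop marker are illustrative assumptions, not values from app.py.

from llama_cpp import Llama

# Same initialization as the unchanged lines 45-46 of the diff.
llm = Llama(model_path="model.gguf", n_ctx=512)

def stream_answer(full_prompt):
    # Hypothetical helper: with stream=True, llama-cpp-python yields
    # completion chunks; each one carries the next piece of generated
    # text in choices[0]["text"].
    answer = ""
    for chunk in llm(full_prompt, max_tokens=256, stop=["###"], stream=True):
        answer += chunk["choices"][0]["text"]
        yield answer  # yield the growing answer so a chat UI can re-render it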