Spaces:
Sleeping
Sleeping
Update src/llamaindex_backend.py
Browse files
src/llamaindex_backend.py
CHANGED
|
@@ -11,7 +11,7 @@ from llama_index.core.base.embeddings.base import SimilarityMode
|
|
| 11 |
|
| 12 |
prompt_template = """
|
| 13 |
<system instruction>
|
| 14 |
-
You are Gerard Lee
|
| 15 |
Reply as faithfully as possible and in no more than 5 complete sentences unless the <user query> requests to elaborate in details. Use contents from <context> only without prior knowledge except referring to <chat history> for seamless conversation.
|
| 16 |
</system instruction>
|
| 17 |
|
|
@@ -64,16 +64,17 @@ class GLlamaIndex():
|
|
| 64 |
return result["result"]
|
| 65 |
|
| 66 |
extended_query = f"<chat history>\n{history[-1]}\n</chat history><new query>\n{query}\n</new query>"
|
| 67 |
-
print(history[-1]
|
|
|
|
| 68 |
results = await self.index.aretrieve_context_multi(
|
| 69 |
[query, extended_query]
|
| 70 |
)
|
| 71 |
-
print(results)
|
| 72 |
eval_results = await self.aevaluate_context_multi(
|
| 73 |
[query, extended_query],
|
| 74 |
[r["result"] for r in results]
|
| 75 |
)
|
| 76 |
-
print(eval_results)
|
| 77 |
return results[0]["result"] if eval_results[0].score > eval_results[1].score \
|
| 78 |
else results[1]["result"]
|
| 79 |
|
|
|
|
| 11 |
|
| 12 |
prompt_template = """
|
| 13 |
<system instruction>
|
| 14 |
+
You are Gerard Lee, a data enthusiast with 6 years of experience in the field and humble about his success. Imagine you are in a conversation with someone who is interested in your portfolio.
|
| 15 |
Reply as faithfully as possible and in no more than 5 complete sentences unless the <user query> requests to elaborate in details. Use contents from <context> only without prior knowledge except referring to <chat history> for seamless conversation.
|
| 16 |
</system instruction>
|
| 17 |
|
|
|
|
| 64 |
return result["result"]
|
| 65 |
|
| 66 |
extended_query = f"<chat history>\n{history[-1]}\n</chat history><new query>\n{query}\n</new query>"
|
| 67 |
+
print(f"history[-1]: {history[-1]}")
|
| 68 |
+
print(f"history[:-1]: {history[:-1]}")
|
| 69 |
results = await self.index.aretrieve_context_multi(
|
| 70 |
[query, extended_query]
|
| 71 |
)
|
| 72 |
+
print("retrieval results", results)
|
| 73 |
eval_results = await self.aevaluate_context_multi(
|
| 74 |
[query, extended_query],
|
| 75 |
[r["result"] for r in results]
|
| 76 |
)
|
| 77 |
+
print("eval results", eval_results)
|
| 78 |
return results[0]["result"] if eval_results[0].score > eval_results[1].score \
|
| 79 |
else results[1]["result"]
|
| 80 |
|