Spaces:
Sleeping
Sleeping
Update telcom_core.py
Browse files- telcom_core.py +9 -16
telcom_core.py
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
import re
|
2 |
-
from llama_index.core import PromptTemplate
|
3 |
from llama_index.llms.openai import OpenAI
|
|
|
4 |
|
5 |
teamplate_prompt_upsell = '''You are a virtual assistant for a telecom company, designed to assist users with their queries and potentially upsell services. Your task is to analyze the customer's data from context, their query, and offer the most appropriate assistance.
|
6 |
|
@@ -183,23 +183,16 @@ def query_graph_rag_qa(graph_rag_index,query,search_level):
|
|
183 |
|
184 |
return response, reference , reference_text
|
185 |
|
186 |
-
def eval_llm(query, rag_response, grag_response):
    """Judge a RAG and a GraphRAG answer to the same query with an LLM evaluator.

    Args:
        query: The user question that both responses answered.
        rag_response: Answer produced by the plain RAG pipeline.
        grag_response: Answer produced by the GraphRAG pipeline.

    Returns:
        Tuple ``(grag_eval, rag_eval)`` — note the GraphRAG evaluation comes
        FIRST, matching the original return order that callers depend on.
    """
    def _judge(response):
        # Fill the shared evaluation prompt and ask the LLM to score it.
        # NOTE(review): `llm_eval_prompt` is a module-level template defined
        # elsewhere in this file and is assumed to contain {QUERY}/{RESPONSE}
        # placeholders — confirm against its definition.
        prompt = PromptTemplate(llm_eval_prompt).format(QUERY=query, RESPONSE=response)
        return OpenAI().complete(prompt)

    # Deduplicated: the original repeated the identical build/format/complete
    # sequence twice, once per response.
    rag_eval = _judge(rag_response)
    grag_eval = _judge(grag_response)
    return grag_eval, rag_eval
|
201 |
|
202 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
203 |
def plot_full_kg(kg_plot_path):
|
204 |
"""Plot the full knowledge graph and return the HTML representation."""
|
205 |
# return HTML(filename=kg_plot_path)
|
|
|
1 |
import re
|
|
|
2 |
from llama_index.llms.openai import OpenAI
|
3 |
+
from llama_index.core import PromptTemplate
|
4 |
|
5 |
teamplate_prompt_upsell = '''You are a virtual assistant for a telecom company, designed to assist users with their queries and potentially upsell services. Your task is to analyze the customer's data from context, their query, and offer the most appropriate assistance.
|
6 |
|
|
|
183 |
|
184 |
return response, reference , reference_text
|
185 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
186 |
|
187 |
|
188 |
+
def evaluate_llm(query, response):
    """
    Evaluates the provided query and response using a PromptTemplate and returns the completion from OpenAI.
    """
    # Build the evaluation prompt from the module-level template, then
    # substitute the query/response pair directly as keyword arguments.
    template = PromptTemplate(llm_eval_prompt)
    filled_prompt = template.format(QUERY=query, RESPONSE=response)
    # Hand the filled prompt to the LLM and return its completion unchanged.
    return OpenAI().complete(filled_prompt)
|
195 |
+
|
196 |
def plot_full_kg(kg_plot_path):
|
197 |
"""Plot the full knowledge graph and return the HTML representation."""
|
198 |
# return HTML(filename=kg_plot_path)
|