Spaces:
Sleeping
Sleeping
format the output
Browse files- telcom_core.py +25 -1
telcom_core.py
CHANGED
@@ -185,14 +185,38 @@ def query_graph_rag_qa(graph_rag_index,query,search_level):
|
|
185 |
|
186 |
|
187 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
188 |
def evaluate_llm(query,response):
|
189 |
"""
|
190 |
Evaluates the provided query and response using a PromptTemplate and returns the completion from OpenAI.
|
191 |
"""
|
192 |
data = {'QUERY': query, 'RESPONSE': response}
|
193 |
prompt = PromptTemplate(llm_eval_prompt).format(**data)
|
194 |
-
|
|
|
195 |
|
|
|
196 |
def plot_full_kg(kg_plot_path):
|
197 |
"""Plot the full knowledge graph and return the HTML representation."""
|
198 |
# return HTML(filename=kg_plot_path)
|
|
|
185 |
|
186 |
|
187 |
|
188 |
+
def parse_evaluation_regex(xml_text):
    """Parse an LLM-evaluation XML string into a simple HTML summary.

    Extracts every ``<metric name="...">`` label, its ``<justification>``
    body, and its ``<score>`` from *xml_text* and renders them as an HTML
    fragment: one ``<h2>`` metric heading, an ``<h3>`` score line, and a
    bold justification line per metric.

    Parameters
    ----------
    xml_text : str
        XML-ish evaluation output containing metric/justification/score tags.

    Returns
    -------
    str
        Concatenated HTML fragment; empty string if no metrics are found.
    """
    # Regex patterns for metrics, justifications, and scores.
    # DOTALL lets a justification span multiple lines.
    metric_pattern = re.compile(r'<metric name="(.+?)">')
    justification_pattern = re.compile(r'<justification>\s*(.+?)\s*</justification>', re.DOTALL)
    score_pattern = re.compile(r'<score>(\d+)</score>')

    # Collect all matches for each field independently.
    # NOTE(review): fields are re-paired positionally via zip below, which
    # assumes the three lists stay aligned; a malformed document with a
    # missing tag silently drops the trailing entries.
    metrics = metric_pattern.findall(xml_text)
    justifications = justification_pattern.findall(xml_text)
    scores = score_pattern.findall(xml_text)

    # Build the fragments in a list and join once instead of quadratic
    # string concatenation in the loop.
    parts = []
    for metric, justification, score in zip(metrics, justifications, scores):
        parts.append(f"<h2> {metric} </h2> <br>")
        parts.append(f"<h3>Score: {score} </h3> <br>")
        parts.append(f"<b>Justification:</b> {justification.strip()} <br>")

    return "".join(parts)
|
209 |
+
|
210 |
def evaluate_llm(query,response):
    """Score *response* against *query* with an LLM judge.

    Fills the module-level ``llm_eval_prompt`` template with the
    query/response pair, sends the rendered prompt to OpenAI for
    completion, and returns the completion's raw text.
    """
    # Render the evaluation prompt with both fields substituted in.
    rendered_prompt = PromptTemplate(llm_eval_prompt).format(
        QUERY=query, RESPONSE=response
    )
    # One-shot completion; only the text payload is returned to the caller.
    completion = OpenAI().complete(rendered_prompt)
    return completion.text
|
218 |
|
219 |
+
|
220 |
def plot_full_kg(kg_plot_path):
    """Plot the full knowledge graph and return the HTML representation.

    NOTE(review): rendering is currently disabled — the HTML call below is
    commented out, so the function falls through and returns ``None``.
    """
    # return HTML(filename=kg_plot_path)
|