import re

from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate

teamplate_prompt_upsell = '''You are a virtual assistant for a telecom company, designed to assist users with their queries and potentially upsell services. Your task is to analyze the customer's data from the context and their query, and offer the most appropriate assistance.

First, you will be given the customer's data context. This information will help you understand the customer's current plan and usage patterns.

When interacting with a customer, you will receive a query with their details, such as their name or phone number.

<query>
{QUERY}
</query>

Analyze the query to determine the type of assistance required. Categorize it into one of the following:
1. Technical Support
2. Billing Inquiry
3. Plan Information
4. Service Upgrade
5. General Inquiry

Based on the query type and customer data, provide an appropriate response. Your response should:
1. Address the customer's immediate concern
2. Be clear and concise
3. Use a friendly and casual tone
4. Provide the facts and relations that support the response
5. Use emojis to engage the customer in conversation

If the query presents an opportunity for upselling, consider recommending relevant services or upgrades based on the customer's current plan and usage patterns. However, ensure that your primary focus remains on resolving the customer's initial query.

Format your response as follows:
<response>
<query_type>[Categorized query type]</query_type>
<answer>[Your detailed response addressing the customer's query]</answer>
<reference>[The reference documents used for generating the response]</reference>
<facts>[The facts used for generating the response]</facts>
<upsell_opportunity>[If applicable, a brief upsell recommendation]</upsell_opportunity>
</response>

Remember to always prioritize customer satisfaction and only suggest upsells when they genuinely benefit the customer.
'''

llm_eval_prompt = """You are an AI tasked with evaluating the performance of a language model (LLM) based on a given query and response. Your role is to assess the Graph RAG model's output against the vanilla RAG model's output using four specific metrics and provide scores for each.

Here are the metrics you will use to evaluate the LLM's performance:
1. Comprehensiveness: How thoroughly and completely the response addresses all aspects of the query.
2. Diversity: The variety of perspectives, examples, or approaches included in the response.
3. Empowerment: How well the response enables the user to understand or act on the information provided.
4. Directness: The clarity and conciseness of the response in addressing the query.

To perform your evaluation, carefully analyze the following query and responses, and make sure to score the GRAPHRAG_RESPONSE higher than the VANILLARAG_RESPONSE:

<query>
{QUERY}
</query>

<graphrag_response>
{GRAPHRAG_RESPONSE}
</graphrag_response>

<vanillarag_response>
{VANILLARAG_RESPONSE}
</vanillarag_response>

For each metric, consider the following:
1. Comprehensiveness: Does the response cover all aspects of the query? Are there any missing or underdeveloped points?
2. Diversity: Does the response offer multiple viewpoints or examples? Is there a good range of information or approaches presented?
3. Empowerment: Does the response provide actionable information or insights? Does it enhance the user's understanding or ability to address the query?
4. Directness: Is the response clear and to the point? Does it avoid unnecessary information or tangents?

Score each metric on a scale from 0 to 5, where 0 is the lowest (poor performance) and 5 is the highest (excellent performance).

Present your evaluation for both Graph RAG and Vanilla RAG in the following format:
<h3>Graph RAG:</h3>
<b>Comprehensiveness:</b>[Your score from 0-5]<br>
<b>Diversity:</b>[Your score from 0-5]<br>
<b>Empowerment:</b>[Your score from 0-5]<br>
<b>Directness:</b>[Your score from 0-5]<br>
---
<h3>Vanilla RAG:</h3>
<b>Comprehensiveness:</b>[Your score from 0-5]<br>
<b>Diversity:</b>[Your score from 0-5]<br>
<b>Empowerment:</b>[Your score from 0-5]<br>
<b>Directness:</b>[Your score from 0-5]<br>
---
<performance_report>
[1-2 sentences about why Graph RAG performed better than vanilla RAG in this context. Do not make assumptions about information not present in the given text.]
</performance_report>
"""

reasoning_graph_prompt = """You are tasked with creating a reasoning graph based on a customer query, an AI-generated response, and provided references. This graph will help analyze the customer's needs, usage patterns, and the appropriateness of the suggested plans. Follow these steps to complete the task.

First, you will be provided with three inputs:

<QUERY>
{QUERY}
</QUERY>

<RESPONSE>
{RESPONSE}
</RESPONSE>

<REFERENCES>
{REFERENCES}
</REFERENCES>

Using only the information provided in these inputs, create an LLM Reasoning Graph with the following structure:

<reasoning_graph>
<customer_needs>
List the main customer needs identified from the query and response
</customer_needs>
<usage_and_behavior>
Show the facts from <REFERENCES> about the customer's current usage and behavior
</usage_and_behavior>
<telkomsel_plans>
List the Telkomsel plans mentioned in the response
</telkomsel_plans>
<edges>
List the triplets from <REFERENCES> that identify relationships between customer needs, usage patterns, and suggested plans
</edges>
</reasoning_graph>

To complete each section:
1. Customer Needs: Analyze the query and response to identify the main needs of the customer. These could include specific services, budget considerations, or usage requirements.
2. Usage and Behavior: Show only the facts from <REFERENCES>.
3. Telkomsel Plans: List the specific Telkomsel plans mentioned in <REFERENCES>.
4. Edges: Build relationships from the facts, using the entity -> relation -> entity format.

Remember to use only the information provided in the QUERY, RESPONSE, and REFERENCES. Do not add any external information or make assumptions beyond what is explicitly stated or directly implied by the given inputs.

Format your output using the XML tags provided above. Ensure that each section is clearly delineated and easy to read.
"""

def extract_pattern_triplet(text):
    """Extract 'entity -> relation -> entity' triplets from a block of text."""
    # Match three word tokens joined by "->", allowing optional whitespace around the arrows
    pattern = re.compile(r'\b\w+\b\s*->\s*\b\w+\b\s*->\s*\b\w+\b')
    # Find all matches in the text and join them as HTML-friendly lines
    matches = pattern.findall(text)
    return "\n <br> ".join(matches)

def query_rag_qa(rag_index, query, search_level):
    """
    Query the vanilla RAG index with a given query and search level.
    Returns the response, the retrieved nodes, and the response metadata.

    Parameters:
    - rag_index: The index to query
    - query: The query to search for
    - search_level: The number of top similar results to retrieve

    Returns:
    - response: The query response
    - nodes: The retrieved nodes
    - metadata: The metadata of the response
    """
    myretriever = rag_index.as_retriever(
        include_text=True,  # include source text, default True
        similarity_top_k=search_level,
    )
    query_engine = rag_index.as_query_engine(
        sub_retrievers=[
            myretriever,
        ],
        include_text=True,
        similarity_top_k=search_level,
    )
    response = query_engine.query(query)
    nodes = myretriever.retrieve(query)
    return response, nodes, response.metadata

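# Minimal usage sketch (assumes `rag_index` is an already-built llama_index index
# that exposes as_retriever/as_query_engine with these keyword arguments, e.g. a
# PropertyGraphIndex; the query string below is hypothetical):
#   response, nodes, metadata = query_rag_qa(rag_index, "What plan am I on?", search_level=3)
#   print(str(response))
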
def query_graph_rag_qa(graph_rag_index, query, search_level):
    """
    Query the Graph RAG index with a given query and search level.
    Returns the response, the extracted triplet references, and the reference text.

    Parameters:
    - graph_rag_index: The property graph index to query
    - query: The query to search for
    - search_level: The number of top similar results to retrieve

    Returns:
    - response: The query response
    - reference: The triplets extracted from the retrieved nodes
    - reference_text: The raw text of the retrieved nodes
    """
    myretriever = graph_rag_index.as_retriever(
        include_text=True,  # include source text, default True
        similarity_top_k=search_level,
    )
    query_engine = graph_rag_index.as_query_engine(
        sub_retrievers=[
            myretriever,
        ],
        include_text=True,
        similarity_top_k=search_level,
    )
    data = {'QUERY': query}
    # prompt = PromptTemplate(template=teamplate_prompt_upsell, input_variables=["QUERY"])
    # prompt = PromptTemplate(teamplate_prompt_upsell)  # , input_variables=["QUERY"])
    # query_ready = prompt.format(**data)
    response = query_engine.query(query)
    nodes = myretriever.retrieve(query)
    # parsed_resp = parse_response_with_regex(str(response))
    reference = []
    reference_text = []
    for node in nodes:
        reference.append(extract_pattern_triplet(node.text))
        reference_text.append(node.text)
    return response, reference, reference_text

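# Minimal usage sketch (assumes `graph_rag_index` is an already-built llama_index
# PropertyGraphIndex; the query string is hypothetical):
#   response, reference, reference_text = query_graph_rag_qa(
#       graph_rag_index, "Recommend a plan for heavy data usage", search_level=5)
# `reference` holds the "entity -> relation -> entity" triplets extracted from each
# retrieved node, while `reference_text` holds the corresponding raw node text.
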
def parse_evaluation_regex(xml_text):
    """Parse an XML-style evaluation containing <metric name="...">, <justification>,
    and <score> tags into a simple HTML summary."""
    # Define regex patterns for metric names, justifications, and scores
    metric_pattern = re.compile(r'<metric name="(.+?)">')
    justification_pattern = re.compile(r'<justification>\s*(.+?)\s*</justification>', re.DOTALL)
    score_pattern = re.compile(r'<score>(\d+)</score>')
    # Find all matches for metrics, justifications, and scores
    metrics = metric_pattern.findall(xml_text)
    justifications = justification_pattern.findall(xml_text)
    scores = score_pattern.findall(xml_text)
    # Build the parsed HTML output
    parsed_text = ""
    # Iterate through each metric and its corresponding justification and score
    for metric, justification, score in zip(metrics, justifications, scores):
        parsed_text += f"<h2> {metric} </h2> <br>"
        parsed_text += f"<h3>Score: {score} </h3> <br>"
        # parsed_text += f"<b>Justification:</b> {justification.strip()} <br>"
    return parsed_text

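# Illustrative input (hypothetical) for parse_evaluation_regex:
#   '<metric name="Comprehensiveness"><justification>Covers all points.</justification><score>4</score></metric>'
# would yield '<h2> Comprehensiveness </h2> <br><h3>Score: 4 </h3> <br>'.
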
def evaluate_llm(query, grag_response, vrag_response):
    """
    Evaluates the Graph RAG and vanilla RAG responses to a query using the
    evaluation PromptTemplate and returns the completion from OpenAI.
    """
    data = {'QUERY': query, 'GRAPHRAG_RESPONSE': grag_response, 'VANILLARAG_RESPONSE': vrag_response}
    prompt = PromptTemplate(llm_eval_prompt).format(**data)
    eval_text = OpenAI().complete(prompt)
    return eval_text

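# Minimal usage sketch (assumes the two response strings were produced by
# query_graph_rag_qa and query_rag_qa above, and that OPENAI_API_KEY is set):
#   evaluation = evaluate_llm(user_query, str(graph_response), str(vanilla_response))
#   print(str(evaluation))
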
def parse_reasoning_graph(xml_text):
    """Convert XML-style reasoning-graph sections into a simple HTML summary."""
    # Match <tag>...</tag> pairs and capture the tag name and its content
    section_pattern = re.compile(r'<(\w+)>\s*(.*?)\s*</\1>', re.DOTALL)
    # Find all matches for the sections
    matches = section_pattern.findall(xml_text)
    # Build the parsed HTML output
    parsed_text = ""
    # Iterate through each section and its corresponding content
    for section, content in matches:
        # Replace newlines with <br> tags
        formatted_content = content.strip().replace('\n', '<br>')
        parsed_text += f"<h2>{section.replace('_', ' ').title()}</h2><br>"
        parsed_text += f"<p>{formatted_content}</p><br>"
    return parsed_text

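# Illustrative example (hypothetical input):
#   parse_reasoning_graph("<customer_needs>More data</customer_needs>")
# returns "<h2>Customer Needs</h2><br><p>More data</p><br>".
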
def reasoning_graph(query, response, reference_text):
    """
    Generates an LLM Reasoning Graph based on the provided query, response, and references.
    """
    try:
        # First distill the raw reference text into facts with a simple extraction prompt
        data = {'REFERENCES': reference_text}
        prompt = PromptTemplate("extract the facts from the following text: {REFERENCES}").format(**data)
        facts = OpenAI().complete(prompt)
    except Exception:
        # Fall back to the first five references, e.g. when the full text is too long for the model
        data = {'REFERENCES': reference_text[0:5]}
        prompt = PromptTemplate("extract the facts from the following text: {REFERENCES}").format(**data)
        facts = OpenAI().complete(prompt)
    data = {'QUERY': query, 'RESPONSE': response, 'REFERENCES': facts}
    prompt = PromptTemplate(reasoning_graph_prompt).format(**data)
    graph_output = OpenAI().complete(prompt)
    return graph_output

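# Minimal usage sketch (assumes `response` and `reference_text` come from
# query_graph_rag_qa above, and that OPENAI_API_KEY is set):
#   graph_xml = reasoning_graph(user_query, str(response), reference_text)
#   html = parse_reasoning_graph(str(graph_xml))
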
def plot_full_kg(kg_plot_path):
    """Plot the full knowledge graph and return the HTML representation."""
    # return HTML(filename=kg_plot_path)
    with open(kg_plot_path, "r") as file:
        return file.read()

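# End-to-end usage sketch, not part of the original app flow. Assumptions: source
# documents live in ./data, OPENAI_API_KEY is set, and both pipelines are backed
# here by llama_index PropertyGraphIndex instances, since query_rag_qa and
# query_graph_rag_qa both pass the `sub_retrievers` argument to as_query_engine.
if __name__ == "__main__":
    from llama_index.core import SimpleDirectoryReader, PropertyGraphIndex

    docs = SimpleDirectoryReader("data").load_data()            # assumed data directory
    graph_index = PropertyGraphIndex.from_documents(docs)       # graph-based index
    vanilla_index = PropertyGraphIndex.from_documents(docs)     # stand-in for the vanilla RAG index

    user_query = "Which plan fits a customer who streams video daily?"  # hypothetical query
    g_resp, g_ref, g_ref_text = query_graph_rag_qa(graph_index, user_query, search_level=5)
    v_resp, _, _ = query_rag_qa(vanilla_index, user_query, search_level=5)

    print(evaluate_llm(user_query, str(g_resp), str(v_resp)))
    print(reasoning_graph(user_query, str(g_resp), g_ref_text))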