rknl committed on
Commit
68b45b6
·
verified ·
1 Parent(s): a1be166
Files changed (1) hide show
  1. app.py +37 -205
app.py CHANGED
@@ -9,226 +9,57 @@ import networkx as nx
9
  from pyvis.network import Network
10
  from IPython.display import HTML, Markdown, display
11
 
12
# Expose the 'oai' secret to LlamaIndex/OpenAI clients via the environment.
# NOTE(review): os.getenv('oai') returns None when the secret is missing, and
# assigning None into os.environ raises TypeError — confirm 'oai' is always set.
os.environ["OPENAI_API_KEY"] = os.getenv('oai')
13
 
 
 
 
 
 
14
 
15
#Graph-RAG
# Persisted property-graph index directory plus the pre-rendered pyvis HTML
# that plot_full_kg() serves.
kg_index_path = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
kg_plot_path = kg_index_path+"/full_kg.html"
# Rehydrate the knowledge-graph index from its persisted storage context.
graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=kg_index_path)
)

#RAG
# Plain vector-RAG index persisted over the same telecom/package documents.
rag_index_path = "./telcom_RAG_full_withpackagedata_category/"
rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=rag_index_path)
)
27
 
28
# System prompt for the telecom assistant persona. {QUERY} is filled in via
# PromptTemplate.format before being sent to the LLM.
# Fix: "causal tone" -> "casual tone" (typo that misdirected the LLM's style).
# NOTE(review): the variable name "teamplate" is itself a typo, but it is kept
# unchanged so any external references to this module attribute keep working.
teamplate_prompt_upsell = '''You are a virtual assistant for a telecom company, designed to assist users with their queries and potentially upsell services. Your task is to analyze the customer's data from context, their query, and offer the most appropriate assistance.

First, you will be given the customer's data context. This information will help you understand the customer's current plan and usage patterns:

When interacting with a customer, you will receive a query with their details like name or phone number.
<query>
{QUERY}
</query>

Analyze the query to determine the type of assistance required. Categorize it into one of the following:
1. Technical Support
2. Billing Inquiry
3. Plan Information
4. Service Upgrade
5. General Inquiry

Based on the query type and customer data, provide an appropriate response. Your response should:
1. Address the customer's immediate concern
2. Be clear and concise
3. Use a friendly and casual tone
4. Make sure to provide facts and relations for each response
5. Use Emojis to engage the customer in conversation

If the query presents an opportunity for upselling, consider recommending relevant services or upgrades based on the customer's current plan and usage patterns. However, ensure that your primary focus remains on resolving the customer's initial query.

Format your response as follows:

<response>
<query_type>[Categorized query type]</query_type>
<answer>[Your detailed response addressing the customer's query]</answer>
<reference>[Provide the reference documents used for generating the response]</reference>
<facts>[Provide the facts used for generating the response]</facts>
<upsell_opportunity>[If applicable, provide a brief upsell recommendation]</upsell_opportunity>
</response>

Remember to always prioritize customer satisfaction and only suggest upsells when they genuinely benefit the customer.
'''
65
-
66
-
67
- llm_eval_prompt = """You are an AI tasked with evaluating the performance of a language model (LLM) based on a given query and response. Your role is to assess the LLM's output using four specific metrics and provide scores for each.
68
-
69
- Here are the metrics you will use to evaluate the LLM's performance:
70
-
71
- 1. Comprehensiveness: How thoroughly and completely the response addresses all aspects of the query.
72
- 2. Diversity: The variety of perspectives, examples, or approaches included in the response.
73
- 3. Empowerment: How well the response enables the user to understand or act on the information provided.
74
- 4. Directness: The clarity and conciseness of the response in addressing the query.
75
-
76
- To perform your evaluation, carefully analyze the following query and response:
77
-
78
- <query>
79
- {QUERY}
80
- </query>
81
-
82
- <response>
83
- {RESPONSE}
84
- </response>
85
-
86
- For each metric, consider the following:
87
-
88
- 1. Comprehensiveness: Does the response cover all aspects of the query? Are there any missing or underdeveloped points?
89
- 2. Diversity: Does the response offer multiple viewpoints or examples? Is there a good range of information or approaches presented?
90
- 3. Empowerment: Does the response provide actionable information or insights? Does it enhance the user's understanding or ability to address the query?
91
- 4. Directness: Is the response clear and to the point? Does it avoid unnecessary information or tangents?
92
-
93
- Score each metric on a scale from 0 to 5, where 0 is the lowest (poor performance) and 5 is the highest (excellent performance).
94
-
95
- For each metric, provide a brief justification for your score before stating the score itself. Your justification should reference specific aspects of the query and response that influenced your decision.
96
-
97
- Present your evaluation in the following format:
98
-
99
- <evaluation>
100
- <metric name="Comprehensiveness">
101
- <justification>
102
- [Your justification for the Comprehensiveness score]
103
- </justification>
104
- <score>[Your score from 0-5]</score>
105
- </metric>
106
-
107
- <metric name="Diversity">
108
- <justification>
109
- [Your justification for the Diversity score]
110
- </justification>
111
- <score>[Your score from 0-5]</score>
112
- </metric>
113
-
114
- <metric name="Empowerment">
115
- <justification>
116
- [Your justification for the Empowerment score]
117
- </justification>
118
- <score>[Your score from 0-5]</score>
119
- </metric>
120
-
121
- <metric name="Directness">
122
- <justification>
123
- [Your justification for the Directness score]
124
- </justification>
125
- <score>[Your score from 0-5]</score>
126
- </metric>
127
- </evaluation>
128
-
129
- Ensure that your evaluation is fair, objective, and based solely on the provided query and response. Do not make assumptions about information not present in the given text.
130
- """
131
-
132
def extract_pattern_triplet(text):
    """Extract every "subject -> predicate -> object" triplet from *text*.

    A triplet is three word-tokens separated by "->" (whitespace around the
    arrows is optional). Matches are joined with a newline plus " <br> " so
    they display one per line in the Gradio HTML panel; returns an empty
    string when no triplet is present.
    """
    triplet_pattern = r'\b\w+\b\s*->\s*\b\w+\b\s*->\s*\b\w+\b'
    triplets = re.findall(triplet_pattern, text)
    return "\n <br> ".join(triplets)
138
-
139
def query_rag_qa(query, search_level):
    """Answer *query* with the plain vector-RAG index.

    Parameters:
    - query: natural-language question to answer
    - search_level: similarity_top_k value controlling retrieval breadth

    Return:
    - response: the query-engine answer object
    - nodes: the nodes retrieved for the query
    - metadata: the response metadata
    """
    retriever = rag_index.as_retriever(
        include_text=True,  # keep source text on retrieved nodes
        similarity_top_k=search_level,
    )
    engine = rag_index.as_query_engine(
        sub_retrievers=[retriever],
        include_text=True,
        similarity_top_k=search_level,
    )
    answer = engine.query(query)
    retrieved_nodes = retriever.retrieve(query)
    return answer, retrieved_nodes, answer.metadata
 
 
166
 
167
-
168
def query_graph_rag_qa(query, search_level):
    """Answer *query* with the Graph-RAG (knowledge-graph) index.

    Parameters:
    - query: natural-language question to answer
    - search_level: similarity_top_k value controlling retrieval breadth

    Return:
    - response: the query-engine answer object
    - reference: "a -> b -> c" triplets extracted from each retrieved node
    - reference_text: the raw text of each retrieved node
    """
    retriever = graph_rag_index.as_retriever(
        include_text=True,  # keep source text on retrieved nodes
        similarity_top_k=search_level,
    )
    engine = graph_rag_index.as_query_engine(
        sub_retrievers=[retriever],
        include_text=True,
        similarity_top_k=search_level,
    )
    answer = engine.query(query)
    retrieved_nodes = retriever.retrieve(query)

    triplet_refs = [extract_pattern_triplet(node.text) for node in retrieved_nodes]
    raw_texts = [node.text for node in retrieved_nodes]
    return answer, triplet_refs, raw_texts
202
 
203
def query_tqa(query, search_level):
    """Run the query through both pipelines (Graph-RAG first, then RAG)
    and return all six results as one flat tuple."""
    graph_results = query_graph_rag_qa(query, search_level)
    vector_results = query_rag_qa(query, search_level)
    return (*graph_results, *vector_results)
207
 
 
 
 
208
 
209
def _score_response(query, response):
    """Format the shared judge prompt for one (query, response) pair and
    score it with the OpenAI completion endpoint."""
    prompt = PromptTemplate(llm_eval_prompt)
    return OpenAI().complete(prompt.format(QUERY=query, RESPONSE=response))


def eval_llm(query, rag_response, grag_response):
    """Score both pipeline answers with an LLM judge.

    Parameters:
    - query: the user query both responses answered
    - rag_response: answer produced by the plain RAG pipeline
    - grag_response: answer produced by the Graph-RAG pipeline

    Return:
    - (grag_eval, rag_eval): note the graph evaluation comes first, matching
      the (grag, rag) ordering used by the rest of the app.
    """
    # The prompt-build/complete sequence was copy-pasted twice; the helper
    # keeps it in one place. RAG is still evaluated before Graph-RAG to
    # preserve the original call order.
    rag_eval = _score_response(query, rag_response)
    grag_eval = _score_response(query, grag_response)
    return grag_eval, rag_eval
224
 
 
 
 
 
 
 
225
 
226
def plot_full_kg():
    """Return the pre-rendered knowledge-graph HTML for embedding in Gradio."""
    # Explicit encoding: the HTML is UTF-8, and the platform default codec
    # (e.g. cp1252 on Windows) could otherwise fail to decode it.
    with open(kg_plot_path, "r", encoding="utf-8") as file:
        return file.read()
231
 
 
232
  with gr.Blocks() as demo:
233
  gr.Markdown("<h1>Telcom Graph-RAG v0.1</h1>")
234
 
@@ -237,6 +68,8 @@ with gr.Blocks() as demo:
237
  query_input = gr.Textbox(label="Input Your Query..")
238
  search_level = gr.Slider(minimum=1, maximum=50, value=3, step=5, label="Search level")
239
  ask_button = gr.Button("Ask TelcomVA!!")
 
 
240
  with gr.Row():
241
  with gr.Accordion("Graph-RAG!", open=True):
242
  grag_output = gr.Textbox(label="Response")
@@ -247,6 +80,8 @@ with gr.Blocks() as demo:
247
  rag_output = gr.Textbox(label="Response")
248
  rag_reference = gr.Textbox(label="Extracted Reference")
249
  rag_reference_text = gr.Textbox(label="Extracted Reference raw")
 
 
250
  with gr.Row():
251
  grag_performance = gr.Textbox(label="Graph-RAG Performance")
252
  rag_performance = gr.Textbox(label="RAG Performance")
@@ -286,7 +121,4 @@ with gr.Blocks() as demo:
286
  ],
287
  inputs=[query_input]
288
  )
289
-
290
-
291
  demo.launch(auth=(os.getenv('id'), os.getenv('pass')), share=True)
292
- # demo.launch(share=False)
 
9
  from pyvis.network import Network
10
  from IPython.display import HTML, Markdown, display
11
 
 
12
 
13
# Define constants for index paths
# NOTE(review): os.getenv('oai') returns None when the secret is missing, and
# assigning None into os.environ raises TypeError — confirm 'oai' is always
# configured in the deployment environment.
os.environ["OPENAI_API_KEY"] = os.getenv('oai')
# Persisted LlamaIndex storage directories for the two retrieval pipelines.
KG_INDEX_PATH = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
KG_PLOT_PATH = KG_INDEX_PATH + "/full_kg.html"  # pre-rendered pyvis graph HTML
RAG_INDEX_PATH = "./telcom_RAG_full_withpackagedata_category/"
 
19
# Load Graph-RAG index (knowledge-graph retrieval) from persisted storage.
graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=KG_INDEX_PATH)
)

# Load RAG index (plain vector retrieval) from persisted storage.
rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=RAG_INDEX_PATH)
)
28
 
29
def query_tqa(query, search_level):
    """
    Query both Graph-RAG and RAG models and return their responses and references.

    Args:
        query (str): The query to be processed.
        search_level (str): The level of search to be performed.

    Returns:
        tuple: Responses and references from both Graph-RAG and RAG models,
        Graph-RAG results first.
    """
    graph_results = query_graph_rag_qa(query, search_level)
    vector_results = query_rag_qa(query, search_level)
    return (*graph_results, *vector_results)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
 
 
 
 
44
 
45
def eval_llm(query, rag_response, grag_response):
    """
    Evaluate responses from both Graph-RAG and RAG models using an LLM.

    Args:
        query (str): The query that was used to generate the responses.
        rag_response (str): The response from the RAG model.
        grag_response (str): The response from the Graph-RAG model.

    Returns:
        tuple: Evaluation results for both responses (Graph-RAG first).
    """
    # NOTE(review): `evaluate_llm` is not defined anywhere in the visible
    # source — this raises NameError when called. The pre-refactor version
    # built a PromptTemplate from llm_eval_prompt and called
    # OpenAI().complete(); confirm the helper exists elsewhere in the file
    # or restore that implementation.
    grag_eval = evaluate_llm(query, grag_response)
    rag_eval = evaluate_llm(query, rag_response)
    return grag_eval, rag_eval
60
 
 
 
 
 
 
61
 
62
+
63
  with gr.Blocks() as demo:
64
  gr.Markdown("<h1>Telcom Graph-RAG v0.1</h1>")
65
 
 
68
  query_input = gr.Textbox(label="Input Your Query..")
69
  search_level = gr.Slider(minimum=1, maximum=50, value=3, step=5, label="Search level")
70
  ask_button = gr.Button("Ask TelcomVA!!")
71
+
72
+
73
  with gr.Row():
74
  with gr.Accordion("Graph-RAG!", open=True):
75
  grag_output = gr.Textbox(label="Response")
 
80
  rag_output = gr.Textbox(label="Response")
81
  rag_reference = gr.Textbox(label="Extracted Reference")
82
  rag_reference_text = gr.Textbox(label="Extracted Reference raw")
83
+
84
+
85
  with gr.Row():
86
  grag_performance = gr.Textbox(label="Graph-RAG Performance")
87
  rag_performance = gr.Textbox(label="RAG Performance")
 
121
  ],
122
  inputs=[query_input]
123
  )
 
 
124
  demo.launch(auth=(os.getenv('id'), os.getenv('pass')), share=True)