Update app.py

app.py CHANGED
@@ -2,30 +2,28 @@ import os,re
import gradio as gr
import nest_asyncio
from langchain import PromptTemplate
-from llama_index.core import StorageContext, load_index_from_storage
+from llama_index.core import PromptTemplate, StorageContext, load_index_from_storage
+from llama_index.llms.openai import OpenAI
+
import networkx as nx
from pyvis.network import Network
from IPython.display import HTML, Markdown, display

-# nest_asyncio.apply()
-
-kg_index_path = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
-kg_plot_path = kg_index_path+"full_kg.html"
os.environ["OPENAI_API_KEY"] = os.getenv('oai')


-index = load_index_from_storage(
+#Graph-RAG
+kg_index_path = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
+kg_plot_path = kg_index_path+"/full_kg.html"
+graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=kg_index_path)
)
-query_engine = index.as_query_engine(
-    include_text=True,
-    similarity_top_k=20,
-)
-retriever = index.as_retriever(
-    include_text=True, # include source text, default True
-    similarity_top_k=20,
-)

+#RAG
+rag_index_path = "./telcom_RAG_full_withpackagedata_category/"
+rag_index = load_index_from_storage(
+    StorageContext.from_defaults(persist_dir=rag_index_path)
+)

teamplate_prompt_upsell = '''You are a virtual assistant for a telecom company, designed to assist users with their queries and potentially upsell services. Your task is to analyze the customer's data from context, their query, and offer the most appropriate assistance.

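Both persist directories are expected to exist before the app starts; load_index_from_storage only reads them back. A rough sketch of the kind of one-off ingestion step that could produce the plain-RAG directory, run outside app.py (the reader, source folder, and index type here are assumptions, not part of this commit):

# Hypothetical ingestion script (assumed, not in this repo): build and persist
# a vector index so app.py can later reload it with load_index_from_storage().
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

docs = SimpleDirectoryReader("./telcom_docs/").load_data()  # placeholder source folder
index = VectorStoreIndex.from_documents(docs)
index.storage_context.persist(persist_dir="./telcom_RAG_full_withpackagedata_category/")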
@@ -66,41 +64,70 @@ Remember to always prioritize customer satisfaction and only suggest upsells whe
'''


+llm_eval_prompt = """You are an AI tasked with evaluating the performance of a language model (LLM) based on a given query and response. Your role is to assess the LLM's output using four specific metrics and provide scores for each.

+Here are the metrics you will use to evaluate the LLM's performance:

-
-
-
-
-reference_pattern = re.compile(r'<reference>(.*?)</reference>', re.DOTALL)
-facts_pattern = re.compile(r'<facts>(.*?)</facts>', re.DOTALL)
-upsell_opportunity_pattern = re.compile(r'<upsell_opportunity>(.*?)</upsell_opportunity>', re.DOTALL)
+1. Comprehensiveness: How thoroughly and completely the response addresses all aspects of the query.
+2. Diversity: The variety of perspectives, examples, or approaches included in the response.
+3. Empowerment: How well the response enables the user to understand or act on the information provided.
+4. Directness: The clarity and conciseness of the response in addressing the query.

-
-query_type = query_type_pattern.search(xml_response).group(1).strip()
-answer = answer_pattern.search(xml_response).group(1).strip()
-reference = reference_pattern.search(xml_response).group(1).strip()
-facts = facts_pattern.search(xml_response).group(1).strip()
-upsell_opportunity = upsell_opportunity_pattern.search(xml_response).group(1).strip()
+To perform your evaluation, carefully analyze the following query and response:

-
-
-
-{query_type}
+<query>
+{QUERY}
+</query>

-
-
+<response>
+{RESPONSE}
+</response>

-
-{reference}
+For each metric, consider the following:

-
-
+1. Comprehensiveness: Does the response cover all aspects of the query? Are there any missing or underdeveloped points?
+2. Diversity: Does the response offer multiple viewpoints or examples? Is there a good range of information or approaches presented?
+3. Empowerment: Does the response provide actionable information or insights? Does it enhance the user's understanding or ability to address the query?
+4. Directness: Is the response clear and to the point? Does it avoid unnecessary information or tangents?

-
-
-
-
+Score each metric on a scale from 0 to 5, where 0 is the lowest (poor performance) and 5 is the highest (excellent performance).
+
+For each metric, provide a brief justification for your score before stating the score itself. Your justification should reference specific aspects of the query and response that influenced your decision.
+
+Present your evaluation in the following format:
+
+<evaluation>
+<metric name="Comprehensiveness">
+<justification>
+[Your justification for the Comprehensiveness score]
+</justification>
+<score>[Your score from 0-5]</score>
+</metric>
+
+<metric name="Diversity">
+<justification>
+[Your justification for the Diversity score]
+</justification>
+<score>[Your score from 0-5]</score>
+</metric>
+
+<metric name="Empowerment">
+<justification>
+[Your justification for the Empowerment score]
+</justification>
+<score>[Your score from 0-5]</score>
+</metric>
+
+<metric name="Directness">
+<justification>
+[Your justification for the Directness score]
+</justification>
+<score>[Your score from 0-5]</score>
+</metric>
+</evaluation>
+
+Ensure that your evaluation is fair, objective, and based solely on the provided query and response. Do not make assumptions about information not present in the given text.
+"""

def extract_pattern_triplet(text):
    # Define the regex pattern to match the desired format
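llm_eval_prompt pins the judge's output to a fixed <evaluation> layout, so the four scores can be recovered with a small regex pass over the completion. A minimal sketch, assuming the model actually follows the requested format; the helper name is hypothetical and not part of this commit:

import re

def parse_eval_scores(eval_text):
    # Hypothetical helper: map each <metric name="...">...<score>N</score> pair
    # in the judge's reply to {metric_name: score}.
    pattern = re.compile(
        r'<metric name="([^"]+)">.*?<score>\s*(\d)\s*</score>',
        re.DOTALL,
    )
    return {name: int(score) for name, score in pattern.findall(eval_text)}

# e.g. parse_eval_scores(str(grag_eval)) -> {'Comprehensiveness': 4, 'Diversity': 3, ...}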
@@ -109,15 +136,61 @@ def extract_pattern_triplet(text):
    matches = pattern.findall(text)
    return "\n <br> ".join(matches)

-def query_tqa(query):
-
-
-
-
-
-
-
-
+def query_rag_qa(query,search_level):
+    """
+    A function to query the RAG QA with a given query and search level.
+    It returns the response, nodes, and response metadata.
+    Parameters:
+    - query: The query to search for
+    - search_level: The level of similarity to search for
+    Return:
+    - response: The query response
+    - nodes: The retrieved nodes
+    - metadata: The metadata of the response
+    """
+    myretriever = rag_index.as_retriever(
+        include_text=True, # include source text, default True
+        similarity_top_k=search_level,
+    )
+    query_engine = rag_index.as_query_engine(
+        sub_retrievers=[
+            myretriever,
+        ],
+        include_text=True,
+        similarity_top_k=search_level,
+    )
+    response = query_engine.query(query)
+    nodes = myretriever.retrieve(query)
+
+    return response, nodes, response.metadata
+
+
+def query_graph_rag_qa(query,search_level):
+    """
+    A function to query the RAG QA with a given query and search level.
+    It returns the response, reference, and reference text.
+    Parameters:
+    - query: The query to search for
+    - search_level: The level of similarity to search for
+    Return:
+    - response: The query response
+    - reference: The extracted patterns
+    - reference_text: The text of the extracted patterns
+    """
+    myretriever = graph_rag_index.as_retriever(
+        include_text=True, # include source text, default True
+        similarity_top_k=search_level,
+    )
+    query_engine = graph_rag_index.as_query_engine(
+        sub_retrievers=[
+            myretriever,
+        ],
+        include_text=True,
+        similarity_top_k=search_level,
+    )
+    response = query_engine.query(query)
+    nodes = myretriever.retrieve(query)
+    # parsed_resp = parse_response_with_regex(str(response))

    reference = []
    reference_text = []
@@ -125,11 +198,36 @@ def query_tqa(query):
        reference.append(extract_pattern_triplet(node.text))
        reference_text.append(node.text)

-    return
+    return response, reference , reference_text
+
+def query_tqa(query,search_level):
+    grag_response, grag_reference , grag_reference_text = query_graph_rag_qa(query,search_level)
+    rag_response, rag_reference, rag_reference_text = query_rag_qa(query,search_level)
+    return grag_response, grag_reference , grag_reference_text, rag_response, rag_reference, rag_reference_text
+
+
+def eval_llm(query,rag_response,grag_response):
+    data = {'QUERY': query,
+            'RESPONSE': rag_response
+            }
+    prompt = PromptTemplate(llm_eval_prompt)
+    query_ready = prompt.format(**data)
+    rag_eval = OpenAI().complete(query_ready)
+
+    data = {'QUERY': query,
+            'RESPONSE': grag_response
+            }
+    prompt = PromptTemplate(llm_eval_prompt)
+    query_ready = prompt.format(**data)
+    grag_eval = OpenAI().complete(query_ready)
+    return grag_eval,rag_eval
+

def plot_full_kg():
    """Plot the full knowledge graph and return the HTML representation."""
-    return HTML(filename=kg_plot_path)
+    # return HTML(filename=kg_plot_path)
+    with open(kg_plot_path, "r") as file:
+        return file.read()

with gr.Blocks() as demo:
    gr.Markdown("<h1>Telcom Graph-RAG v0.1</h1>")
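The new query and evaluation paths can also be exercised outside the Gradio UI; a rough smoke-test sketch, assuming both index directories are present and the oai key is set (the search level and printed fields are illustrative):

# Hypothetical manual check, not part of this commit.
q = "what are the upselling ideas for roaming package you can recommend for customer Rina Wati."
grag_resp, grag_ref, grag_ref_text, rag_resp, rag_ref, rag_ref_text = query_tqa(q, search_level=10)
print(grag_resp)    # Graph-RAG answer
print(grag_ref[0])  # triplets extracted from the first retrieved node

grag_eval, rag_eval = eval_llm(q, rag_resp, grag_resp)
print(rag_eval)     # <evaluation>...</evaluation> block scoring the plain RAG answer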
@@ -137,23 +235,51 @@ with gr.Blocks() as demo:
    with gr.Tab("Virtual Assistant"):
        with gr.Row():
            query_input = gr.Textbox(label="Input Your Query..")
+            search_level = gr.Slider(minimum=1, maximum=50, value=3, step=5, label="Search level")
+            ask_button = gr.Button("Ask TelcomVA!!")
        with gr.Row():
-
-
+            with gr.Accordion("Graph-RAG!", open=True):
+                grag_output = gr.Textbox(label="Response")
+                grag_reference = gr.Textbox(label="Triplets")
+                grag_reference_text = gr.Textbox(label="Extracted Reference raw")
+
+            with gr.Accordion("RAG", open=True):
+                rag_output = gr.Textbox(label="Response")
+                rag_reference = gr.Textbox(label="Extracted Reference")
+                rag_reference_text = gr.Textbox(label="Extracted Reference raw")
        with gr.Row():
-
-
+            grag_performance = gr.Textbox(label="Graph-RAG Performance")
+            rag_performance = gr.Textbox(label="RAG Performance")
+            eval_button = gr.Button("Evaluate LLMs!!")


-        ask_button = gr.Button("Ask TelcomVA!!")

    with gr.Accordion("Explore KG!", open=False):
-        gr.Markdown("This KG is built using a subset of Github repositories. ")
        kg_output = gr.HTML()
        plot_button = gr.Button("Plot Full KG!!")

-    ask_button.click(query_tqa,
+    ask_button.click(query_tqa,
+                     inputs=[query_input,search_level],
+                     outputs=[
+                         grag_output,
+                         grag_reference,
+                         grag_reference_text,
+                         rag_output,
+                         rag_reference,
+                         rag_reference_text
+                     ]
+                     )
+
+    eval_button.click(eval_llm,
+                      inputs=[query_input,rag_output,grag_output],
+                      outputs=[
+                          grag_performance,
+                          rag_performance
+                      ]
+                      )
+
    plot_button.click(plot_full_kg, outputs=kg_output)
+
    examples = gr.Examples(
        examples=[
            ["what are the upselling ideas for roaming package you can recommend for customer Rina Wati."],
@@ -161,5 +287,6 @@ with gr.Blocks() as demo:
        inputs=[query_input]
    )

+
demo.launch(auth=(os.getenv('id'), os.getenv('pass')), share=True)
# demo.launch(share=False)
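demo.launch reads its basic-auth pair from the id and pass environment variables, and the OpenAI key comes from oai, so a local run needs all three set before the module-level launch executes; a minimal sketch with placeholder values (the wrapper and values are assumptions, not part of this commit):

# Hypothetical local wrapper: set the expected variables, then import app.py
# so its module-level demo.launch() picks them up.
import os
os.environ["oai"] = "sk-..."       # OpenAI API key (placeholder)
os.environ["id"] = "admin"         # Gradio basic-auth username (placeholder)
os.environ["pass"] = "change-me"   # Gradio basic-auth password (placeholder)
import app  # noqa: E402  (importing app runs demo.launch)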