# telcom-va / app.py
import os
import gradio as gr
import nest_asyncio
from llama_index.core import StorageContext, load_index_from_storage
from telcom_core import (
    query_rag_qa,
    query_graph_rag_qa,
    plot_full_kg,
    evaluate_llm,
    parse_evaluation_regex,
    reasoning_graph,
)

# nest_asyncio lets LlamaIndex's async calls run inside an already-running
# event loop (e.g. under Gradio); importing it alone has no effect.
nest_asyncio.apply()

# OpenAI credentials and index paths
os.environ["OPENAI_API_KEY"] = os.getenv('oai')
KG_INDEX_PATH = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
KG_PLOT_PATH = os.path.join(KG_INDEX_PATH, "full_kg.html")
RAG_INDEX_PATH = "./telcom_RAG_full_withpackagedata_category/"

# Load Graph-RAG index
graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=KG_INDEX_PATH)
)

# Load RAG index
rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=RAG_INDEX_PATH)
)
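
# Both indices are assumed to have been built and persisted offline; the calls
# above only rehydrate them, so nothing is re-indexed at app startup.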


def query_tqa(query, search_level):
    """
    Query both Graph-RAG and RAG models and return their responses and references.

    Args:
        query (str): The query to be processed.
        search_level (int): The level of search to be performed.

    Returns:
        tuple: Responses and references from both Graph-RAG and RAG models.
    """
    grag_response, grag_reference, grag_reference_text = query_graph_rag_qa(graph_rag_index, query, search_level)
    rag_response, rag_reference, rag_reference_text = query_rag_qa(rag_index, query, search_level)
    return grag_response, grag_reference, grag_reference_text, rag_response, rag_reference, rag_reference_text
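
# Rough usage sketch (the query string is illustrative, not from the app);
# the return order mirrors the Gradio outputs wired up below:
#   grag_resp, grag_ref, grag_ref_raw, rag_resp, rag_ref, rag_ref_raw = query_tqa(
#       "Which roaming packages include data in Singapore?", search_level=3
#   )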


def eval_llm(query, rag_response, grag_response):
    """
    Evaluate responses from both Graph-RAG and RAG models using an LLM.

    Args:
        query (str): The query that was used to generate the responses.
        rag_response (str): The response from the RAG model.
        grag_response (str): The response from the Graph-RAG model.

    Returns:
        str: The LLM's evaluation text covering both responses.
    """
    eval_text = evaluate_llm(query, grag_response, rag_response)
    return eval_text  # , parse_evaluation_regex(grag_eval)
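
# Note: parse_evaluation_regex (imported above) appears intended to pull
# structured scores out of the evaluation text; that step is commented out.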


with gr.Blocks() as demo:
    gr.Markdown("<h1>Telcom Graph-RAG v0.1</h1>")

    with gr.Tab("Virtual Assistant"):
        with gr.Row():
            query_input = gr.Textbox(label="Input Your Query..")
            search_level = gr.Slider(minimum=1, maximum=50, value=3, step=5, label="Search level")
            ask_button = gr.Button("Ask TelcomVA!!")

        with gr.Row():
            with gr.Accordion("Graph-RAG!", open=True):
                grag_output = gr.Textbox(label="Response")
                grag_reference = gr.Textbox(label="Triplets")
                with gr.Accordion("Extracted Reference raw", open=False):
                    grag_reference_text = gr.Textbox(label="Extracted Reference raw")

            with gr.Accordion("RAG", open=True):
                rag_output = gr.Textbox(label="Response")
                rag_reference = gr.Textbox(label="Extracted Reference")
                with gr.Accordion("Extracted Reference raw", open=False):
                    rag_reference_text = gr.Textbox(label="Extracted Reference raw")

        with gr.Row():
            grag_performance = gr.Textbox(label="Graph-RAG Performance")
            # rag_performance = gr.HTML(label="RAG Performance")
            eval_button = gr.Button("Evaluate LLMs!!")

        with gr.Row():
            grag_reasoning = gr.Textbox(label="Graph-RAG Reasoning")
            reason_button = gr.Button("Get Graph Reasoning!!")

        with gr.Accordion("Explore KG!", open=False):
            kg_output = gr.HTML()
            plot_button = gr.Button("Plot Full KG!!")

    ask_button.click(
        query_tqa,
        inputs=[query_input, search_level],
        outputs=[
            grag_output,
            grag_reference,
            grag_reference_text,
            rag_output,
            rag_reference,
            rag_reference_text,
        ],
    )

    eval_button.click(
        eval_llm,
        inputs=[query_input, rag_output, grag_output],
        outputs=[
            grag_performance,
            # rag_performance,
        ],
    )

    reason_button.click(
        reasoning_graph,
        inputs=[query_input, grag_output, grag_reference],
        outputs=[grag_reasoning],
    )

    plot_button.click(plot_full_kg, outputs=kg_output)
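
    # plot_full_kg is expected to return an HTML string (presumably the graph
    # written to KG_PLOT_PATH by telcom_core) that Gradio renders in kg_output.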

    examples = gr.Examples(
        examples=[
            ["What are the upselling ideas for a roaming package you can recommend for customer Rina Wati?"],
            ["My phone number is 81226808748. I am traveling and want to get some good travel roaming plans. Give me a list of plans with per-GB usage and cost."],
        ],
        inputs=[query_input],
    )
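
# Launch with HTTP basic auth; the `id` and `pass` values are expected to be
# provided as environment variables (e.g. Space secrets), like `oai` above.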
demo.launch(auth=(os.getenv('id'), os.getenv('pass')), share=True)