# Telcom Graph-RAG demo — Gradio app entry point (Hugging Face Space).
import os,re | |
import gradio as gr | |
import nest_asyncio | |
from langchain import PromptTemplate | |
from llama_index.core import PromptTemplate, StorageContext, load_index_from_storage | |
from llama_index.llms.openai import OpenAI | |
import networkx as nx | |
from pyvis.network import Network | |
from IPython.display import HTML, Markdown, display | |
# --- Configuration -----------------------------------------------------------
# The OpenAI key comes from the 'oai' env var (Space secret). Guard against it
# being unset: assigning None into os.environ raises an opaque TypeError.
# Without the key, downstream OpenAI calls will fail with a clear auth error.
_oai_key = os.getenv('oai')
if _oai_key is not None:
    os.environ["OPENAI_API_KEY"] = _oai_key

# Persist directories for the two prebuilt indices.
KG_INDEX_PATH = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
# os.path.join avoids the doubled slash the old "+ '/full_kg.html'" produced
# (KG_INDEX_PATH already ends with '/').
KG_PLOT_PATH = os.path.join(KG_INDEX_PATH, "full_kg.html")
RAG_INDEX_PATH = "./telcom_RAG_full_withpackagedata_category/"
# --- Persisted indices -------------------------------------------------------
# Rehydrate the Graph-RAG (knowledge-graph) index from its persist directory.
_kg_storage = StorageContext.from_defaults(persist_dir=KG_INDEX_PATH)
graph_rag_index = load_index_from_storage(_kg_storage)

# Rehydrate the plain vector-RAG index the same way.
_rag_storage = StorageContext.from_defaults(persist_dir=RAG_INDEX_PATH)
rag_index = load_index_from_storage(_rag_storage)
def query_tqa(query, search_level):
    """
    Run one query through both retrieval pipelines and collect their outputs.

    Args:
        query (str): The user question to answer.
        search_level (str): Retrieval depth forwarded to both back ends.

    Returns:
        tuple: (grag_response, grag_reference, grag_reference_text,
                rag_response, rag_reference, rag_reference_text).
    """
    graph_result = query_graph_rag_qa(query, search_level)
    plain_result = query_rag_qa(query, search_level)
    # Each result is a (response, reference, raw_reference_text) triple;
    # flatten them into the single 6-tuple the UI callback expects.
    return (*graph_result, *plain_result)
def eval_llm(query, rag_response, grag_response):
    """
    Score both pipelines' answers with an LLM judge.

    Args:
        query (str): The question that produced the responses.
        rag_response (str): Answer from the plain RAG pipeline.
        grag_response (str): Answer from the Graph-RAG pipeline.

    Returns:
        tuple: (graph_rag_evaluation, rag_evaluation).
    """
    # Left-to-right evaluation preserves the original call order:
    # Graph-RAG is judged first, then plain RAG.
    return evaluate_llm(query, grag_response), evaluate_llm(query, rag_response)
# --- Gradio UI ---------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("<h1>Telcom Graph-RAG v0.1</h1>")

    with gr.Tab("Virtual Assistant"):
        # Query controls.
        with gr.Row():
            question_box = gr.Textbox(label="Input Your Query..")
            depth_slider = gr.Slider(minimum=1, maximum=50, value=3, step=5, label="Search level")
        ask_btn = gr.Button("Ask TelcomVA!!")

        # Side-by-side answers from the two pipelines.
        with gr.Row():
            with gr.Accordion("Graph-RAG!", open=True):
                grag_answer = gr.Textbox(label="Response")
                grag_triplets = gr.Textbox(label="Triplets")
                grag_raw_ref = gr.Textbox(label="Extracted Reference raw")
            with gr.Accordion("RAG", open=True):
                rag_answer = gr.Textbox(label="Response")
                rag_ref = gr.Textbox(label="Extracted Reference")
                rag_raw_ref = gr.Textbox(label="Extracted Reference raw")

        # LLM-as-judge scores for each pipeline.
        with gr.Row():
            grag_score = gr.Textbox(label="Graph-RAG Performance")
            rag_score = gr.Textbox(label="RAG Performance")
        eval_btn = gr.Button("Evaluate LLMs!!")

        # Full knowledge-graph visualisation, collapsed by default.
        with gr.Accordion("Explore KG!", open=False):
            kg_view = gr.HTML()
            plot_btn = gr.Button("Plot Full KG!!")

    # Wire callbacks (still inside the Blocks context, at top level).
    ask_btn.click(
        query_tqa,
        inputs=[question_box, depth_slider],
        outputs=[
            grag_answer,
            grag_triplets,
            grag_raw_ref,
            rag_answer,
            rag_ref,
            rag_raw_ref,
        ],
    )
    eval_btn.click(
        eval_llm,
        inputs=[question_box, rag_answer, grag_answer],
        outputs=[grag_score, rag_score],
    )
    plot_btn.click(plot_full_kg, outputs=kg_view)

    gr.Examples(
        examples=[
            ["what are the upselling ideas for roaming package you can recommend for customer Rina Wati."],
        ],
        inputs=[question_box],
    )

# Basic-auth credentials come from the 'id' / 'pass' env vars.
demo.launch(auth=(os.getenv('id'), os.getenv('pass')), share=True)