"""Gradio demo for telecom QA: loads a Graph-RAG (knowledge-graph) index and a
plain RAG index from disk, then exposes side-by-side query and LLM-based
evaluation helpers for the UI defined below.

NOTE(review): this chunk is truncated mid-UI-definition (the final
``gr.Markdown("`` has an unclosed string); the Blocks layout continues past
the visible source.
"""
import os
import gradio as gr
import nest_asyncio  # NOTE(review): not used in this chunk — presumably applied in the truncated tail; verify
from llama_index.core import StorageContext, load_index_from_storage
from telcom_core import query_rag_qa, query_graph_rag_qa, plot_full_kg, evaluate_llm, parse_evaluation_regex
# NOTE(review): plot_full_kg is imported but not called in this chunk — likely used after the truncation point.

# Define constants for index paths
# NOTE(review): os.getenv('oai') returns None when the 'oai' env var is unset,
# and assigning None into os.environ raises TypeError — confirm 'oai' is always set.
os.environ["OPENAI_API_KEY"] = os.getenv('oai')
KG_INDEX_PATH = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
# NOTE(review): KG_INDEX_PATH already ends in '/', so this produces '...//full_kg.html';
# harmless on POSIX paths but worth normalising.
KG_PLOT_PATH = KG_INDEX_PATH + "/full_kg.html"
RAG_INDEX_PATH = "./telcom_RAG_full_withpackagedata_category/"

# Load Graph-RAG index
# Module-level side effect: reads the persisted knowledge-graph index from disk at import time.
graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=KG_INDEX_PATH)
)

# Load RAG index
# Module-level side effect: reads the persisted vector/RAG index from disk at import time.
rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=RAG_INDEX_PATH)
)


def query_tqa(query: str, search_level: str):
    """
    Query both Graph-RAG and RAG models and return their responses and references.

    Args:
        query (str): The query to be processed.
        search_level (str): The level of search to be performed.

    Returns:
        tuple: Responses and references from both Graph-RAG and RAG models.
    """
    # Each helper is expected to return (response, reference, reference_text);
    # the exact semantics live in telcom_core — TODO confirm the triple ordering there.
    grag_response, grag_reference, grag_reference_text = query_graph_rag_qa(graph_rag_index, query, search_level)
    rag_response, rag_reference, rag_reference_text = query_rag_qa(rag_index, query, search_level)
    # Graph-RAG results come first in the returned 6-tuple, matching the UI layout below.
    return grag_response, grag_reference, grag_reference_text, rag_response, rag_reference, rag_reference_text


def eval_llm(query: str, rag_response: str, grag_response: str):
    """
    Evaluate responses from both Graph-RAG and RAG models using an LLM.

    Args:
        query (str): The query that was used to generate the responses.
        rag_response (str): The response from the RAG model.
        grag_response (str): The response from the Graph-RAG model.

    Returns:
        tuple: Evaluation results for both responses.
    """
    # NOTE(review): parameter order is (rag, grag) but evaluation runs grag first
    # and the return is (grag_eval, rag_eval) — callers must not assume the
    # return order mirrors the argument order.
    grag_eval = evaluate_llm(query, grag_response)
    rag_eval = evaluate_llm(query, rag_response)
    # parse_evaluation_regex presumably extracts structured scores from the raw
    # LLM evaluation text — confirm against telcom_core.
    return parse_evaluation_regex(grag_eval), parse_evaluation_regex(rag_eval)


# UI definition begins here; truncated in this chunk (unclosed string literal).
with gr.Blocks() as demo:
    gr.Markdown("