# NOTE(review): Hugging Face Spaces page header ("Spaces: Sleeping") captured by
# the scrape — not part of the program. Kept as a comment so the file parses.
import os | |
import gradio as gr | |
import base64 | |
from llama_index.core import StorageContext, load_index_from_storage | |
from dotenv import load_dotenv | |
from retrieve import get_latest_dir, get_latest_html_file | |
from graph_handler import query_graph_qa, plot_subgraph | |
from embed_handler import query_rag_qa | |
from evaluate import evaluate_llm, reasoning_graph, get_coupon | |
import base64 | |
import time | |
# Load environment variables (directory paths and auth credentials) from .env.
load_dotenv()

# Resolve the most recently created artifacts under the configured directories.
# NOTE(review): these env vars are assumed to be set; get_latest_dir/None
# behavior is defined in retrieve.py — confirm it handles a missing var.
KG_INDEX_PATH = get_latest_dir(os.getenv("GRAPH_DIR"))
KG_PLOT_PATH = get_latest_html_file(os.getenv("GRAPH_VIS"))
RAG_INDEX_PATH = get_latest_dir(os.getenv("EMBEDDING_DIR"))

# Load Graph-RAG index from its persisted storage directory.
graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=KG_INDEX_PATH)
)

# Load plain RAG (embedding) index from its persisted storage directory.
rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=RAG_INDEX_PATH)
)
def query_tqa(query, search_level):
    """
    Query the Graph-RAG index for a given question.

    The plain-RAG comparison path that this function once exposed has been
    disabled; only the Graph-RAG response is produced. The 1-tuple return
    shape is preserved so existing callers that unpack/index the result
    keep working.

    Args:
        query (str): The question to ask. Must be non-empty after stripping
            whitespace.
        search_level (int): The max search depth for the Graph-RAG retriever.

    Returns:
        tuple: A 1-tuple ``(grag_response,)`` containing the Graph-RAG answer.

    Raises:
        gr.Error: If the query is empty or whitespace-only.
    """
    if not query.strip():
        raise gr.Error("Please enter a query before asking.")
    # References are retrieved alongside the response but are not currently
    # surfaced to the UI.
    grag_response, grag_reference, grag_reference_text = query_graph_qa(
        graph_rag_index, query, search_level
    )
    return (grag_response,)
def show_graph():
    """
    Render the latest knowledge-graph visualization as an embedded iframe.

    Reads the newest HTML file from the directory named by the GRAPH_VIS
    environment variable (default "graph_vis") and wraps it in a base64
    data-URI iframe so it can be displayed inside a gr.HTML component.

    Returns:
        str: The iframe HTML, or a human-readable message when no
            visualization exists or an error occurs.
    """
    vis_dir = os.getenv("GRAPH_VIS", "graph_vis")
    try:
        graph_path = get_latest_html_file(vis_dir)
        if not graph_path:
            return "No graph visualization found."
        with open(graph_path, "r", encoding="utf-8") as fh:
            raw_html = fh.read()
        # Embed via a data URI so the iframe needs no served file path.
        payload = base64.b64encode(raw_html.encode()).decode()
        return (
            f'<iframe src="data:text/html;base64,{payload}" '
            'width="100%" height="1000px" frameborder="0"></iframe>'
        )
    except Exception as e:
        # Best-effort UI helper: surface the error as text instead of raising.
        return f"Error: {str(e)}"
# Build and launch the Gradio UI: a chat panel backed by the Graph-RAG index
# plus a button that renders the knowledge-graph visualization.
with gr.Blocks() as demo:
    gr.Markdown("# Comfy Virtual Assistant")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Input Your Query")
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        """Answer one chat message and append the exchange to the history."""
        # Bug fix: query_tqa returns a 1-tuple; unpack it so the chatbot
        # receives a string rather than a tuple (which renders incorrectly).
        (bot_message,) = query_tqa(message, 2)
        chat_history.append((message, bot_message))
        time.sleep(1)  # brief pause so the UI update feels natural
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

    with gr.Row():
        plot_button = gr.Button("Plot Knowledge Graph", variant="secondary")
    kg_output = gr.HTML()
    # Bug fix: the button was created but never wired up; clicking it now
    # renders the latest graph visualization into the HTML panel.
    plot_button.click(show_graph, outputs=kg_output)

demo.launch(auth=(os.getenv("ID"), os.getenv("PASS")), share=False)