import os
import gradio as gr
import nest_asyncio
from llama_index.core import StorageContext, load_index_from_storage

from telcom_core import (query_rag_qa, query_graph_rag_qa, plot_full_kg,
                         evaluate_llm, parse_evaluation_regex, reasoning_graph)

# Allow nested asyncio event loops (LlamaIndex may run async code inside Gradio).
nest_asyncio.apply()

# Read the OpenAI API key from the 'oai' environment/secret variable.
os.environ["OPENAI_API_KEY"] = os.getenv('oai')

# Paths to the persisted Graph-RAG and RAG indexes.
KG_INDEX_PATH = "./telcom_full_property_kg_processed_dynamicpath2_withpackagedata_category/"
KG_PLOT_PATH = os.path.join(KG_INDEX_PATH, "full_kg.html")
RAG_INDEX_PATH = "./telcom_RAG_full_withpackagedata_category/"

# Load Graph-RAG index
graph_rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=KG_INDEX_PATH)
)

# Load RAG index
rag_index = load_index_from_storage(
    StorageContext.from_defaults(persist_dir=RAG_INDEX_PATH)
)

def query_tqa(query, search_level):
    """
    Query both Graph-RAG and RAG models and return their responses and references.

    Args:
        query (str): The query to be processed.
        search_level (int): The level of search to be performed.

    Returns:
        tuple: Responses and references from both Graph-RAG and RAG models.
    """
    grag_response, grag_reference, grag_reference_text = query_graph_rag_qa(graph_rag_index, query, search_level)
    rag_response, rag_reference, rag_reference_text = query_rag_qa(rag_index, query, search_level)
    return grag_response, grag_reference, grag_reference_text, rag_response, rag_reference, rag_reference_text



def eval_llm(query, rag_response, grag_response):
    """
    Evaluate responses from both Graph-RAG and RAG models using an LLM.

    Args:
        query (str): The query that was used to generate the responses.
        rag_response (str): The response from the RAG model.
        grag_response (str): The response from the Graph-RAG model.

    Returns:
        tuple: Evaluation results for both responses.
    """
    eval_text = evaluate_llm(query, grag_response, rag_response)
    return eval_text  # optionally: parse_evaluation_regex(eval_text)


 
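# Build the Gradio UI: a QA tab comparing Graph-RAG and RAG, plus a KG explorer.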
with gr.Blocks() as demo:
    gr.Markdown("<h1>Telcom Graph-RAG v0.1</h1>")

    with gr.Tab("Virtual Assistant"):
        with gr.Row():
            query_input = gr.Textbox(label="Enter your query...")
            search_level = gr.Slider(minimum=1, maximum=50, value=3, step=1, label="Search level")
            ask_button = gr.Button("Ask TelcomVA!!")


        with gr.Row():
            with gr.Accordion("Graph-RAG!", open=True):
                grag_output = gr.Textbox(label="Response")
                grag_reference = gr.Textbox(label="Triplets")
                with gr.Accordion("Extracted Reference raw", open=False):
                    grag_reference_text = gr.Textbox(label="Extracted Reference raw")

            with gr.Accordion("RAG", open=True):
                rag_output = gr.Textbox(label="Response")
                rag_reference = gr.Textbox(label="Extracted Reference")
                with gr.Accordion("Extracted Reference raw", open=False):
                    rag_reference_text = gr.Textbox(label="Extracted Reference raw")


        with gr.Row():
            grag_performance = gr.Textbox(label="Graph-RAG Performance")
            # rag_performance = gr.HTML(label="RAG Performance")
        eval_button = gr.Button("Evaluate LLMs!!")

        with gr.Row():
            grag_reasoning = gr.Textbox(label="Graph-RAG Reasoning")
        reason_button = gr.Button("Get Graph Reasoning!!")

    with gr.Accordion("Explore KG!", open=False):
        kg_output = gr.HTML()
        plot_button = gr.Button("Plot Full KG!!")

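    # Ask button: query both pipelines and fill the response/reference boxes.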
    ask_button.click(query_tqa,
                     inputs=[query_input, search_level],
                     outputs=[
                         grag_output,
                         grag_reference,
                         grag_reference_text,
                         rag_output,
                         rag_reference,
                         rag_reference_text
                         ]
    )
    
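    # Evaluate button: have an LLM judge the Graph-RAG and RAG responses.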
    eval_button.click(eval_llm,
                      inputs=[query_input, rag_output, grag_output],
                      outputs=[
                          grag_performance,
                        #   rag_performance
                      ]
    )

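    # Reasoning button: explain the Graph-RAG answer using its retrieved triplets.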
    reason_button.click(reasoning_graph,
                      inputs=[query_input, grag_output, grag_reference],
                      outputs=[
                          grag_reasoning
                      ]
    )

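    # Plot button: render the full knowledge graph as HTML.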
    plot_button.click(plot_full_kg, outputs=kg_output)

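    # Clickable example queries that populate the input box.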
    examples = gr.Examples(
        examples=[
            ["What upselling ideas for a roaming package can you recommend for the customer Rina Wati?"],
            ["My phone number is 81226808748. I am traveling and want some good travel roaming plans. Give me a list of plans by GB usage and cost."]
        ],
        inputs=[query_input]
    )

demo.launch(auth=(os.getenv('id'), os.getenv('pass')), share=True)