File size: 2,520 Bytes
b60b6c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60f31f5
9baf2f1
60f31f5
b60b6c5
 
 
 
 
60f31f5
b60b6c5
 
 
 
 
 
 
 
 
80f878a
b60b6c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import gradio as gr
from helper import *
from prompts_and_chema import *

def full_chat_pipeline(user_query, openai_key):
    """Classify the user's query into intents, then answer it with context.

    Two-stage GPT pipeline:
      1. Intent classification (schema-constrained structured output).
      2. Answer generation from a prompt that embeds the detected intents
         and the context texts loaded for those intents.

    Args:
        user_query: Free-form question typed by the user in the UI.
        openai_key: OpenAI API key supplied through the UI textbox.

    Returns:
        tuple[str, str]: (stringified intents, final answer). On any
        failure, both slots carry an error message so the two Gradio
        output boxes still render instead of the app crashing.
    """
    try:
        # Configure the OpenAI client with the user-supplied key.
        openai.api_key = openai_key

        # Step 1: classify the query into intents (schema-constrained output).
        raw_response = chat_with_gpt(
            intent_system_prompt,
            user_query,
            use_schema=True,
            schema=intent_output_schema,
        )

        # The helper may return either a JSON string or an already-parsed
        # object; normalize to a parsed structure.
        intents = json.loads(raw_response) if isinstance(raw_response, str) else raw_response

        # Step 2: gather context texts and intent descriptions for the
        # detected intents, then assemble the answer-generation prompt.
        loaded_texts = load_intent_texts(intents, get_txt_files)
        selected_intent_description = load_intent_description(intents, intent_description_map)
        print(selected_intent_description)  # NOTE(review): debug output — consider the logging module instead
        context_str = '\n\n'.join(f"{k}:\n{v}" for k, v in loaded_texts.items())
        formatted_prompt = f'''

        User query: {user_query}

        Selected Intents: {selected_intent_description}

        Context: {context_str}

        '''

        # Step 3: generate the final answer grounded in the assembled context.
        final_response = chat_with_gpt(get_answer_system_prompt, formatted_prompt)

        return f"{str(intents)}", f"{final_response}"

    except Exception as e:
        # Top-level UI boundary: surface the error in both output boxes
        # rather than letting the Gradio callback raise.
        return f"Error while classifying intents: {str(e)}", f"Error while generating response: {str(e)}"


# Gradio UI — layout order below determines on-page order.
with gr.Blocks(title="Degirum LLM") as demo:
    gr.Markdown("## Degirum LLM")
    gr.Markdown("Ask questions...")

    # API key entry sits at the top, masked as a password field.
    api_key_box = gr.Textbox(label="πŸ”‘ OpenAI API Key", placeholder="Enter your OpenAI API key...", type="password")

    # The query input and the classified-intent readout share one row.
    with gr.Row():
        query_box = gr.Textbox(label="πŸ’¬ Query", placeholder="Type your question here...", lines=3)
        intent_box = gr.Textbox(label="🧠 Classified Intents", placeholder="will get answer by AI", lines=3, interactive=False)

    # Button that triggers the two-stage pipeline.
    ask_button = gr.Button("πŸš€ Submit", variant="primary")

    # Full model answer rendered below everything else.
    answer_box = gr.Textbox(label="πŸ€– Full AI Response", lines=10, interactive=False)

    # Wire the click: (query, key) -> (intents, answer).
    ask_button.click(
        fn=full_chat_pipeline,
        inputs=[query_box, api_key_box],
        outputs=[intent_box, answer_box],
    )


# Launch the Gradio app only when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()