Spaces:
Sleeping
Sleeping
File size: 1,711 Bytes
4045119 d1621f9 4045119 dbe03eb bbebd0b 4045119 d4b5a4f 2dfe832 a994101 4045119 ab35ff1 4045119 c50ec7a ab35ff1 c50ec7a d1621f9 bafda05 c50ec7a 4045119 355ae1b 488ba19 4045119 f757205 4045119 c50ec7a 4045119 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
import gradio as gr
import openai
from baseline_code import *
# Model-specific response logic
def get_response(message, chat_history, model_choice, api_key):
    """Append the user turn, generate a model reply, and record it.

    Args:
        message: The user's input text.
        chat_history: Mutable list of {"role", "content"} dicts in Gradio
            "messages" format; updated in place.
        model_choice: Model label selected in the UI dropdown.
        api_key: Provider API key entered by the user.

    Returns:
        Tuple of (reply text, updated chat_history).
    """
    chat_history.append({"role": "user", "content": message})
    # generate_answer comes from baseline_code (star import) and routes the
    # request to whichever backend model_choice names.
    reply = generate_answer(message, model_choice, api_key)
    chat_history.append({"role": "assistant", "content": reply})
    # Removed debug print of the full chat history: it logged user content to
    # stdout, contradicting the UI note that the session is not logged.
    return reply, chat_history
# Gradio app: single-page chat UI with a model picker and an API-key field on
# the left, and the chat window on the right.
with gr.Blocks() as app:
    with gr.Row():
        # Left column: provider/model configuration.
        with gr.Column(scale=1):
            # NOTE(review): emoji literal appears mojibaked in the original
            # source — confirm the intended glyph before changing it.
            gr.Markdown("## βοΈ Model Configuration")
            model_choice = gr.Dropdown(
                label="Choose Model",
                choices=["OpenAI GPT-4", "Gemini 1.5Flash", "Gemini 2.0Flash", "Together AI"],
                value="Gemini 1.5Flash",
            )
            api_key_input = gr.Textbox(
                label="Enter API Key",
                # Fixed: the old placeholder said "OpenAI API key", but the
                # dropdown (defaulting to Gemini) offers four providers.
                placeholder="Enter your API key here...",
                type="password",
            )
        # Right column: the chat interface itself.
        with gr.Column(scale=3):
            gr.Markdown("<h1>βοΈ AirlineX Airways Virtual Assistant</h1>")
            chatbot = gr.Chatbot(label="Assistant", height=700, type="messages")
            msg = gr.Textbox(label="Your Message")
            # Per-session conversation history (list of role/content dicts).
            state = gr.State([])
            gr.Markdown("π **Note:** Your API key is used only for this session and is not stored or logged.")

    # Main logic handler: bridges the textbox submit event to get_response.
    def respond(message, chat_history, model_choice, api_key):
        """Generate a reply and return the updated history for both outputs."""
        reply, chat_history = get_response(message, chat_history, model_choice, api_key)
        # Both the Chatbot ("messages" format) and the State receive the
        # full updated history.
        return chat_history, chat_history

    msg.submit(
        respond,
        [msg, state, model_choice, api_key_input],
        [chatbot, state],
    )

app.launch()
|