# NOTE(review): removed non-Python page residue that had been scraped into the
# file (build-error banners, git blame hashes, and a line-number gutter).
# It was not part of the program and would break Python parsing.
import os
import json
import gradio as gr
from utils.response_manager import ResponseManager # Import the ResponseManager class
"""
This script sets up a Gradio interface to host an AI chatbot using RAG (Retrieval-Augmented Generation)
to provide responses to user queries. Response API from OpenAI is used for both retrieval and generation of responses.
"""
# Vector store ID for the retrieval of knowledge base documents
# Load the vector store ID from the environment variable
vector_store_id = os.getenv('VECTOR_STORE_ID')
# Check if the VECTOR_STORE_ID environment variable is set
if not vector_store_id:
raise ValueError("VECTOR_STORE_ID environment variable is not set.")
# Initialize the ResponseManager with the vector store ID
response_manager = ResponseManager(vector_store_id)
# Set parameters for the response generation
model = "gpt-4o-mini" # Set the model to be used for response generation
temperature=0 # Set the temperature for response generation
max_output_tokens=800 # Set the maximum number of output tokens
max_num_results=7 # Set the maximum number of knowledge base documents to return for retrieval
# Load the configuration for Gradio GUI interface from the JSON file
with open('config/gradio_config.json', 'r') as config_file:
config = json.load(config_file)
# Check if the configuration file is loaded successfully
if not config:
raise ValueError("Failed to load the configuration file.")
# Extract the configuration parameters
title = config["chatbot_title"]
description = config["chatbot_description"]
chatbot_input_label = config["chatbot_input_label"]
chatbot_input_placeholder = config["chatbot_input_placeholder"]
chatbot_output_label = config["chatbot_output_label"]
chatbot_output_placeholder = config["chatbot_output_placeholder"]
chatbot_submit_button = config["chatbot_submit_button"]
chatbot_reset_button = config["chatbot_reset_button"]
# Check if the configuration parameters are set correctly
if not all([title, description,
chatbot_input_label, chatbot_input_placeholder,
chatbot_output_label, chatbot_output_placeholder,
chatbot_submit_button, chatbot_reset_button]):
raise ValueError("One or more configuration parameters are missing or empty.")
# Define the chatbot function to handle user queries and generate responses
def chatbot(query: str) -> str:
    """
    Handle one chatbot interaction.

    :param query: The user query to respond to.
    :return: The response text from the chatbot, or a user-facing message
             when the query is blank or no response could be generated.
    """
    # BUGFIX: the real implementation had been commented out and replaced with a
    # hard-coded "Test Response" Markdown stub (leftover from testing the
    # Markdown rendering). Restored the actual RAG call path.
    try:
        # Guard clause: reject blank / whitespace-only queries up front.
        if not query.strip():
            return "Please enter a valid query."
        response = response_manager.create_response(
            query, model, temperature, max_output_tokens, max_num_results)
        if not response:
            return "Sorry, I couldn't generate a response at this time. Please try again later."
        # Return the response from the AI model.
        return response
    except Exception as e:
        # Surface the error text in the UI instead of crashing the Gradio app.
        return str(e)
# Build the Gradio GUI: a multi-line text box feeding the chatbot function,
# with the reply rendered as Markdown so formatted responses display correctly.
inputs = gr.Textbox(
    lines=7,
    label=chatbot_input_label,
    placeholder=chatbot_input_placeholder,
)
outputs = gr.Markdown(
    label=chatbot_output_label,
    placeholder=chatbot_output_placeholder,
)
iface = gr.Interface(
    fn=chatbot,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
)

# Launch the web app only when executed as a script (not when imported).
if __name__ == "__main__":
    iface.launch()