# app.py — Gradio RAG question-answering demo (Hugging Face Space)
import logging
import os
import time

import gradio as gr
import openai
import torch
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
# Set up module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize the OpenAI API key from the environment; fall back to the
# original placeholder so behavior is unchanged when the variable is unset.
# NOTE(review): never hardcode a real key here — set OPENAI_API_KEY instead.
openai.api_key = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")
def process_query(query):
    """Answer *query* with retrieval-augmented generation.

    Retrieves the most similar documents from the Chroma vector store,
    concatenates them into a context string, and asks the OpenAI chat
    model to answer the query grounded in that context.

    Args:
        query: The user's natural-language question.

    Returns:
        The model's answer as a string, or a generic error message if
        retrieval or generation fails.
    """
    try:
        # Lazy %-style args keep formatting out of the hot path.
        logger.info("Processing query: %s", query)

        # NOTE(review): `vectordb` is not defined anywhere in this file —
        # it must be a Chroma instance created at module import time.
        # Confirm it is initialized before the first request arrives.
        relevant_docs = vectordb.similarity_search(query, k=30)
        context = " ".join(doc.page_content for doc in relevant_docs)

        # Crude throttle to stay under the OpenAI requests-per-minute cap.
        time.sleep(1)

        response = openai.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"Given the document: {context}\n\nGenerate a response to the query: {query}"},
            ],
            max_tokens=300,
            temperature=0.7,
        )
        answer = response.choices[0].message.content.strip()
        logger.info("Successfully generated response")
        return answer
    except Exception:
        # Log the full traceback server-side; do NOT echo raw exception
        # details back to the end user (they may leak internals).
        logger.exception("Error processing query")
        return "Sorry, something went wrong while answering your question. Please try again."
# Build the Gradio UI: one question box in, one answer box out.
question_box = gr.Textbox(
    label="Enter your question",
    placeholder="Type your question here...",
    lines=2,
)
answer_box = gr.Textbox(
    label="Answer",
    lines=5,
)

# Canned example queries shown beneath the interface.
example_questions = [
    ["What role does T-cell count play in severe human adenovirus type 55 (HAdV-55) infection?"],
    ["In what school district is Governor John R. Rogers High School located?"],
    ["Is there a functional neural correlate of individual differences in cardiovascular reactivity?"],
]

demo = gr.Interface(
    fn=process_query,
    inputs=[question_box],
    outputs=[answer_box],
    title="RAG-Powered Question Answering System",
    description="Ask questions and get answers based on the embedded document knowledge.",
    examples=example_questions,
)
# Launch the app locally with debug tracebacks enabled (guarded so the
# module can be imported without starting the server).
if __name__ == "__main__":
    demo.launch(debug=True)