# saritha's picture
# Update app.py
# 003eaf6 verified
# raw
# history blame
# 2.07 kB
import os

import google.generativeai as genai
import gradio as gr
from langchain.chains.question_answering import load_qa_chain
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
# Function for initialization
def initialize(pdf_file, question):
    """Answer a question about an uploaded PDF using Gemini through LangChain.

    Args:
        pdf_file: Gradio file object for the uploaded PDF (or None when the
            user submitted without a file).
        question: The user's question about the document.

    Returns:
        The model's answer as a string, or a human-readable error message
        (this function never raises; errors are surfaced in the UI).
    """
    try:
        # Guard clauses for missing/unreadable input.
        if pdf_file is None:
            return "Error: No PDF file was uploaded."

        # Gradio's File component already exposes the full temp-file path
        # in .name -- joining it onto /tmp would build a nonexistent path.
        file_path = pdf_file.name
        if not os.path.exists(file_path):
            return "Error: The uploaded file could not be found."

        # Load the PDF and cap the context at the first 30 pages.
        pages = PyPDFLoader(file_path).load_and_split()
        context = "\n".join(str(page.page_content) for page in pages[:30])

        # Configure Gemini; the API key is read from the environment rather
        # than hard-coded.  NOTE(review): set GOOGLE_API_KEY in the Space.
        genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

        # Prompt for the "stuff" chain; it fills {context} and {question}.
        # (The original referenced an undefined `prompt` -> NameError.)
        prompt = PromptTemplate(
            template=(
                "Answer the question as precisely as possible using the "
                "provided context.\n\n"
                "Context:\n{context}\n\n"
                "Question:\n{question}\n\n"
                "Answer:"
            ),
            input_variables=["context", "question"],
        )

        # The original also built a genai.GenerativeModel and immediately
        # overwrote it -- only the LangChain chat model is needed here.
        model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
        stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
        stuff_answer = stuff_chain(
            {"input_documents": pages, "question": question, "context": context},
            return_only_outputs=True,
        )
        return stuff_answer["output_text"]
    except Exception as e:
        return f"An error occurred: {e}"  # Surface any failure in the UI
# Assemble the Gradio UI: a PDF upload plus a free-text question box,
# wired to initialize(), then start serving.
pdf_input = gr.File(label="Upload PDF")
question_input = gr.Textbox(label="Question")

interface = gr.Interface(
    fn=initialize,
    inputs=[pdf_input, question_input],
    outputs="text",
    title="GeminiPro Q&A Bot",
    description="Ask questions about the uploaded PDF document.",
)

interface.launch()