import os
from langchain_core.prompts import PromptTemplate
from langchain_community.document_loaders import PyPDFLoader
import google.generativeai as genai
import gradio as gr
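# Note: this app reads the Gemini API key from the GOOGLE_API_KEY environment
# variable (see the genai.configure call below); set it before launching, e.g.
#   export GOOGLE_API_KEY="your-key"
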
# Function that loads the PDF, builds the prompt, and queries Gemini
def initialize(pdf_file, question):
    try:
        # Check whether a file was uploaded
        if pdf_file is not None:
            # Gradio's File component supplies a temp file whose .name is the full path
            file_path = pdf_file.name
            if os.path.exists(file_path):
                # Process the PDF and split it into pages
                pdf_loader = PyPDFLoader(file_path)
                pages = pdf_loader.load_and_split()
                # Limit the context to the first 30 pages
                processed_context = "\n".join(str(page.page_content) for page in pages[:30])
                # Configure Google Generative AI (reads GOOGLE_API_KEY from the environment)
                genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
                # Prompt template for formatting context and question
                prompt_template = """Answer the question as precisely as possible using the provided context. If the answer is not contained in the context, say "answer not available in context".

Context:
{context}

Question:
{question}

Answer:
"""
                prompt = PromptTemplate(
                    template=prompt_template,
                    input_variables=["context", "question"],
                )
                # Load the Gemini Pro model and generate an answer
                model = genai.GenerativeModel("gemini-pro")
                response = model.generate_content(
                    prompt.format(context=processed_context, question=question)
                )
                return response.text
            else:
                return "Error: The uploaded file could not be found."
        else:
            return "Error: No PDF file was uploaded."
    except Exception as e:
        return f"An error occurred: {e}"

# Create a Gradio interface
interface = gr.Interface(
    fn=initialize,
    inputs=[
        gr.File(label="Upload PDF"),
        gr.Textbox(label="Question"),
    ],
    outputs="text",
    title="GeminiPro Q&A Bot",
    description="Ask questions about the uploaded PDF document.",
)

# Launch the interface
interface.launch()