saritha committed
Commit 4a0ed22 · verified · 1 Parent(s): 003eaf6

Update app.py

Files changed (1)
  1. app.py +33 -4
app.py CHANGED
@@ -19,16 +19,43 @@ def initialize(pdf_file, question):
             # Process the PDF
             pdf_loader = PyPDFLoader(file_path)
             pages = pdf_loader.load_and_split()
-            context = "\n".join(str(page.page_content) for page in pages[:30])  # Limit to first 30 pages
+            processed_context = "\n".join(str(page.page_content) for page in pages[:30])  # Limit to first 30 pages

             # Configure Google Generative AI (replace with your API key)
+            genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+            # Prompt template for formatting context and question
+            prompt_template = """Answer the question as precise as possible using the provided context. If the answer is not contained in the context, say "answer not available in context"
+
+            Context:
+            {context}
+
+            Question:
+            {question}
+
+            Answer:
+            """
+
+            prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+
+            # Generate answer using GeminiPro model
+            # Load the GeminiPro model
             model = genai.GenerativeModel('gemini-pro')
-            model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)

+            # Prepare the input data
+            input_data = {
+                "context": processed_context,
+                "question": question
+            }
+
+            # Generate the answer using load_qa_chain
             stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-            stuff_answer = stuff_chain({"input_documents": pages, "question": question, "context": context}, return_only_outputs=True)
+            stuff_answer = stuff_chain(input_data, return_only_outputs=True)
+
+            # Extract the answer
+            generated_answer = stuff_answer['output_text']

-            return stuff_answer['output_text']
+            return generated_answer
         else:
             return "Error: The uploaded file could not be found."
     else:
@@ -56,3 +83,5 @@ interface.launch()



+
+
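For context, the committed hunk hands a google-generativeai GenerativeModel and a plain {context, question} dict to LangChain's load_qa_chain, which expects a LangChain chat model and an input_documents list, so the chain would likely fail at runtime. The sketch below shows one way the updated initialize() could be wired end to end, assuming the ChatGoogleGenerativeAI model that this commit removes and a GOOGLE_API_KEY environment variable; the file_path handling is illustrative, since the lines above the hunk are not shown in this diff.

# A minimal sketch, not the committed code: wiring initialize() so that
# LangChain's "stuff" QA chain actually runs with Gemini.
import os

from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from langchain_community.document_loaders import PyPDFLoader
from langchain_google_genai import ChatGoogleGenerativeAI

prompt_template = """Answer the question as precisely as possible using the provided context. If the answer is not contained in the context, say "answer not available in context"

Context:
{context}

Question:
{question}

Answer:
"""

def initialize(pdf_file, question):
    # Assumption: pdf_file is already a filesystem path; in app.py the
    # file_path is derived from pdf_file in lines above this hunk.
    file_path = pdf_file
    if not os.path.exists(file_path):
        return "Error: The uploaded file could not be found."

    # Load and split the PDF, keeping only the first 30 pages as in the commit
    pages = PyPDFLoader(file_path).load_and_split()[:30]

    prompt = PromptTemplate(template=prompt_template,
                            input_variables=["context", "question"])

    # load_qa_chain needs a LangChain chat model, not genai.GenerativeModel;
    # ChatGoogleGenerativeAI reads GOOGLE_API_KEY from the environment.
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)

    # The "stuff" chain fills the {context} slot from input_documents itself,
    # so only the documents and the question are passed in.
    stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    stuff_answer = stuff_chain(
        {"input_documents": pages, "question": question},
        return_only_outputs=True,
    )
    return stuff_answer["output_text"]

Because the chain populates the prompt's {context} from input_documents, the manually built processed_context string is not needed in this variant.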
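The second hunk only appends trailing blank lines after interface.launch(), which suggests the Space front end is a Gradio Interface. A hypothetical wiring consistent with that call, with every component name and label assumed rather than taken from the diff, might look like:

# Hypothetical Gradio wiring implied by the interface.launch() context line;
# the actual component setup in app.py is not visible in this diff.
import gradio as gr

interface = gr.Interface(
    fn=initialize,  # the QA entry point sketched above
    inputs=[gr.File(label="Upload PDF", type="filepath"),
            gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)
interface.launch()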