saritha committed
Commit 93c72f6 · verified · 1 parent: 684b24a

Update app.py

Files changed (1): app.py (+7, -11)
app.py CHANGED
@@ -39,23 +39,19 @@ def initialize(pdf_file, question):
 
     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
 
-    # Generate answer using GeminiPro model
     # Load the GeminiPro model
     model = genai.GenerativeModel('gemini-pro')
-    # model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
 
-    # Ensure your LLM object (model) inherits from Runnable and can generate text
+    # Debugging: Print object type and attributes
+    print(type(model))
+    print(dir(model))  # List attributes and methods
 
-    # Prepare the input data
-    input_data = {
-        "context": processed_context,
-        "question": question
-    }
+    # Check if the model has a 'generate' method (or similar)
+    if not hasattr(model, 'generate'):
+        raise Exception("Your LLM object might not have a text generation method!")
 
-    # Generate the answer using load_qa_chain
     stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-    # stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-    stuff_answer = stuff_chain(input_data, return_only_outputs=True)
+    stuff_answer = stuff_chain(input_data={"context": processed_context, "question": question}, return_only_outputs=True)
 
     # Extract the answer
     generated_answer = stuff_answer['output_text']
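
For reference, the diff leaves a known mismatch in place: load_qa_chain comes from LangChain and expects a LangChain-compatible LLM, while genai.GenerativeModel is the raw google-generativeai client (which exposes generate_content rather than generate, so the new hasattr guard will likely raise). A minimal sketch of the same QA step using the ChatGoogleGenerativeAI wrapper, the alternative left commented out in the old code, could look like the following; the prompt text and sample inputs are placeholders, not taken from app.py:

# Sketch only (not the committed code): assumes the langchain-google-genai
# wrapper, since load_qa_chain expects a LangChain-compatible LLM and the
# raw genai.GenerativeModel client does not implement that interface.
# Requires the GOOGLE_API_KEY environment variable to be set.
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.schema import Document
from langchain_google_genai import ChatGoogleGenerativeAI

# Placeholder prompt; app.py's real prompt_template is not shown in this diff.
prompt_template = """Answer the question using only the context below.

Context: {context}

Question: {question}

Answer:"""
prompt = PromptTemplate(template=prompt_template,
                        input_variables=["context", "question"])

model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

# Placeholders standing in for the PDF text and user question from app.py.
processed_context = "Gemini Pro is a text model in the Gemini family."
question = "Which family does Gemini Pro belong to?"

# A "stuff" chain takes its documents under input_documents (the custom
# prompt's document variable here is "context"), plus any extra prompt
# variables, and returns the answer under output_text.
stuff_answer = stuff_chain(
    {"input_documents": [Document(page_content=processed_context)],
     "question": question},
    return_only_outputs=True,
)
generated_answer = stuff_answer["output_text"]
print(generated_answer)

Note that the classic "stuff" chain expects its documents under input_documents rather than a plain "context" string, which is likely another reason the original stuff_chain(input_data, ...) call needed debugging.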