Update app.py
app.py
CHANGED
@@ -42,7 +42,7 @@ def initialize(pdf_file, question):
     # Generate answer using GeminiPro model
     # Load the GeminiPro model
     model = genai.GenerativeModel('gemini-pro')
-    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+    # model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)

     # Ensure your LLM object (model) inherits from Runnable and can generate text

@@ -54,7 +54,7 @@ def initialize(pdf_file, question):

     # Generate the answer using load_qa_chain
     stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-    stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+    # stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
     stuff_answer = stuff_chain(input_data, return_only_outputs=True)

     # Extract the answer
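For context, a minimal, self-contained sketch of how the pieces this diff touches typically fit together. It assumes the langchain and langchain-google-genai packages are installed and GOOGLE_API_KEY is set in the environment; pdf_text, question, and the prompt template are hypothetical stand-ins for the app's real values, not code from this Space. As the diff's own comment notes, load_qa_chain expects a LangChain Runnable such as ChatGoogleGenerativeAI rather than the raw genai.GenerativeModel handle:

# Minimal sketch; assumes langchain + langchain-google-genai are installed
# and GOOGLE_API_KEY is set. pdf_text and question are hypothetical
# stand-ins for the text the app extracts from the uploaded PDF.
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI

pdf_text = "Example text extracted from the uploaded PDF."
question = "What is this document about?"

# LangChain wrapper around Gemini; this is the kind of object
# load_qa_chain accepts, unlike the raw genai.GenerativeModel handle.
model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)

prompt = PromptTemplate(
    template=(
        "Answer the question using only the context below.\n\n"
        "Context: {context}\n\nQuestion: {question}\n\nAnswer:"
    ),
    input_variables=["context", "question"],
)

# A "stuff" chain concatenates all input documents into the prompt's
# {context} slot and makes a single LLM call.
stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

input_data = {
    "input_documents": [Document(page_content=pdf_text)],
    "question": question,
}
stuff_answer = stuff_chain(input_data, return_only_outputs=True)
print(stuff_answer["output_text"])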