Update app.py
app.py
CHANGED
@@ -1,38 +1,36 @@
 from transformers import BlipForQuestionAnswering, AutoProcessor
 from PIL import Image
 import gradio as gr
-import
+import openai  # For OpenAI API
 
 # Load the BLIP model and processor
 model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
 processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
 
-#
-
-GROQ_API_KEY = "YOUR_GROQ_API_KEY"  # Replace with your actual Groq API key
+# Set your OpenAI API key
+openai.api_key = "YOUR_OPENAI_API_KEY"  # Replace with your OpenAI API key
 
-# Function to generate the initial answer with BLIP and expand it with Groq API
+# Function to generate the initial answer with BLIP and expand it with OpenAI API
 def qna(image, question):
     # Step 1: Get initial short answer from BLIP
     inputs = processor(image, question, return_tensors="pt")
     out = model.generate(**inputs)
     short_answer = processor.decode(out[0], skip_special_tokens=True)
 
-    # Step 2: Construct prompt for Groq API
+    # Step 2: Construct prompt for OpenAI API
     prompt = f"Question: {question}\nShort Answer: {short_answer}\nProvide a detailed explanation based on this answer."
 
-    # Step 3: Send prompt to Groq API
-    … (old lines 25–34: the Groq API request code, not recoverable)
-    detailed_answer = "Failed to get response from Groq API."
+    # Step 3: Send prompt to OpenAI API for a paragraph-length answer
+    try:
+        response = openai.Completion.create(
+            engine="text-davinci-003",  # Specify model
+            prompt=prompt,
+            max_tokens=200  # Adjust max_tokens as needed for response length
+        )
+        detailed_answer = response.choices[0].text.strip()
+    except Exception as e:
+        print(f"Exception occurred: {e}")
+        detailed_answer = "Failed to get response from OpenAI API."
 
     return detailed_answer
 
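Note on line 11: hardcoding a key in app.py publishes it with the Space; both the old Groq key and the new OpenAI key shipped this way, so both should be treated as compromised and rotated. A minimal sketch of the usual fix, assuming a secret named OPENAI_API_KEY has been added in the Space settings (Spaces expose secrets to the app as environment variables); it keeps the pre-1.0 openai style used in this diff:

import os
import openai

# Assumption: OPENAI_API_KEY is configured as a Space secret,
# so it arrives here as an environment variable.
openai.api_key = os.environ.get("OPENAI_API_KEY")
if openai.api_key is None:
    raise RuntimeError("OPENAI_API_KEY is not set")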
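Note on Step 3: openai.Completion.create with engine="text-davinci-003" only exists in the pre-1.0 openai package, and that model has since been retired, so the Space will fail at runtime on a current install. A sketch of the equivalent call against the >=1.0 client; the model name here is an assumption (any chat-capable model would do), not something this commit specifies:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def expand_answer(question: str, short_answer: str) -> str:
    # Same prompt shape as qna() builds in app.py
    prompt = (
        f"Question: {question}\nShort Answer: {short_answer}\n"
        "Provide a detailed explanation based on this answer."
    )
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",  # assumption: placeholder chat model
            messages=[{"role": "user", "content": prompt}],
            max_tokens=200,  # same response-length cap as the diff
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"Exception occurred: {e}")
        return "Failed to get response from OpenAI API."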
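The file imports gradio as gr, but the interface definition falls outside this hunk. A hypothetical wiring for the qna function above, assuming the Space takes an image upload plus a question box; the labels are illustrative and the actual app.py may differ:

import gradio as gr

# Assumption: qna is the function defined earlier in app.py.
demo = gr.Interface(
    fn=qna,
    inputs=[gr.Image(type="pil"),            # PIL image, as processor() expects
            gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Detailed answer"),
)
demo.launch()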