786avinash committed on
Commit
98d3845
·
verified ·
1 Parent(s): fc655be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -30
app.py CHANGED
@@ -1,31 +1,10 @@
import gradio as gr
from transformers import BlipForQuestionAnswering, AutoProcessor
from PIL import Image


# Load the BLIP VQA model and its matching processor once at startup so
# each request only pays for inference, not model loading.
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")


def answer_question(image, question):
    """Answer a natural-language question about an image with BLIP VQA.

    Args:
        image: PIL image supplied by the Gradio Image component.
        question: Question text from the Gradio Textbox component.

    Returns:
        The decoded answer string generated by the model.
    """
    inputs = processor(image, question, return_tensors="pt")
    out = model.generate(**inputs)
    # Decode the first (only) generated sequence, dropping special tokens.
    answer = processor.decode(out[0], skip_special_tokens=True)
    return answer


# FIX: the `gr.inputs` namespace was removed in Gradio 3.x; use the
# top-level component classes (`gr.Image`, `gr.Textbox`) instead.
iface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(label="Enter Your Question"),
    ],
    outputs="text",
    title="BLIP Question Answering",
    description="Upload an image and ask a question to get an answer.",
)


iface.launch()
 
from transformers import BlipForQuestionAnswering
from transformers import AutoProcessor
from PIL import Image
import gradio as gr

# FIX: the committed version referenced `model` and `processor` without
# ever defining them, so every call to qna() raised NameError. Load them
# once at module level (startup cost paid a single time).
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")


def qna(image, question):
    """Answer a natural-language question about an image with BLIP VQA.

    Args:
        image: Image supplied by the Gradio "image" input component.
        question: Question text from the "text" input component.

    Returns:
        The decoded answer string generated by the model.
    """
    inputs = processor(image, question, return_tensors="pt")
    out = model.generate(**inputs)
    # Decode the first (only) generated sequence, dropping special tokens.
    return processor.decode(out[0], skip_special_tokens=True)


interf = gr.Interface(qna, inputs=["image", "text"], outputs="text")
interf.launch()