def get_image_answer(image: Image.Image, question: str) -> str:
    # Most vision processors expect 3-channel input, so normalize to RGB first.
    if image.mode != "RGB":
        image = image.convert("RGB")

    # Build the VQA-style prompt and preprocess the image/text pair together.
    prompt = f"Question: {question} Answer:"
    inputs = processor(images=image, text=prompt, return_tensors="pt")

    # Move every tensor to the target device, casting floating-point tensors
    # (e.g., pixel values) to float16 on GPU to save memory.
    for key in inputs:
        if inputs[key].dtype in [torch.float32, torch.float64]:
            inputs[key] = inputs[key].to(device, torch.float16 if device == "cuda" else torch.float32)
        else:
            inputs[key] = inputs[key].to(device)

    print("Prompt Passed to VLM:", prompt)

    # Generate the answer tokens and decode them back to plain text.
    output_ids = model.generate(**inputs)
    answer = processor.tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()

    print("Model Response:", answer)
    return answer
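Assuming the processor, model, and device globals are already set up as above, calling the helper is straightforward. A minimal usage sketch follows; the image path and question are placeholders, not values from the original code.

from PIL import Image

# Hypothetical example: load a local image and ask a question about it.
image = Image.open("example.jpg")
answer = get_image_answer(image, "What is shown in this image?")
print(answer)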