from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the BLIP processor and captioning model from the local checkpoint.
processor = BlipProcessor.from_pretrained("src/models/Caption")
model = BlipForConditionalGeneration.from_pretrained("src/models/Caption")

def generateCaption(image_path):
    # Open the image and normalize it to RGB (BLIP expects 3-channel input).
    image = Image.open(image_path).convert("RGB")
    # Preprocess into PyTorch tensors in the format the model expects.
    inputs = processor(images=image, return_tensors="pt")
    # Generate caption token IDs, then decode them into plain text.
    output = model.generate(**inputs)
    caption = processor.decode(output[0], skip_special_tokens=True)
    return caption
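
A minimal usage sketch, assuming "src/models/Caption" holds a locally saved BLIP checkpoint (for example, one downloaded from a model such as Salesforce/blip-image-captioning-base) and "photo.jpg" is a placeholder path for any local image:

if __name__ == "__main__":
    # "photo.jpg" is a hypothetical example path; replace it with a real image file.
    print(generateCaption("photo.jpg"))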