from functools import cache

import streamlit as st
from transformers import pipeline

from config import IMAGE_MODEL_NAME
@cache
def load_image_model():
    """
    Load and memoize an image captioning model via the HF pipeline.

    Example model names: "nlpconnect/vit-gpt2-image-captioning" or
    "Salesforce/blip-image-captioning-base".

    Returns:
        The "image-to-text" pipeline built from IMAGE_MODEL_NAME. The result
        is cached with functools.cache so the (expensive) model download and
        weight load happen only once per process instead of on every call.
        NOTE(review): in a pure-Streamlit app, `st.cache_resource` would be
        the framework-idiomatic equivalent — confirm before switching.
    """
    return pipeline("image-to-text", model=IMAGE_MODEL_NAME)
def analyze_image(image_file, image_model):
    """
    Run an uploaded image through the loaded captioning pipeline.

    Args:
        image_file: The image (e.g. a Streamlit upload) handed directly to
            the pipeline.
        image_model: A callable pipeline; expected to return a list of dicts
            with a "generated_text" key.

    Returns:
        The caption string on success, or a human-readable fallback/error
        message — this function never raises, so the UI always gets text.
    """
    try:
        predictions = image_model(image_file)
        # Pipelines normally yield a non-empty list of candidate captions.
        if isinstance(predictions, list) and predictions:
            return predictions[0].get("generated_text", "No caption.")
        return "No output from the model."
    except Exception as exc:
        # Best-effort boundary: surface the failure as display text.
        return f"Error analyzing image: {str(exc)}"