# MTSS.ai VisionTexts™ — Streamlit app that generates accessibility alt text
# for an uploaded image via a Hugging Face vision-language model.
import streamlit as st
import base64
import json
from huggingface_hub import InferenceClient
def get_image_base64(image_file):
    """Read a file-like object and return its contents base64-encoded as UTF-8 text."""
    raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
# --- Page configuration (must be the first Streamlit call in the script) ---
st.set_page_config(
    page_title="MTSS Image Accessibility Alt Text Generator",
    layout="centered",
    initial_sidebar_state="auto",
)

# Branding: render the MTSS.ai logo at a fixed pixel width.
logo_width_px = 300
st.image('MTSS.ai_Logo.png', width=logo_width_px)

st.header('VisionTexts™ | Accessibility')
st.subheader('Image Alt Text Creator')

# Hugging Face inference client, authenticated via Streamlit secrets.
client = InferenceClient(api_key=st.secrets["huggingface_api_key"])

# Image uploader — JPEG and PNG only.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
# --- Upload preview and per-run options -------------------------------------
# Default so later code can reference this name even on reruns where the
# details text area was never rendered (robustness against NameError).
additional_details = ""

if uploaded_file:
    # Show the uploaded image inside a collapsible panel.
    # NOTE: `use_column_width=False` was dropped — it is redundant when an
    # explicit `width=` is passed, and the parameter is deprecated/removed
    # in current Streamlit releases.
    preview_width_px = 200
    with st.expander("Image", expanded=True):
        st.image(uploaded_file, caption=uploaded_file.name, width=preview_width_px)

# Toggle for an optional free-text hint appended to the model prompt below.
show_details = st.checkbox("Add details about the image.", value=False)
if show_details:
    additional_details = st.text_area(
        "The details could include specific information that is important to include in the alt text or reflect why the image is being used:",
    )

# Toggle that switches to the long-form (>125 character) description prompt.
complex_image = st.checkbox("Is this a complex image?", value=False)
if complex_image:
    # Explain the consequence of enabling the long-form description.
    st.caption(
        "By clicking this toggle, it will instruct the app to create a description that exceeds the 125-character limit. "
        "Add the description in a placeholder behind the image and 'Description in the content placeholder' in the alt text box."
    )

# Button that triggers the model call further down.
analyze_button = st.button("Analyze the Image")
# Long-form prompt used when the "complex image" toggle is on: permits up to
# 500 characters / eight sentences instead of the standard 125-character cap.
complex_image_prompt_text = (
    "As an expert in image accessibility and alternative text, "
    "thoroughly describe the image provided. "
    "Provide a brief description using not more than 500 characters "
    "that conveys the essential information in eight or fewer clear and concise sentences. "
    "Skip phrases like 'image of' or 'picture of.' "
    "Your description should form a clear, well-structured, and factual paragraph "
    "that avoids bullet points, focusing on creating a seamless narrative."
)
# --- Analysis: send the image plus prompt to the vision model ----------------
if uploaded_file is not None and analyze_button:
    with st.spinner("Analyzing the image ..."):
        # getvalue() returns the whole buffer regardless of the current read
        # position — st.image() above may already have consumed the stream,
        # so a plain .read() here could return b"".
        image_bytes = uploaded_file.getvalue()
        base64_image_string = base64.b64encode(image_bytes).decode('utf-8')

        # Sniff the format from magic bytes. (The previously used stdlib
        # `imghdr` module is deprecated since Python 3.11 and removed in
        # 3.13; the uploader only accepts JPEG/PNG, so two signatures
        # suffice.)
        if image_bytes.startswith(b"\xff\xd8\xff"):
            image_type = "jpeg"
        elif image_bytes.startswith(b"\x89PNG\r\n\x1a\n"):
            image_type = "png"
        else:
            image_type = None

        if image_type is None:
            st.error("Unsupported image type. Please upload a JPEG or PNG image.")
        else:
            content_type = f"image/{image_type}"

            # Choose the prompt based on the complexity toggle.
            if complex_image:
                prompt_text = complex_image_prompt_text
            else:
                prompt_text = (
                    "As an expert in image accessibility and alternative text, succinctly describe the image provided in less than 125 characters. "
                    "Provide a brief description using not more than 125 characters that conveys the essential information in three or fewer clear and concise sentences for use as alt text. "
                    "Skip phrases like 'image of' or 'picture of.' "
                    "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points and newlines, focusing on creating a seamless narrative for accessibility purposes."
                )

            # Fold in the user's optional context.
            if show_details and additional_details:
                prompt_text += (
                    f"\n\nInclude the additional context provided by the user in your description:\n{additional_details}"
                )

            # The chat-completions API takes images inline as OpenAI-style
            # `image_url` content parts (a data: URL carries raw bytes).
            # There is no `attachments` parameter — the previous request
            # shape raised a TypeError on every call.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt_text},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:{content_type};base64,{base64_image_string}"
                            },
                        },
                    ],
                }
            ]

            # Make the request to the Hugging Face API.
            try:
                completion = client.chat.completions.create(
                    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
                    messages=messages,
                    max_tokens=500,
                )
                # The response message is a dataclass-like object; use
                # attribute access rather than dict subscripting.
                assistant_response = completion.choices[0].message.content
                st.markdown(assistant_response)
                st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
            except Exception as e:
                # Surface any API/network failure to the user.
                st.error(f"An error occurred: {e}")
else:
    # Nudge the user when the button is pressed without an upload.
    if not uploaded_file and analyze_button:
        st.warning("Please upload an image.")