import streamlit as st
import base64
import openai 

# Function to encode the image to base64
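# The raw bytes are base64-encoded so the image can be sent inline as a data URL in the API request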
def encode_image(image_file):
    return base64.b64encode(image_file.getvalue()).decode("utf-8")

# Streamlit page setup
st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="collapsed")

# Add the logo image with a specified width
image_width = 300  # Set the desired width in pixels
st.image('MTSS.ai_Logo.png', width=image_width)

st.title('VisionText™ | Accessibility')
st.subheader(':green[_Image Alt Text Generator_]')

# Retrieve the OpenAI API Key from secrets
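# st.secrets is populated from .streamlit/secrets.toml locally or the app's secrets settings when deployed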
openai.api_key = st.secrets["openai_api_key"]

# File uploader allows user to add their own image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

if uploaded_file:
    # Display the uploaded image with specified width
    image_width = 150  # Set the desired width in pixels
    with st.expander("Image", expanded=True):
        st.image(uploaded_file, caption=uploaded_file.name, width=image_width, use_column_width=False)

# Toggle for showing additional details input
show_details = st.toggle("Optional: Add details about the image. "
                         "Details can include specific information that should appear in the alt text or the reason the image is being used.", value=False)

if show_details:
    # Text input for additional details about the image, shown only if toggle is True
    additional_details = st.text_area(
        "Add any additional details or context about the image here:",
        disabled=not show_details
    )

# Button to trigger the analysis
analyze_button = st.button("Analyze the Image", type="secondary")

# Check if an image has been uploaded and if the button has been pressed
if uploaded_file is not None and analyze_button:

    with st.spinner("Analyzing the image ..."):
        # Encode the image
        base64_image = encode_image(uploaded_file)
    
        # Optimized prompt for additional clarity and detail
        prompt_text = (
            "As an expert in image accessibility and alternative text, succinctly describe the image provided in less than 150 characters. "
            "Provide a brief description that conveys the essential information conveyed by the image in three or fewer clear and concise sentences for use as alt text."
            "Skip phrases like 'image of' or 'picture of.'"
            "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points, focusing on creating a seamless narrative that serves as effective alternative text for accessibility purposes."
        )
    
        if show_details and additional_details:
            prompt_text += (
                f"\n\nAdditional Context Provided by the User:\n{additional_details}"
            )
    
        # Create the payload for the completion request
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt_text},
                    {
                        "type": "image_url",
                        # Send the uploaded image inline as a base64 data URL
                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                    },
                ],
            }
        ]
    
        # Make the request to the OpenAI API
        try:
            # Stream the completion and accumulate the generated text
            full_response = ""
            for completion in openai.chat.completions.create(
                model="gpt-4-vision-preview", messages=messages,
                max_tokens=250, stream=True
            ):
                # Append each streamed chunk that contains content
                if completion.choices[0].delta.content is not None:
                    full_response += completion.choices[0].delta.content

            # Display the response in a text area
            st.text_area('Response:', value=full_response, height=400, key="response_text_area")
            
            st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
        except Exception as e:
            st.error(f"An error occurred: {e}")
else:
    # Warnings for user action required
    if not uploaded_file and analyze_button:
        st.warning("Please upload an image.")