import streamlit as st
import base64
import openai

# Function to encode the image to base64
def encode_image(image_file):
    return base64.b64encode(image_file.getvalue()).decode("utf-8")
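
# Note: the base64 string returned above is embedded later in a "data:image/jpeg;base64,..."
# URL so the uploaded image can be sent inline with the chat completion request.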

# Streamlit page setup
st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="collapsed")
st.title("MTSS Snapshot: Accessibility Image Textifier: `Alt Text`")

# Retrieve the OpenAI API key from Streamlit secrets and set it
openai.api_key = st.secrets.get("openai_api_key")
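# The key is read from .streamlit/secrets.toml; an entry like the following is assumed:
#   openai_api_key = "sk-..."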

# File uploader for images
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

if uploaded_file:
    with st.expander("Image", expanded=True):
        st.image(uploaded_file, caption=uploaded_file.name, use_column_width=True)

    # Toggle for additional details input
    show_details = st.checkbox("Add details about the image")

    if show_details:
        # Text input for additional details about the image, shown only when the toggle is on
        additional_details = st.text_area(
            "Add any additional details or context about the image here:"
        )

    # Button to trigger the analysis
    analyze_button = st.button("Analyse the MTSS Image")

    if analyze_button:
        with st.spinner("Analyzing the image..."):
            base64_image = encode_image(uploaded_file)
            prompt_text = (
                "You are a highly knowledgeable accessibility expert. "
                "Your task is to examine the following image in detail. "
                "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
                "Highlight key elements and their significance, and present your analysis in a clear, well-structured format. "
                "Create a detailed image caption of 150 words or less."
            )

            if show_details and additional_details:
                prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"

            # Create the payload for the completion request
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt_text},
                        {
                            # The image is sent inline as a base64-encoded data URL
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                        },
                    ],
                }
            ]

            # Make the request to the OpenAI API
            try:
                # Without Stream
                
                # response = openai.chat.completions.create(
                #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
                # )
        
                # Stream the response
                full_response = ""
                message_placeholder = st.empty()
                for completion in openai.chat.completions.create(
                    model="gpt-4-vision-preview", messages=messages, 
                    max_tokens=150, stream=True
                ):
                    # Check if there is content to display
                    if completion.choices[0].delta.content is not None:
                        full_response += completion.choices[0].delta.content
                        message_placeholder.markdown(full_response + "▌")
                # Final update to placeholder after the stream ends
                message_placeholder.markdown(full_response)
        
                # Display the response in the app
                # st.write(response.choices[0].message.content)
            except Exception as e:
                st.error(f"An error occurred: {e}")
else:
    # Warnings for user action required
    st.warning("Please upload an image.")
    if not openai.api_key:
        st.warning("Please add your OpenAI API key to the Streamlit secrets.")