ProfessorLeVesseur committed
Commit 8ec3355 · verified · 1 Parent(s): a732ab3

Update app.py

Files changed (1):
  app.py  +56 -10
app.py CHANGED
@@ -116,6 +116,7 @@
 
 
 
+
 import streamlit as st
 import base64
 import openai
@@ -127,26 +128,42 @@ def encode_image(image_file):
 # Streamlit page setup
 st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="collapsed")
 
+#Add the image with a specified width
+image_width = 300 # Set the desired width in pixels
+st.image('MTSS.ai_Logo.png', width=image_width)
+
+st.title('VisionText™ | Accessibility')
+st.subheader(':green[_Image Alt Text Generator_]')
+
+# Retrieve the OpenAI API Key from secrets
+openai.api_key = st.secrets["openai_api_key"]
+
 # File uploader allows user to add their own image
 uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
 
+if uploaded_file:
+    # Display the uploaded image with specified width
+    image_width = 150 # Set the desired width in pixels
+    with st.expander("Image", expanded=True):
+        st.image(uploaded_file, caption=uploaded_file.name, width=image_width, use_column_width=False)
+
 # Toggle for showing additional details input
-show_details = st.checkbox("Optional: Add details about the image. "
+show_details = st.toggle("Optional: Add details about the image. "
     "The details could include specific information that is important to include in the alt text or reflect why the image is being used.", value=False)
 
 # Toggle for modifying the prompt for complex images
-complex_image = st.checkbox("Is this a complex image? "
-    "By checking this box, it will inform MTSS.ai to create a robust description that exceeds the 124 character limit. You will add the description to a placeholder behind the image. Add 'Description in the content placeholder' in the alt text box.", value=False)
+complex_image = st.toggle("Is this a complex image? "
+    "By checking this box, it will inform MTSS.ai to create a robust description that exceeds the 125 character limit. You will add the description to a placeholder behind the image. Add 'Description in the content placeholder' in the alt text box.", value=False)
 
 if show_details:
-    # Text input for additional details about the image, shown only if checkbox is True
+    # Text input for additional details about the image, shown only if toggle is True
     additional_details = st.text_area(
         "Add any additional details or context about the image here:",
         disabled=not show_details
     )
 
 # Button to trigger the analysis
-analyze_button = st.button("Analyze the Image")
+analyze_button = st.button("Analyze the Image", type="secondary")
 
 # Optimized prompt for complex images
 complex_image_prompt_text = (
@@ -162,7 +179,7 @@ if uploaded_file is not None and analyze_button:
     with st.spinner("Analyzing the image ..."):
         # Encode the image
        base64_image = encode_image(uploaded_file)
-
+
         # Determine which prompt to use based on the complexity of the image
         if complex_image:
             prompt_text = complex_image_prompt_text
@@ -173,10 +190,13 @@ if uploaded_file is not None and analyze_button:
                 "Skip phrases like 'image of' or 'picture of.' "
                 "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points, focusing on creating a seamless narrative that serves as effective alternative text for accessibility purposes."
             )
-
+
         if show_details and additional_details:
-            prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"
+            prompt_text += (
+                f"\n\nAdditional Context Provided by the User:\n{additional_details}"
+            )
 
+
         # Create the payload for the completion request
         messages = [
             {
@@ -190,14 +210,40 @@ if uploaded_file is not None and analyze_button:
                 ],
             }
         ]
-
+
         # Make the request to the OpenAI API
         try:
+            # Without Stream
+
+            # response = openai.chat.completions.create(
+            #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
+            # )
+
+            # Stream the response
             full_response = ""
-            for completion in openai.ChatCompletion.create(
+            message_placeholder = st.empty()
+            for completion in openai.chat.completions.create(
                 model="gpt-4-vision-preview", messages=messages,
                 max_tokens=250, stream=True
             ):
+                # # Check if there is content to display
+                # if completion.choices[0].delta.content is not None:
+                #     full_response += completion.choices[0].delta.content
+                #     message_placeholder.markdown(full_response + "▌")
+                # # Final update to placeholder after the stream ends
+                # message_placeholder.markdown(full_response) # stream text
+
+                # Check if there is content to display
                 if completion.choices[0].delta.content is not None:
                     full_response += completion.choices[0].delta.content
 
+            # Display the response in a text area
+            st.text_area('Response:', value=full_response, height=400, key="response_text_area")
+
+            st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
+        except Exception as e:
+            st.error(f"An error occurred: {e}")
+else:
+    # Warnings for user action required
+    if not uploaded_file and analyze_button:
+        st.warning("Please upload an image.")
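Note: the hunks above call an encode_image helper (its definition line appears in the second hunk header, def encode_image(image_file):), but the function body itself lies outside the changed ranges. As a rough sketch only, assuming the helper simply base64-encodes the uploaded file so it can be embedded in a data-URI image payload, it would look something like this:

    import base64

    def encode_image(image_file):
        # Illustrative sketch; the real helper in app.py is not shown in this diff.
        # Rewind the uploaded file and return its bytes as a base64 string.
        image_file.seek(0)
        return base64.b64encode(image_file.read()).decode("utf-8")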
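Note: the contents of the messages payload sit between the hunks and are not part of this diff. For orientation only, a typical GPT-4 Vision chat payload pairs the prompt text with the base64-encoded image as a data URI, roughly as sketched below; the exact structure app.py builds may differ:

    # Hypothetical illustration of a vision-style message payload.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt_text},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                },
            ],
        }
    ]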
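Note: the main functional change in the last hunk is the move from the legacy openai.ChatCompletion.create call to openai.chat.completions.create, the OpenAI Python SDK v1 interface, with the response streamed, accumulated, and then shown in a text area. A minimal, self-contained sketch of the same streaming pattern using the v1 client object, assuming the key is still read from st.secrets["openai_api_key"] and the model name stays gpt-4-vision-preview:

    import streamlit as st
    from openai import OpenAI

    client = OpenAI(api_key=st.secrets["openai_api_key"])

    # Placeholder payload; app.py builds a richer vision payload as sketched above.
    messages = [{"role": "user", "content": "Describe this image for alt text."}]

    full_response = ""
    stream = client.chat.completions.create(
        model="gpt-4-vision-preview",
        messages=messages,
        max_tokens=250,
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:  # skip chunks that carry no text, e.g. role or finish markers
            full_response += delta

    st.text_area('Response:', value=full_response, height=400)

The commented-out message_placeholder lines in the diff show the alternative approach of rendering partial output live through st.empty() and markdown updates instead of waiting for the full response.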