ProfessorLeVesseur committed · Commit 4996d49 · verified · 1 Parent(s): a2d596c

Update app.py

Files changed (1):
  1. app.py +63 -63

app.py CHANGED
@@ -20,76 +20,76 @@ if uploaded_file:
     with st.expander("Image", expanded=True):
         st.image(uploaded_file, caption=uploaded_file.name, use_column_width=True)

-    # Toggle for additional details input
-    show_details = st.checkbox("Add details about the image")

-    if show_details:
-        # Text input for additional details about the image, shown only if the toggle is True
         additional_details = st.text_area(
             "Add any additional details or context about the image here:",
             disabled=not show_details
         )

-    # Button to trigger the analysis
-    analyze_button = st.button("Analyse the MTSS Image")

-    if analyze_button:
-        with st.spinner("Analyzing the image..."):
-            base64_image = encode_image(uploaded_file)
-            prompt_text = (
-                "You are a highly knowledgeable accessibility expert. "
-                "Your task is to examine the following image in detail. "
-                "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
-                "Highlight key elements and their significance, and present your analysis in clear, well-structured format. "
-                "Create a detailed image caption explaining in 150 words or less. "
-            )

-            if show_details and additional_details:
-                prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"

-            # Create the payload for the completion request
-            messages = [
-                {
-                    "role": "user",
-                    "content": [
-                        {"type": "text", "text": prompt_text},
-                        {
-                            "type": "image_url",
-                            "image_url": f"data:image/jpeg;base64,{base64_image}",
-                        },
-                    ],
-                }
-            ]

-            # Make the request to the OpenAI API
-            try:
-                # Without Stream
-
-                # response = openai.chat.completions.create(
-                #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
-                # )
-
-                # Stream the response
-                full_response = ""
-                message_placeholder = st.empty()
-                for completion in openai.chat.completions.create(
-                    model="gpt-4-vision-preview", messages=messages,
-                    max_tokens=150, stream=True
-                ):
-                    # Check if there is content to display
-                    if completion.choices[0].delta.content is not None:
-                        full_response += completion.choices[0].delta.content
-                        message_placeholder.markdown(full_response + "▌")
-                # Final update to placeholder after the stream ends
-                message_placeholder.markdown(full_response)
-
-                # Display the response in the app
-                # st.write(response.choices[0].message.content)
-            except Exception as e:
-                st.error(f"An error occurred: {e}")
-else:
-    # Warnings for user action required
-    if not uploaded_file and analyze_button:
-        st.warning("Please upload an image.")
-    if not api_key:
-        st.warning("Please enter your OpenAI API key.")
 
     with st.expander("Image", expanded=True):
         st.image(uploaded_file, caption=uploaded_file.name, use_column_width=True)

+    # Toggle for additional details input
+    show_details = st.checkbox("Add details about the image")

+    if show_details:
+        # Text input for additional details about the image, shown only if toggle is True
         additional_details = st.text_area(
             "Add any additional details or context about the image here:",
             disabled=not show_details
         )

+    # Button to trigger the analysis
+    analyze_button = st.button("Analyse the MTSS Image")

+    if analyze_button:
+        with st.spinner("Analyzing the image..."):
+            base64_image = encode_image(uploaded_file)
+            prompt_text = (
+                "You are a highly knowledgeable accessibility expert. "
+                "Your task is to examine the following image in detail. "
+                "Provide a comprehensive, factual, and accurate explanation of what the image depicts. "
+                "Highlight key elements and their significance, and present your analysis in clear, well-structured format. "
+                "Create a detailed image caption explaining in 150 words or less. "
+            )

+            if show_details and additional_details:
+                prompt_text += f"\n\nAdditional Context Provided by the User:\n{additional_details}"

+            # Create the payload for the completion request
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": prompt_text},
+                        {
+                            "type": "image_url",
+                            "image_url": f"data:image/jpeg;base64,{base64_image}",
+                        },
+                    ],
+                }
+            ]

+            # Make the request to the OpenAI API
+            try:
+                # Without Stream
+
+                # response = openai.chat.completions.create(
+                #     model="gpt-4-vision-preview", messages=messages, max_tokens=500, stream=False
+                # )
+
+                # Stream the response
+                full_response = ""
+                message_placeholder = st.empty()
+                for completion in openai.chat.completions.create(
+                    model="gpt-4-vision-preview", messages=messages,
+                    max_tokens=150, stream=True
+                ):
+                    # Check if there is content to display
+                    if completion.choices[0].delta.content is not None:
+                        full_response += completion.choices[0].delta.content
+                        message_placeholder.markdown(full_response + "▌")
+                # Final update to placeholder after the stream ends
+                message_placeholder.markdown(full_response)
+
+                # Display the response in the app
+                # st.write(response.choices[0].message.content)
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
+else:
+    # Warnings for user action required
+    if not uploaded_file and analyze_button:
+        st.warning("Please upload an image.")
+    if not api_key:
+        st.warning("Please enter your OpenAI API key.")
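Note: the hunk calls an encode_image helper that is defined elsewhere in app.py and is not part of this diff. As a hedged sketch only (not the committed implementation), such a helper would presumably base64-encode the bytes of the Streamlit upload so they can be embedded in the data:image/jpeg;base64,... URL built into the message payload:

import base64

def encode_image(uploaded_file):
    # Assumed helper, not shown in this hunk: read the Streamlit UploadedFile
    # and return its contents as a base64 string for the image_url data URI.
    return base64.b64encode(uploaded_file.getvalue()).decode("utf-8")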