IAMTFRMZA committed (verified)
Commit 607c738 · 1 Parent(s): 2466a8c

Update app.py

Files changed (1): app.py (+150 -86)
app.py CHANGED
@@ -3,6 +3,7 @@ import os
 import time
 import re
 import requests
+import json
 from PIL import Image
 from io import BytesIO
 from openai import OpenAI
@@ -33,12 +34,11 @@ if not st.session_state.authenticated:
     login()
     st.stop()

-# ------------------ App Configuration ------------------
+# ------------------ App Config ------------------
 st.set_page_config(page_title="AI Pathology Assistant", layout="wide", initial_sidebar_state="collapsed")
 st.title("🧬 AI Pathology Assistant")
-st.caption("AI-powered exploration of pathology, anatomy, and histology documents via OCR + GPT")

-# ------------------ Load OpenAI API Key ------------------
+# ------------------ Load OpenAI ------------------
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
 if not OPENAI_API_KEY:
     st.error("❌ Missing OPENAI_API_KEY environment variable.")
@@ -46,10 +46,10 @@ if not OPENAI_API_KEY:

 client = OpenAI(api_key=OPENAI_API_KEY)

-# ------------------ Assistant Configuration ------------------
-ASSISTANT_ID = "asst_jXDSjCG8LI4HEaFEcjFVq8KB"
+# ------------------ Assistant Setup ------------------
+ASSISTANT_ID = "asst_9v09zgizdcuuhNdcFQpRo9RO"

-# ------------------ Session State ------------------
+# ------------------ State ------------------
 if "messages" not in st.session_state:
     st.session_state.messages = []
 if "thread_id" not in st.session_state:
@@ -59,107 +59,171 @@ if "image_urls" not in st.session_state:
 if "pending_prompt" not in st.session_state:
     st.session_state.pending_prompt = None

-# ------------------ Sidebar ------------------
-with st.sidebar:
-    st.header("🧪 Pathology Tools")
-    if st.button("🧹 Clear Chat"):
-        st.session_state.messages = []
-        st.session_state.thread_id = None
-        st.session_state.image_urls = []
-        st.session_state.pending_prompt = None
-        st.rerun()
-
-    show_image = st.toggle("📸 Show Images", value=True)
-    keyword = st.text_input("Keyword Search", placeholder="e.g. mitosis, carcinoma")
-    if st.button("🔎 Search") and keyword:
-        st.session_state.pending_prompt = f"Find clauses or references related to: {keyword}"
-
-    section = st.text_input("Section Lookup", placeholder="e.g. Connective Tissue")
-    if section:
-        st.session_state.pending_prompt = f"Summarize or list key points from section: {section}"
-
-    actions = [
-        "Select an action...",
-        "List histological features of inflammation",
-        "Summarize features of carcinoma",
-        "List muscle types and features",
-        "Extract diagnostic markers",
-        "Summarize embryology stages"
-    ]
-    action = st.selectbox("Common Pathology Queries", actions)
-    if action != actions[0]:
-        st.session_state.pending_prompt = action
-
-# ------------------ Main Chat UI ------------------
-chat_col, image_col = st.columns([2, 1])
-
-with chat_col:
-    st.markdown("### 💬 Ask a Pathology-Specific Question")
-    user_input = st.chat_input("Example: What are features of squamous cell carcinoma?")
-    if user_input:
-        st.session_state.messages.append({"role": "user", "content": user_input})
-    elif st.session_state.pending_prompt:
-        st.session_state.messages.append({"role": "user", "content": st.session_state.pending_prompt})
-        st.session_state.pending_prompt = None
-
-    if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
+# ------------------ Tabs ------------------
+tab1, tab2 = st.tabs(["💬 Chat Assistant", "🖼️ Visual Reference Search"])
+
+# ------------------ Tab 1: Chat Assistant ------------------
+with tab1:
+    with st.sidebar:
+        st.header("🧪 Pathology Tools")
+        if st.button("🧹 Clear Chat"):
+            st.session_state.messages = []
+            st.session_state.thread_id = None
+            st.session_state.image_urls = []
+            st.session_state.pending_prompt = None
+            st.rerun()
+
+        show_image = st.toggle("📸 Show Images", value=True)
+        keyword = st.text_input("Keyword Search", placeholder="e.g. mitosis, carcinoma")
+        if st.button("🔎 Search") and keyword:
+            st.session_state.pending_prompt = f"Find clauses or references related to: {keyword}"
+
+        section = st.text_input("Section Lookup", placeholder="e.g. Connective Tissue")
+        if section:
+            st.session_state.pending_prompt = f"Summarize or list key points from section: {section}"
+
+        actions = [
+            "Select an action...",
+            "List histological features of inflammation",
+            "Summarize features of carcinoma",
+            "List muscle types and features",
+            "Extract diagnostic markers",
+            "Summarize embryology stages"
+        ]
+        action = st.selectbox("Common Pathology Queries", actions)
+        if action != actions[0]:
+            st.session_state.pending_prompt = action
+
+    chat_col, image_col = st.columns([2, 1])
+
+    with chat_col:
+        st.markdown("### 💬 Ask a Pathology-Specific Question")
+        user_input = st.chat_input("Example: What are features of squamous cell carcinoma?")
+        if user_input:
+            st.session_state.messages.append({"role": "user", "content": user_input})
+        elif st.session_state.pending_prompt:
+            st.session_state.messages.append({"role": "user", "content": st.session_state.pending_prompt})
+            st.session_state.pending_prompt = None
+
+        if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
+            try:
+                if st.session_state.thread_id is None:
+                    thread = client.beta.threads.create()
+                    st.session_state.thread_id = thread.id
+
+                client.beta.threads.messages.create(
+                    thread_id=st.session_state.thread_id,
+                    role="user",
+                    content=st.session_state.messages[-1]["content"]
+                )
+
+                run = client.beta.threads.runs.create(
+                    thread_id=st.session_state.thread_id,
+                    assistant_id=ASSISTANT_ID
+                )
+
+                with st.spinner("🔬 Analyzing..."):
+                    while True:
+                        status = client.beta.threads.runs.retrieve(thread_id=st.session_state.thread_id, run_id=run.id)
+                        if status.status in ("completed", "failed", "cancelled"):
+                            break
+                        time.sleep(1)
+
+                if status.status == "completed":
+                    messages = client.beta.threads.messages.list(thread_id=st.session_state.thread_id)
+                    for m in reversed(messages.data):
+                        if m.role == "assistant":
+                            reply = m.content[0].text.value
+                            st.session_state.messages.append({"role": "assistant", "content": reply})
+                            image_matches = re.findall(
+                                r'https://raw\.githubusercontent\.com/AndrewLORTech/witspathologai/main/[^\s\n"]+\.png',
+                                reply
+                            )
+                            st.session_state.image_urls = image_matches
+                            break
+                else:
+                    st.error("❌ Assistant failed to respond.")
+                st.rerun()
+            except Exception as e:
+                st.error(f"❌ Error: {e}")
+
+        for msg in st.session_state.messages:
+            with st.chat_message(msg["role"]):
+                st.markdown(msg["content"], unsafe_allow_html=True)
+
+    with image_col:
+        if show_image and st.session_state.image_urls:
+            st.markdown("### 🖼️ Image(s)")
+            for raw_url in st.session_state.image_urls:
+                try:
+                    r = requests.get(raw_url, timeout=5)
+                    r.raise_for_status()
+                    img = Image.open(BytesIO(r.content))
+                    st.image(img, caption=f"📷 {raw_url.split('/')[-1]}", use_container_width=True)
+                except Exception:
+                    continue
+
+# ------------------ Tab 2: Visual Search ------------------
+with tab2:
+    st.title("🔍 Visual Reference Search")
+    user_query = st.text_input("Enter keyword to search images (e.g. ovary, thyroid, mitosis)")
+
+    if "image_thread_id" not in st.session_state:
+        st.session_state.image_thread_id = None
+    if "image_response" not in st.session_state:
+        st.session_state.image_response = None
+
+    if st.button("Ask Assistant") and user_query:
         try:
-            if st.session_state.thread_id is None:
+            # Create thread if needed
+            if st.session_state.image_thread_id is None:
                 thread = client.beta.threads.create()
-                st.session_state.thread_id = thread.id
+                st.session_state.image_thread_id = thread.id

+            # Send user message
             client.beta.threads.messages.create(
-                thread_id=st.session_state.thread_id,
+                thread_id=st.session_state.image_thread_id,
                 role="user",
-                content=st.session_state.messages[-1]["content"]
+                content=user_query
             )

+            # Run assistant
             run = client.beta.threads.runs.create(
-                thread_id=st.session_state.thread_id,
-                assistant_id=ASSISTANT_ID
+                thread_id=st.session_state.image_thread_id,
+                assistant_id="asst_9v09zgizdcuuhNdcFQpRo9RO"
             )

-            with st.spinner("🔬 Analyzing..."):
+            with st.spinner("🔬 Searching for visual references..."):
                 while True:
-                    status = client.beta.threads.runs.retrieve(thread_id=st.session_state.thread_id, run_id=run.id)
-                    if status.status in ("completed", "failed", "cancelled"):
+                    run_status = client.beta.threads.runs.retrieve(thread_id=st.session_state.image_thread_id, run_id=run.id)
+                    if run_status.status in ("completed", "failed", "cancelled"):
                         break
                     time.sleep(1)

-            if status.status == "completed":
-                messages = client.beta.threads.messages.list(thread_id=st.session_state.thread_id)
+            if run_status.status == "completed":
+                messages = client.beta.threads.messages.list(thread_id=st.session_state.image_thread_id)
                 for m in reversed(messages.data):
                     if m.role == "assistant":
-                        reply = m.content[0].text.value
-                        st.session_state.messages.append({"role": "assistant", "content": reply})
-
-                        # Extract GitHub raw image URLs (already encoded by assistant)
-                        image_matches = re.findall(
-                            r'https://raw\.githubusercontent\.com/AndrewLORTech/witspathologai/main/[^\s\n"]+\.png',
-                            reply
-                        )
-                        st.session_state.image_urls = image_matches
+                        st.session_state.image_response = m.content[0].text.value
                         break
             else:
-                st.error("❌ Assistant failed to respond.")
-            st.rerun()
+                st.error("❌ Assistant failed to return an image match.")
         except Exception as e:
-            st.error(f"❌ Error: {e}")
+            st.error(f"Error: {e}")

-    for msg in st.session_state.messages:
-        with st.chat_message(msg["role"]):
-            st.markdown(msg["content"], unsafe_allow_html=True)
+    # Render assistant's structured image reply
+    if st.session_state.image_response:
+        st.markdown("### 🧠 Assistant Response")
+        st.markdown(st.session_state.image_response, unsafe_allow_html=True)

-# ------------------ Image Preview (No Re-Encoding) ------------------
-with image_col:
-    if show_image and st.session_state.image_urls:
-        st.markdown("### 🖼️ Image(s)")
-        for raw_url in st.session_state.image_urls:
+        # Extract and render image(s)
+        image_links = re.findall(r'https://raw\\.githubusercontent\\.com/[^\s"\')]+\\.png', st.session_state.image_response)
+        for img_url in image_links:
             try:
-                # ✅ Use assistant's already-encoded URL directly
-                r = requests.get(raw_url, timeout=5)
+                r = requests.get(img_url, timeout=5)
                 r.raise_for_status()
                 img = Image.open(BytesIO(r.content))
-                st.image(img, caption=f"📷 {raw_url.split('/')[-1]}", use_container_width=True)
-            except Exception:
-                continue # Silently skip broken images
+                st.image(img, caption=img_url.split('/')[-1], use_container_width=True)
+            except:
+                st.warning(f"⚠️ Failed to load: {img_url}")
+
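
A note on the two URL-extraction patterns in this diff: tab 1 keeps the original pattern, which escapes only the dots ('\.'), while the new tab 2 pattern is written with '\\.' inside a raw string. The regex engine reads '\\.' as "a literal backslash followed by any character", so the tab 2 pattern finds nothing in an ordinary raw.githubusercontent.com link. A minimal check, using a made-up reply string (not real repository output) with a hypothetical page_001.png filename:

import re

# Made-up assistant reply for illustration only; the URL mimics the repo's path style.
sample_reply = (
    "See https://raw.githubusercontent.com/AndrewLORTech/witspathologai/main/page_001.png "
    "for the relevant slide."
)

# Tab 1 pattern: \. escapes only the dot, so normal URLs match.
tab1_pattern = r'https://raw\.githubusercontent\.com/AndrewLORTech/witspathologai/main/[^\s\n"]+\.png'

# Tab 2 pattern as committed: \\. means "literal backslash, then any character",
# so it only matches URLs that actually contain backslashes.
tab2_pattern = r'https://raw\\.githubusercontent\\.com/[^\s"\')]+\\.png'

print(re.findall(tab1_pattern, sample_reply))  # one match: the full PNG URL
print(re.findall(tab2_pattern, sample_reply))  # no matches: []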
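
Both tabs poll client.beta.threads.runs.retrieve once per second inside a while True loop, so a run that never reaches completed, failed, or cancelled keeps the spinner alive indefinitely. Below is a minimal sketch of the same polling pattern with a deadline; the helper name and timeout value are illustrative assumptions, not part of this commit:

import time

def wait_for_run(client, thread_id, run_id, timeout_s=120, poll_s=1.0):
    """Poll a run until it reaches a terminal state or the deadline passes.

    Hypothetical helper sketching the loop both tabs use; the timeout is an
    added safeguard and is not part of the committed code.
    """
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        if run.status in ("completed", "failed", "cancelled"):
            return run
        time.sleep(poll_s)
    raise TimeoutError(f"Run {run_id} did not finish within {timeout_s} seconds")

Inside the app, such a helper could back the spinner block (e.g. status = wait_for_run(client, st.session_state.thread_id, run.id)) while leaving the completed/failed handling unchanged.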