Update app.py
app.py
CHANGED
@@ -2,9 +2,6 @@ import streamlit as st
 import os
 import time
 import re
-import json
-import requests
-from PIL import Image
 from openai import OpenAI

 # ------------------ App Configuration ------------------
@@ -12,25 +9,17 @@ st.set_page_config(page_title="Document AI Assistant", layout="wide")
 st.title("Document AI Assistant")
 st.caption("Chat with an AI Assistant on your medical/pathology documents")

-# ------------------ Load API Key and Assistant ID ------------------
+# ------------------ Load API Key and Assistant ID from Hugging Face Secrets ------------------
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
 ASSISTANT_ID = os.environ.get("ASSISTANT_ID")

+# ------------------ Error Handling for Missing Secrets ------------------
 if not OPENAI_API_KEY or not ASSISTANT_ID:
     st.error("Missing secrets. Please ensure both OPENAI_API_KEY and ASSISTANT_ID are set in your Hugging Face Space secrets.")
     st.stop()

 client = OpenAI(api_key=OPENAI_API_KEY)

-# ------------------ Load Structured JSON ------------------
-STRUCTURED_JSON_PATH = "51940670-Manual-of-Surgical-Pathology-Third-Edition_1_structured_output.json"
-try:
-    with open(STRUCTURED_JSON_PATH, "r") as f:
-        structured_data = json.load(f)
-except Exception as e:
-    st.error(f"Failed to load structured summary file: {e}")
-    st.stop()
-
 # ------------------ Session State Initialization ------------------
 if "messages" not in st.session_state:
     st.session_state.messages = []
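The chat handler added in the next hunk reads st.session_state.thread_id, st.session_state.image_url, and st.session_state.image_updated, so the collapsed region between this hunk (ending at new line 25) and the next one (starting at new line 41) presumably initializes them alongside messages. A minimal sketch of what that section is assumed to look like; the exact defaults are illustrative, not taken from the diff:

# Assumed shape of the collapsed session-state setup in app.py (not shown in this diff)
if "messages" not in st.session_state:
    st.session_state.messages = []          # chat history rendered in the right column
if "thread_id" not in st.session_state:
    st.session_state.thread_id = None       # Assistants API thread, created lazily on first prompt
if "image_url" not in st.session_state:
    st.session_state.image_url = None       # last document image URL extracted from a reply
if "image_updated" not in st.session_state:
    st.session_state.image_updated = False  # flag reset after the image panel renders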
@@ -52,46 +41,69 @@ if st.sidebar.button("Clear Chat"):

 show_image = st.sidebar.checkbox("Show Document Image", value=True)

-# ------------------
-… (removed old lines 56-83, the rest of the old query_assistant helper, are not rendered in this view)
+# ------------------ Split Layout ------------------
+col1, col2 = st.columns([1, 2])  # Adjust ratio as needed
+
+# ------------------ Image Panel (Left) ------------------
+with col1:
+    if show_image and st.session_state.image_url:
+        st.image(st.session_state.image_url, caption="Extracted Page", use_container_width=True)
+        st.session_state.image_updated = False  # Reset flag after rendering
+
+# ------------------ Chat Panel (Right) ------------------
+with col2:
+    for message in st.session_state.messages:
+        role, content = message["role"], message["content"]
+        st.chat_message(role).write(content)
+
+    if prompt := st.chat_input("Type your question about the document..."):
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        st.chat_message("user").write(prompt)
+
+        try:
+            # Initialize thread if needed
+            if st.session_state.thread_id is None:
+                thread = client.beta.threads.create()
+                st.session_state.thread_id = thread.id
+
+            thread_id = st.session_state.thread_id
+
+            # Send message to assistant
+            client.beta.threads.messages.create(
+                thread_id=thread_id,
+                role="user",
+                content=prompt
+            )
+
+            # Run assistant
+            run = client.beta.threads.runs.create(
+                thread_id=thread_id,
+                assistant_id=ASSISTANT_ID
+            )
+
+            # Wait for assistant response
+            with st.spinner("Assistant is thinking..."):
+                while True:
+                    run_status = client.beta.threads.runs.retrieve(
+                        thread_id=thread_id,
+                        run_id=run.id
+                    )
+                    if run_status.status == "completed":
+                        break
+                    time.sleep(1)
+
+            # Get assistant response
+            messages = client.beta.threads.messages.list(thread_id=thread_id)
+            assistant_message = None
+            for message in reversed(messages.data):
+                if message.role == "assistant":
+                    assistant_message = message.content[0].text.value
                     break
-                time.sleep(1)

-
-
-        latest_message = messages.data[0]
-        if latest_message.role == "assistant":
-            assistant_message = latest_message.content[0].text.value
-            st.session_state.messages.insert(0, {"role": "assistant", "content": assistant_message})
+            st.chat_message("assistant").write(assistant_message)
+            st.session_state.messages.append({"role": "assistant", "content": assistant_message})

-            # Extract GitHub image
+            # Extract GitHub image from response if available
             image_match = re.search(
                 r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
                 assistant_message
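The added run loop polls client.beta.threads.runs.retrieve once per second and only exits on status == "completed", so a failed, cancelled, or expired run would keep the spinner alive indefinitely. A minimal sketch of a bounded variant, assuming the same client, thread_id, and run objects as in the hunk above; the poll_run name and the timeout value are illustrative and not part of the app:

import time

def poll_run(client, thread_id, run_id, timeout_s=120, interval_s=1.0):
    """Poll an Assistants API run until it reaches a terminal state or the deadline passes (sketch)."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        # Stop on any terminal state, not just "completed", so failures surface instead of hanging.
        if run_status.status in ("completed", "failed", "cancelled", "expired", "requires_action"):
            return run_status
        time.sleep(interval_s)
    raise TimeoutError(f"Run {run_id} did not finish within {timeout_s}s")

With a helper like this, the while True block inside the spinner could become run_status = poll_run(client, thread_id, run.id), followed by a check that run_status.status == "completed" before reading the thread messages.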
@@ -99,39 +111,7 @@ def query_assistant(prompt):
             if image_match:
                 st.session_state.image_url = image_match.group(0)
                 st.session_state.image_updated = True
-
-
-    except Exception as e:
-        st.error(f"Error: {str(e)}")
-        return None
-
-# ------------------ Layout ------------------
-left, center = st.columns([1, 2])
-
-# ------------------ Center Column: Chat UI with Static Input on Top ------------------
-with center:
-    st.subheader("Document AI Assistant")
+                st.rerun()  # Trigger rerun to refresh image display

-    # Static Chat Input Bar
-    with st.container():
-        prompt = st.text_input("Ask a question about the document:", key="chat_input")
-        if prompt:
-            query_assistant(prompt)
-            st.rerun()  # Rerun to refresh with new message
-
-    # Show messages: latest at top
-    for message in st.session_state.messages:
-        role = message["role"]
-        with st.chat_message(role):
-            st.markdown(message["content"])
-
-# ------------------ Left Column: Document Image ------------------
-with left:
-    st.subheader("Document Image")
-    if show_image and st.session_state.image_url:
-        try:
-            image = Image.open(requests.get(st.session_state.image_url, stream=True).raw)
-            st.image(image, caption="Extracted Page", use_container_width=True)
-            st.session_state.image_updated = False
         except Exception as e:
-            st.
+            st.error(f"Error: {str(e)}")
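The left image panel depends on the raw.githubusercontent.com pattern above to pull a page image URL out of the assistant's reply. A small self-contained example of that extraction; the sample reply text and the page-012.png filename are made up for illustration:

import re

# Hypothetical assistant reply, used only to exercise the pattern.
sample_reply = (
    "The margin guidelines are summarised on this page: "
    "https://raw.githubusercontent.com/AndrewLORTech/surgical-pathology-manual/main/page-012.png"
)

image_match = re.search(
    r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
    sample_reply
)
if image_match:
    print(image_match.group(0))  # the URL the col1 panel would pass to st.image()

Because st.image accepts a URL directly, the new layout can render this match as-is, which is why the PIL and requests based download used by the removed left-column code is no longer needed.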