# witspathologyv2 / app.py
import streamlit as st
import os
import time
import re
import json
import requests
import numpy as np
from PIL import Image
from openai import OpenAI
import easyocr
from io import BytesIO
# ------------------ App Configuration ------------------
st.set_page_config(page_title="Document AI Assistant", layout="wide")
st.title("📄 Document AI Assistant")
st.caption("Chat with an AI Assistant about your medical/pathology documents")
# ------------------ Load API Key and Assistant ID ------------------
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
ASSISTANT_ID = os.environ.get("ASSISTANT_ID")
if not OPENAI_API_KEY or not ASSISTANT_ID:
st.error("Missing secrets. Please ensure both OPENAI_API_KEY and ASSISTANT_ID are set in your Hugging Face Space secrets.")
st.stop()
client = OpenAI(api_key=OPENAI_API_KEY)
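# EasyOCR reader used to pull raw text out of the document page images (English only, CPU mode)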
reader = easyocr.Reader(['en'], gpu=False)
# ------------------ Session State Initialization ------------------
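# Persist chat history, the Assistants API thread id, and the currently displayed page image across Streamlit reruns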
if "messages" not in st.session_state:
st.session_state.messages = []
if "thread_id" not in st.session_state:
st.session_state.thread_id = None
if "image_url" not in st.session_state:
st.session_state.image_url = None
if "image_updated" not in st.session_state:
st.session_state.image_updated = False
# ------------------ Sidebar Controls ------------------
st.sidebar.header("🔧 Settings")
if st.sidebar.button("🔄 Clear Chat"):
st.session_state.messages = []
st.session_state.thread_id = None
st.session_state.image_url = None
st.session_state.image_updated = False
st.rerun()
show_image = st.sidebar.checkbox("📖 Show Document Image", value=True)
# ------------------ OCR + GPT Summary & FAQ Generator ------------------
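# Downloads the page image, runs EasyOCR over it, then asks GPT-3.5 for a JSON summary plus two FAQs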
def generate_summary_and_faq_from_image_easyocr(image_url):
try:
        response = requests.get(image_url, stream=True, timeout=30)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content)).convert("RGB")
        # EasyOCR expects a numpy array; detail=0 returns only the recognised text strings
        ocr_result = reader.readtext(np.array(image), detail=0)
        extracted_text = "\n".join(ocr_result)
if not extracted_text.strip():
return "No readable text found in image.", []
prompt = f"""
You are a pathology assistant. Given this OCR-extracted text from a pathology textbook page, do the following:
1. Provide a concise summary of the main point (1-2 sentences).
2. Provide two FAQs with brief answers.
Text:
{extracted_text[:3000]}
Return only JSON:
{{
"summary": "...",
"faqs": [
{{"question": "...", "answer": "..."}},
{{"question": "...", "answer": "..."}}
]
}}
"""
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.3,
            # Ask for strict JSON so json.loads below does not choke on surrounding prose
            response_format={"type": "json_object"}
        )
result = json.loads(response.choices[0].message.content)
return result.get("summary", "No summary generated."), result.get("faqs", [])
except Exception as e:
return f"Error generating summary: {e}", []
# ------------------ Layout ------------------
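# Three-column layout: document image | chat | OCR-based summary & FAQ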
left, center, right = st.columns([1, 2, 1])
# ------------------ Left Column: Document Image ------------------
with left:
    st.subheader("📄 Document Image")
if show_image and st.session_state.image_url:
        try:
            response = requests.get(st.session_state.image_url, timeout=30)
            image = Image.open(BytesIO(response.content))
            st.image(image, caption="📑 Extracted Page", use_container_width=True)
            st.session_state.image_updated = False
        except Exception as e:
            st.warning(f"⚠️ Could not load image: {e}")
# ------------------ Center Column: Chat UI ------------------
with center:
    st.subheader("💬 Document AI Assistant")
for message in st.session_state.messages:
role, content = message["role"], message["content"]
st.chat_message(role).write(content)
if prompt := st.chat_input("Type your question about the document..."):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
try:
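            # Create the Assistants API thread lazily, once per browser session, and reuse it for follow-up questions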
if st.session_state.thread_id is None:
thread = client.beta.threads.create()
st.session_state.thread_id = thread.id
thread_id = st.session_state.thread_id
client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=prompt
)
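            # Start an assistant run on the thread; the Assistants API processes it asynchronously, so we poll below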
run = client.beta.threads.runs.create(
thread_id=thread_id,
assistant_id=ASSISTANT_ID
)
with st.spinner("Assistant is thinking..."):
                while True:
                    run_status = client.beta.threads.runs.retrieve(
                        thread_id=thread_id,
                        run_id=run.id
                    )
                    # Break on any terminal state so a failed, cancelled, or expired run does not spin forever
                    if run_status.status in ("completed", "failed", "cancelled", "expired"):
                        break
                    time.sleep(1)
            # messages.list returns newest-first, so the first assistant entry is the latest reply
            messages = client.beta.threads.messages.list(thread_id=thread_id)
            assistant_message = None
            for message in messages.data:
                if message.role == "assistant":
                    assistant_message = message.content[0].text.value
                    break
            if assistant_message is None:
                assistant_message = "⚠️ The assistant did not return a response."
            st.chat_message("assistant").write(assistant_message)
            st.session_state.messages.append({"role": "assistant", "content": assistant_message})
# Extract GitHub image URL
image_match = re.search(
r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
assistant_message
)
if image_match:
st.session_state.image_url = image_match.group(0)
st.session_state.image_updated = True
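                # Rerun so the left column picks up the new image and the right column regenerates its summary/FAQ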
st.rerun()
except Exception as e:
st.error(f"❌ Error: {str(e)}")
# ------------------ Right Column: OCR-Based Summary + FAQ ------------------
with right:
    st.subheader("📌 Summary & FAQ (via EasyOCR)")
if st.session_state.image_url:
        with st.spinner("🔍 Extracting text and generating summary..."):
summary_text, faq_list = generate_summary_and_faq_from_image_easyocr(st.session_state.image_url)
else:
summary_text = "No image selected."
faq_list = []
st.markdown(summary_text)
st.subheader("❓ Auto-Generated FAQ")
if faq_list:
for faq in faq_list:
st.markdown(f"**Q:** {faq.get('question', '')}\n\n**A:** {faq.get('answer', '')}")
else:
st.info("No FAQs available or generated from this page.")