# BioWhisper / app.py
# 📦 System dependencies needed locally before deploying:
# Linux: sudo apt install tesseract-ocr poppler-utils
# Windows: install Tesseract from https://github.com/tesseract-ocr/tesseract
#          plus a Poppler build (pdf2image needs the Poppler binaries on PATH)
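# Python packages used below (assumed install command):
# pip install gradio pdf2image pillow pytesseract torch transformers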
import gradio as gr
from pdf2image import convert_from_path
from PIL import Image
import pytesseract
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the MedAlpaca model from the Hugging Face Hub
model_name = "medalpaca/medalpaca-7b"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
model.to(device)
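# Note: medalpaca-7b weighs roughly 14 GB in float16 (7B params x 2 bytes),
# so it needs a GPU with that much memory; float32 CPU inference works but is slow.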
# ========== OCR FUNCTIONS ==========
def extract_text_from_image(image):
    """Run Tesseract OCR on a PIL image and return the raw text."""
    return pytesseract.image_to_string(image)

def extract_text_from_pdf(pdf_file):
    """Render each PDF page to an image via Poppler, then OCR every page."""
    try:
        images = convert_from_path(pdf_file.name)
        text = ""
        for page in images:
            text += pytesseract.image_to_string(page) + "\n"
        return text
    except Exception as e:
        return f"Error reading PDF: {e}"
# ========== MEDALPACA RESPONSE ==========
def generate_medical_explanation(text):
    """Ask MedAlpaca to explain the extracted lab report in plain language."""
    prompt = (
        "You are a helpful medical assistant. Analyze the following patient's lab report text "
        "and explain the abnormalities in plain, non-technical language:\n\n" + text +
        "\n\nAlso, highlight abnormal values with flags."
    )
    # Guard against over-long reports by truncating to the tokenizer's max length
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)
    outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
    # Decode only the newly generated tokens; slicing by input length is more
    # reliable than splitting the decoded string on the prompt text
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
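# do_sample=True with temperature=0.7 gives varied phrasing between runs;
# pass do_sample=False above for deterministic (greedy) output instead.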
# ========== MAIN APP FUNCTION ==========
def analyze_file(file):
    if not file:
        return "⚠️ No file uploaded.", ""
    filename = file.name.lower()
    if filename.endswith(".pdf"):
        extracted_text = extract_text_from_pdf(file)
        # Surface PDF errors directly instead of feeding them to the model
        if extracted_text.startswith("Error reading PDF:"):
            return f"❌ {extracted_text}", ""
    else:
        try:
            img = Image.open(file.name)
            extracted_text = extract_text_from_image(img)
        except Exception as e:
            return f"❌ Error loading image: {e}", ""
    if not extracted_text.strip():
        return "❌ No text found. Try uploading a clearer image or PDF.", ""
    ai_response = generate_medical_explanation(extracted_text)
    return extracted_text, ai_response
# ========== GRADIO INTERFACE ==========
demo = gr.Interface(
    fn=analyze_file,
    inputs=gr.File(label="📄 Upload Lab Report (Image or PDF)"),
    outputs=[
        gr.Textbox(label="📜 Extracted Text", lines=20),
        gr.Textbox(label="🧠 MedAlpaca Interpretation", lines=20),
    ],
    title="🔬 AI Lab Report Analyzer with MedAlpaca",
    description="Upload your medical report (image or PDF). This app extracts text using OCR and explains lab values using the MedAlpaca model.",
)

if __name__ == "__main__":
    demo.launch()
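# Run locally with `python app.py`; Gradio serves on http://127.0.0.1:7860 by default.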