MohammadArif committed on
Commit
fe6e7b2
·
verified ·
1 Parent(s): ba29f94

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -62
app.py CHANGED
@@ -1,75 +1,78 @@
1
- import streamlit as st
2
  import easyocr
3
- import cv2
4
- import numpy as np
5
- from PIL import Image
6
- import io
7
  from transformers import pipeline
8
 
9
- # Initialize OCR reader
10
  reader = easyocr.Reader(['en'])
11
 
12
- # Initialize Hugging Face text classifier (use a pre-trained or custom model)
13
- classifier = pipeline("text-classification", model="distilbert-base-uncased")
14
-
15
- # Function to check if the image is clear
16
- def is_image_clear(image):
17
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
18
- laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()
19
- return laplacian_var > 100 # Threshold to determine clarity
20
-
21
- # Function to extract text using OCR
22
- def extract_text_from_image(image):
23
- result = reader.readtext(image)
24
- text = " ".join([res[1] for res in result])
25
  return text
26
 
27
- # Function to analyze the text for abnormalities
28
- def analyze_report(text):
29
- result = classifier(text)
30
- severity = "Normal"
31
- explanation = "No abnormalities detected"
32
 
33
- # Example logic to determine severity (you can expand this)
34
- if 'elevated' in text or 'high' in text:
35
- severity = "Moderate"
36
- explanation = "Elevated levels detected (e.g., glucose, blood pressure)"
37
- elif 'critical' in text:
38
- severity = "Severe"
39
- explanation = "Critical levels detected"
40
- return severity, explanation
41
 
42
- # Streamlit UI
43
- st.title("Medical Report Analysis Chatbot")
 
 
 
 
 
 
 
 
44
 
45
- uploaded_file = st.file_uploader("Upload your medical report image", type=["jpg", "jpeg", "png"])
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
- if uploaded_file:
48
- image = Image.open(uploaded_file)
49
- st.image(image, caption="Uploaded Medical Report", use_column_width=True)
 
50
 
51
- # Convert to OpenCV format for clarity check
52
- open_cv_image = np.array(image)
53
- open_cv_image = open_cv_image[:, :, ::-1].copy() # Convert RGB to BGR
54
 
55
- if is_image_clear(open_cv_image):
56
- st.success("Image clarity: Clear")
57
-
58
- # Extract text
59
- text = extract_text_from_image(open_cv_image)
60
- st.write("Extracted Text: ")
61
- st.write(text)
62
-
63
- # Analyze report
64
- severity, explanation = analyze_report(text)
65
- st.write(f"Report Type: Medical Report")
66
- st.write(f"Severity Level: {severity}")
67
- st.write(f"Explanation: {explanation}")
68
-
69
- # Ask for doctor consultation
70
- consultation = st.radio("Would you like to consult a doctor?", ("Yes", "No"))
71
- if consultation == "Yes":
72
- st.write("Consultation fee: $50")
73
- st.write("Your report will be sent to the doctor for further diagnosis.")
74
- else:
75
- st.warning("Image clarity: Blurry. Please upload a clearer image.")
 
 
 
1
  import easyocr
2
+ import requests
3
+ from transformers import AutoModelForTokenClassification, AutoTokenizer
 
 
4
  from transformers import pipeline
5
 
6
+ # Initialize EasyOCR for text extraction from medical report image
7
  reader = easyocr.Reader(['en'])
8
 
9
# Function to extract text from image
def extract_text_from_image(image_path):
    """Run EasyOCR over the image at *image_path* and return the recognized text.

    Each OCR detection is a (bounding_box, text, confidence) tuple; only the
    text fragment is kept, and fragments are joined with single spaces.
    """
    detections = reader.readtext(image_path)
    return ' '.join(fragment for _, fragment, *_ in detections)
14
 
15
# Load Med7 model for medical NER (Named Entity Recognition)
# NOTE(review): "jeff1evesque/med7" must be a valid Hugging Face Hub repo id
# for from_pretrained to succeed — confirm it exists before deploying.
model_name = "jeff1evesque/med7"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)

# Initialize the NER pipeline with Med7 model
# NOTE(review): no aggregation_strategy is passed, so this pipeline emits
# per-token results keyed by 'entity' (with B-/I- prefixes) rather than
# 'entity_group' — downstream consumers must account for that.
nlp = pipeline("ner", model=model, tokenizer=tokenizer)
 
 
 
 
 
 
22
 
23
# Function to extract medical entities (test results) from text
def extract_medical_values(text):
    """Extract lab-result entities from *text* with the Med7 NER pipeline.

    Returns a dict mapping the detected entity word to the model's confidence
    score for that detection.
    """
    entities = nlp(text)
    medical_data = {}

    for entity in entities:
        # The pipeline only emits 'entity_group' when it was constructed with
        # an aggregation_strategy; a plain "ner" pipeline emits 'entity' with
        # B-/I- prefixes instead.  Handle both so this never raises KeyError.
        label = entity.get('entity_group') or entity.get('entity', '')
        if label.removeprefix('B-').removeprefix('I-') == 'LAB_RESULT':
            medical_data[entity['word']] = entity['score']

    return medical_data
33
 
34
# Function to check the values with LabTestAPI or similar API
def validate_medical_value(test_name, test_value):
    """Look up the normal reference range for *test_name* via the external API.

    Returns the 'normal_range' field from the API response, or None when the
    request fails, times out, returns a non-200 status, or the body is
    malformed.
    """
    # Example of API call (replace with actual API)
    api_url = "https://api.labtestapi.com/get_reference_range"
    params = {
        "test_name": test_name,
        "value": test_value
    }
    try:
        # A timeout is essential: requests.get blocks indefinitely by default,
        # which would hang the whole report analysis on a slow endpoint.
        response = requests.get(api_url, params=params, timeout=10)
    except requests.RequestException:
        return None
    if response.status_code != 200:
        return None
    try:
        return response.json()['normal_range']
    except (ValueError, KeyError):
        # Non-JSON body or missing field — treat as "range unavailable".
        return None
48
 
49
# Function to analyze medical report
def analyze_report(image_path):
    """OCR the report image, extract lab values, and classify each one.

    Returns a list of human-readable verdict strings, one per detected test.
    """
    # Step 1: Extract text from the medical report image
    extracted_text = extract_text_from_image(image_path)

    # Step 2: Extract medical values using Med7
    lab_values = extract_medical_values(extracted_text)

    def _classify(name, value):
        # Step 3: validate against the reference range; fall back to a
        # "could not validate" message when the lookup yields nothing.
        bounds = validate_medical_value(name, value)
        if not bounds:
            return f"Could not validate {name}."
        if value < bounds[0]:
            return f"{name} is low. Consider consulting a doctor."
        if value > bounds[1]:
            return f"{name} is high. Consult a doctor."
        return f"{name} is within the normal range."

    return [_classify(name, value) for name, value in lab_values.items()]
73
+
74
# Example Usage
# Guarded so importing this module does not trigger model downloads, OCR,
# and network calls as an import side effect.
if __name__ == "__main__":
    # Replace with the path to a real report image before running.
    image_path = "path_to_medical_report_image.png"
    analysis = analyze_report(image_path)
    for result in analysis:
        print(result)